blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bec1f0dc5f69ea8d65450c0d1ff74d595382e6b6
|
0986dc02b1c288bfad0ec0e69c1e4e44bf3bb5cd
|
/contas/urls.py
|
f5e07715e32c5d206b4894c78ade120de43fc2b7
|
[] |
no_license
|
hugodias360/projeto_TecWeb
|
97a6f1f9071931e3e409f23fbcbf2bd125e45cde
|
5dd3b0e9ee7a8e6d374e4955278625ab1d031fdf
|
refs/heads/master
| 2022-11-04T14:04:22.539497
| 2018-05-17T23:31:59
| 2018-05-17T23:31:59
| 125,785,931
| 0
| 1
| null | 2022-10-19T04:41:43
| 2018-03-19T01:36:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from django.conf.urls import url
from . import views
# Route table for the "contas" app: index/edit/update/delete views for each
# of the three account types (professor, coordenador, aluno).
# NOTE(review): the names 'index', 'edit', 'update' and 'delete' are each
# reused by all three groups, so reverse()/{% url %} cannot reliably
# distinguish them. Consider unique names or an app namespace -- confirm
# against the templates before changing anything.
urlpatterns= [
    #URLS PROFESSOR
    url(r'^professor/$', views.indexProfessor , name='index'),
    # url(r'^professor/create/$', views.createProfessor, name='create'),
    url(r'^professor/edit/(?P<id>\d+)$', views.editProfessor, name='edit'),
    url(r'^professor/edit/update/(?P<id>\d+)$', views.updateProfessor, name='update'),
    url(r'^professor/delete/(?P<id>\d+)$', views.deleteProfessor, name='delete'),
    #URLS COORDENADOR
    url(r'^coordenador/$', views.indexCoordenador , name='index'),
    url(r'^coordenador/edit/(?P<id>\d+)$', views.editCoordenador, name='edit'),
    url(r'^coordenador/edit/update/(?P<id>\d+)$', views.updateCoordenador, name='update'),
    url(r'^coordenador/delete/(?P<id>\d+)$', views.deleteCoordenador, name='delete'),
    #URLS ALUNO
    url(r'^aluno/$', views.indexAluno , name='index'),
    url(r'^aluno/edit/(?P<id>\d+)$', views.editAluno, name='edit'),
    url(r'^aluno/edit/update/(?P<id>\d+)$', views.updateAluno, name='update'),
    url(r'^aluno/delete/(?P<id>\d+)$', views.deleteAluno, name='delete'),
]
|
[
"hugodias360@gmail.com"
] |
hugodias360@gmail.com
|
de377639fae6e41409818847cd35e0f14be66353
|
8de519715db979f128ff44464a84b6d75cba48d8
|
/login_samply.py
|
9aa374892819506e4d89d9e023a792d25298949c
|
[] |
no_license
|
Yukig-github/resarch
|
af146eed7a70b19b507e61ae075833caf9d486bf
|
88f031a8a45eb20bf3d30eba37f4adf9eb219d2e
|
refs/heads/main
| 2023-06-13T08:30:42.886220
| 2021-07-12T14:57:54
| 2021-07-12T14:57:54
| 376,378,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# Log in to the student portal, print the landing page, then follow a link
# from it to "my page".
# NOTE(review): the credentials and the CSRF token are hard-coded below; the
# token in particular is session-specific and likely stale -- scrape it from
# the login form instead of embedding it.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
# Setting the user name and password
USER = "1095287"
PASS = "h110622"
# Start a session
session = requests.session()
# Login form fields expected by the portal
login_info = {
    "uid":USER,
    "pw":PASS,
    "_csrf":"f28e34c1-6ad6-462c-b518-86cf4c23d298",
    "password":"0"
}
# Action
url_login = "http://portal/student/inKITP0000001Login"
res = session.post(url_login, data=login_info)
res.raise_for_status() # check any error
print(res.text)
# Jump to another page: pick up the URL you want to jump to.
# BUG FIX: the response body attribute is `res.text`; the original `res.test`
# raised AttributeError at runtime.
soup = BeautifulSoup(res.text,"html.parser")
a = soup.select_one(".") ## TODO: "." is a placeholder CSS selector -- set the real one
if a is None:
    print("it dosen't work.")
    quit()
# Convert the relative URL into an absolute URL
url_mypage = urljoin(url_login,a.attrs["href"])
print("my page",url_mypage)
# Access the mypage
res = session.get(url_mypage)
res.raise_for_status()
# Pick up topics from the web page (TODO: not implemented)
|
[
"noreply@github.com"
] |
noreply@github.com
|
fccaa06125ae9df91685965edf6b3ae0f129d512
|
b06f4dc6b1703f7e05026f2f3ff87ab776105b18
|
/google/cloud/logging_v2/services/config_service_v2/transports/base.py
|
d52c97635c4cf13e0b3e6f98c0890c5f01146f10
|
[
"Apache-2.0"
] |
permissive
|
anilit99/python-logging
|
040eeb4eed7abe92757b285a1047db09242e89c9
|
9307ad72cb5a6d524ed79613a05858dbf88cc156
|
refs/heads/master
| 2023-06-01T04:49:38.975408
| 2021-06-17T10:52:15
| 2021-06-17T10:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,708
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.logging_v2.types import logging_config
from google.protobuf import empty_pb2 # type: ignore
# Client info advertised in the user-agent; falls back to a blank ClientInfo
# when this package is not installed as a distribution (e.g. run from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Detect installed google-auth / google-api-core versions; the class methods
# below branch on these to stay compatible with older releases.
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
_API_CORE_VERSION = google.api_core.__version__
class ConfigServiceV2Transport(abc.ABC):
    """Abstract transport class for ConfigServiceV2.

    Resolves host, credentials and scopes, and precomputes retry/timeout
    wrappers for every RPC. Concrete transports implement the RPC
    properties declared at the bottom of the class.
    """
    # OAuth scopes requested when the caller supplies none.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-platform.read-only",
        "https://www.googleapis.com/auth/logging.admin",
        "https://www.googleapis.com/auth/logging.read",
    )
    DEFAULT_HOST: str = "logging.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
        # Save the scopes.
        self._scopes = scopes or self.AUTH_SCOPES
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # Save the credentials.
        self._credentials = credentials
    # TODO(busunkim): These two class methods are in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-api-core
    # and google-auth are increased.
    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(
        cls, host: str, scopes: Optional[Sequence[str]]
    ) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}
        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            # Newer google-auth distinguishes caller scopes from defaults.
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
        return scopes_kwargs
    # TODO: Remove this function once google-api-core >= 1.26.0 is required
    @classmethod
    def _get_self_signed_jwt_kwargs(
        cls, host: str, scopes: Optional[Sequence[str]]
    ) -> Dict[str, Union[Optional[Sequence[str]], str]]:
        """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version"""
        self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {}
        if _API_CORE_VERSION and (
            packaging.version.parse(_API_CORE_VERSION)
            >= packaging.version.parse("1.26.0")
        ):
            # Newer api-core supports self-signed JWTs via default scopes/host.
            self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES
            self_signed_jwt_kwargs["scopes"] = scopes
            self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST
        else:
            self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES
        return self_signed_jwt_kwargs
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Several sink/exclusion RPCs get an exponential retry (0.1s initial,
        # 60s cap, x1.3) on DeadlineExceeded / InternalServerError /
        # ServiceUnavailable; the remaining create/update calls get fixed
        # timeouts and bucket/view/CMEK calls use library defaults.
        self._wrapped_methods = {
            self.list_buckets: gapic_v1.method.wrap_method(
                self.list_buckets, default_timeout=None, client_info=client_info,
            ),
            self.get_bucket: gapic_v1.method.wrap_method(
                self.get_bucket, default_timeout=None, client_info=client_info,
            ),
            self.create_bucket: gapic_v1.method.wrap_method(
                self.create_bucket, default_timeout=None, client_info=client_info,
            ),
            self.update_bucket: gapic_v1.method.wrap_method(
                self.update_bucket, default_timeout=None, client_info=client_info,
            ),
            self.delete_bucket: gapic_v1.method.wrap_method(
                self.delete_bucket, default_timeout=None, client_info=client_info,
            ),
            self.undelete_bucket: gapic_v1.method.wrap_method(
                self.undelete_bucket, default_timeout=None, client_info=client_info,
            ),
            self.list_views: gapic_v1.method.wrap_method(
                self.list_views, default_timeout=None, client_info=client_info,
            ),
            self.get_view: gapic_v1.method.wrap_method(
                self.get_view, default_timeout=None, client_info=client_info,
            ),
            self.create_view: gapic_v1.method.wrap_method(
                self.create_view, default_timeout=None, client_info=client_info,
            ),
            self.update_view: gapic_v1.method.wrap_method(
                self.update_view, default_timeout=None, client_info=client_info,
            ),
            self.delete_view: gapic_v1.method.wrap_method(
                self.delete_view, default_timeout=None, client_info=client_info,
            ),
            self.list_sinks: gapic_v1.method.wrap_method(
                self.list_sinks,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_sink: gapic_v1.method.wrap_method(
                self.get_sink,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.create_sink: gapic_v1.method.wrap_method(
                self.create_sink, default_timeout=120.0, client_info=client_info,
            ),
            self.update_sink: gapic_v1.method.wrap_method(
                self.update_sink,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.delete_sink: gapic_v1.method.wrap_method(
                self.delete_sink,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.list_exclusions: gapic_v1.method.wrap_method(
                self.list_exclusions,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_exclusion: gapic_v1.method.wrap_method(
                self.get_exclusion,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.create_exclusion: gapic_v1.method.wrap_method(
                self.create_exclusion, default_timeout=120.0, client_info=client_info,
            ),
            self.update_exclusion: gapic_v1.method.wrap_method(
                self.update_exclusion, default_timeout=120.0, client_info=client_info,
            ),
            self.delete_exclusion: gapic_v1.method.wrap_method(
                self.delete_exclusion,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_cmek_settings: gapic_v1.method.wrap_method(
                self.get_cmek_settings, default_timeout=None, client_info=client_info,
            ),
            self.update_cmek_settings: gapic_v1.method.wrap_method(
                self.update_cmek_settings,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    # Each RPC below is exposed as a property returning the transport's
    # callable for that method; this abstract base only declares them, and
    # concrete subclasses must override each one (hence NotImplementedError).
    @property
    def list_buckets(
        self,
    ) -> Callable[
        [logging_config.ListBucketsRequest],
        Union[
            logging_config.ListBucketsResponse,
            Awaitable[logging_config.ListBucketsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_bucket(
        self,
    ) -> Callable[
        [logging_config.GetBucketRequest],
        Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
    ]:
        raise NotImplementedError()
    @property
    def create_bucket(
        self,
    ) -> Callable[
        [logging_config.CreateBucketRequest],
        Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
    ]:
        raise NotImplementedError()
    @property
    def update_bucket(
        self,
    ) -> Callable[
        [logging_config.UpdateBucketRequest],
        Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
    ]:
        raise NotImplementedError()
    @property
    def delete_bucket(
        self,
    ) -> Callable[
        [logging_config.DeleteBucketRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def undelete_bucket(
        self,
    ) -> Callable[
        [logging_config.UndeleteBucketRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_views(
        self,
    ) -> Callable[
        [logging_config.ListViewsRequest],
        Union[
            logging_config.ListViewsResponse,
            Awaitable[logging_config.ListViewsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_view(
        self,
    ) -> Callable[
        [logging_config.GetViewRequest],
        Union[logging_config.LogView, Awaitable[logging_config.LogView]],
    ]:
        raise NotImplementedError()
    @property
    def create_view(
        self,
    ) -> Callable[
        [logging_config.CreateViewRequest],
        Union[logging_config.LogView, Awaitable[logging_config.LogView]],
    ]:
        raise NotImplementedError()
    @property
    def update_view(
        self,
    ) -> Callable[
        [logging_config.UpdateViewRequest],
        Union[logging_config.LogView, Awaitable[logging_config.LogView]],
    ]:
        raise NotImplementedError()
    @property
    def delete_view(
        self,
    ) -> Callable[
        [logging_config.DeleteViewRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_sinks(
        self,
    ) -> Callable[
        [logging_config.ListSinksRequest],
        Union[
            logging_config.ListSinksResponse,
            Awaitable[logging_config.ListSinksResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_sink(
        self,
    ) -> Callable[
        [logging_config.GetSinkRequest],
        Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
    ]:
        raise NotImplementedError()
    @property
    def create_sink(
        self,
    ) -> Callable[
        [logging_config.CreateSinkRequest],
        Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
    ]:
        raise NotImplementedError()
    @property
    def update_sink(
        self,
    ) -> Callable[
        [logging_config.UpdateSinkRequest],
        Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
    ]:
        raise NotImplementedError()
    @property
    def delete_sink(
        self,
    ) -> Callable[
        [logging_config.DeleteSinkRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def list_exclusions(
        self,
    ) -> Callable[
        [logging_config.ListExclusionsRequest],
        Union[
            logging_config.ListExclusionsResponse,
            Awaitable[logging_config.ListExclusionsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_exclusion(
        self,
    ) -> Callable[
        [logging_config.GetExclusionRequest],
        Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
    ]:
        raise NotImplementedError()
    @property
    def create_exclusion(
        self,
    ) -> Callable[
        [logging_config.CreateExclusionRequest],
        Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
    ]:
        raise NotImplementedError()
    @property
    def update_exclusion(
        self,
    ) -> Callable[
        [logging_config.UpdateExclusionRequest],
        Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
    ]:
        raise NotImplementedError()
    @property
    def delete_exclusion(
        self,
    ) -> Callable[
        [logging_config.DeleteExclusionRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
    @property
    def get_cmek_settings(
        self,
    ) -> Callable[
        [logging_config.GetCmekSettingsRequest],
        Union[logging_config.CmekSettings, Awaitable[logging_config.CmekSettings]],
    ]:
        raise NotImplementedError()
    @property
    def update_cmek_settings(
        self,
    ) -> Callable[
        [logging_config.UpdateCmekSettingsRequest],
        Union[logging_config.CmekSettings, Awaitable[logging_config.CmekSettings]],
    ]:
        raise NotImplementedError()
# Public surface of this module.
__all__ = ("ConfigServiceV2Transport",)
|
[
"noreply@github.com"
] |
noreply@github.com
|
373377356e344a5f785375da56c4540432697303
|
49b861ac5ca3adfe8861a10839b18d9448eb8020
|
/python/chop.py
|
6f74fd49fb24d4987b5f98bb39819310de26f8b4
|
[
"BSD-2-Clause"
] |
permissive
|
symisc/pixlab
|
deee8dcd8b4816c135ad666340e4023dbcefbb66
|
b4ffb1d6b8ff2204ac0db51842ada921478ed66c
|
refs/heads/master
| 2023-08-04T19:39:44.813119
| 2023-07-31T03:51:16
| 2023-07-31T03:51:16
| 85,357,712
| 107
| 34
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import requests
import json
# Chop demo: removes a region of an image and collapses the image to occupy
# the removed portion. See https://pixlab.io/#/cmd?id=chop for more info.
query = {
    'img': 'http://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg',
    'height': 20,
    'x': 45,
    'y': 72,
    'key': 'My_PixLab_Key',
}
response = requests.get('https://api.pixlab.io/chop', params=query)
reply = response.json()
if reply['status'] == 200:
    print("Link to the pic: " + reply['link'])
else:
    print(reply['error'])
|
[
"noreply@github.com"
] |
noreply@github.com
|
60610a10abba973f1b0d8914810087bda779787e
|
c5585b95f42757064b64a535e45983b88e7cc7cb
|
/hw1/es1-2.py
|
44d52b5f6d2c6debc3b83a159c5cce37c875c218
|
[] |
no_license
|
giuliom95/ScientificViz_SS2018
|
e94dd19b58ffb9a2f94f8bf369a74aaac91f1031
|
0e3545363d51ce31cf760a25f9f3d1118783a63c
|
refs/heads/master
| 2020-03-11T14:06:54.445542
| 2018-04-26T11:01:31
| 2018-04-26T11:01:31
| 130,044,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib as mpl
import matplotlib.pyplot as plt
# Number of samples per axis in the data grid.
DATA_RES = 200
# Number of filled-contour colour levels.
VIZ_RES = 400
# z-offset of the flat contour projection drawn under the surface.
BASE_PLANE = -5
COLOR_MAP = mpl.cm.coolwarm
if __name__=='__main__':
	# Cubing the linspace keeps the range [-1, 1] but concentrates samples near 0.
	domain = np.linspace(-1,1,DATA_RES)**3
	# NOTE(review): domain_size is never used below.
	domain_size = len(domain)
	#colormap = mpl.colors.LinearSegmentedColormap('RED', {'red': [(0,1,1), (1,1,1)], 'green': [(0,1,0), (1,1,1)], 'blue': [(0,1,0), (1,1,1)]})
	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')
	ax.set_xlabel('x')
	ax.set_xticks(np.linspace(-1,1,5))
	ax.set_ylabel('y')
	ax.set_yticks(np.linspace(-1,1,5))
	ax.set_zlabel('z')
	ax.set_zticks(np.linspace(-1,1,5))
	# Leave a small gap so the projected contours at BASE_PLANE stay visible.
	ax.set_zlim(BASE_PLANE+.1, 1)
	X, Y = np.meshgrid(domain, domain)
	# Radial wave modulated by a 5-lobed angular term (arctan2 gives the angle).
	Z = np.sin(6*np.cos(np.sqrt(X**2+Y**2))+5*np.arctan2(Y,X))
	cfset = ax.contourf(X,Y,Z, levels=np.linspace(-1,1,VIZ_RES), cmap=COLOR_MAP, antialiased=False)
	# Project a 2-D contour plot onto the plane z = BASE_PLANE.
	ax.contour(X,Y,Z, offset=BASE_PLANE, cmap=COLOR_MAP)
	cb = plt.colorbar(cfset, aspect=5, shrink=.5)
	cb.set_ticks(np.linspace(-.75,.75,7))#.set_ticks(np.linspace(-1,1,5))
	plt.show()
|
[
"giuliomartella1995@gmail.com"
] |
giuliomartella1995@gmail.com
|
a5cabc5a80802e5542d44f22fc4f861683ba8459
|
beb32d495a68bb4ba47ef8aeea2f5afd6acdc244
|
/infosec-lab1/lab1-2/ClientA.py
|
e45c0a5317aed31ecac298c7f3628a8a910786b6
|
[] |
no_license
|
Hatuw/infosec2018
|
b0378cd5460371add65a25195d244a8afbcf8908
|
a48cc2b2fe6958d4f3a064c41ca1a487d62ab798
|
refs/heads/master
| 2020-04-11T01:12:39.126216
| 2018-04-27T08:31:50
| 2018-04-27T08:31:50
| 124,316,665
| 0
| 2
| null | 2018-04-15T14:30:50
| 2018-03-08T01:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
# -*- coding: utf-8 -*-
import re
import rsa
import socket
from ServerC import *
class ClientA:
    """Toy RSA chat peer: registers a key pair with the CA, then serves one peer.

    Messages received are decrypted with this client's private key; replies
    are encrypted with the peer's ('client_b') public key fetched from the CA.
    """
    def __init__(self, name='client_a', ip='127.0.0.1', port=6666, **kwargs):
        # Validate that ip is a well-formed dotted-quad IPv4 address.
        assert re.match(r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
                        , ip), "IP address invalid"
        self.name = name
        self.ip = ip
        self.port = port
        # ServerC (imported via `from ServerC import *`) acts as the CA and
        # generates an RSA key pair registered under this client's name.
        self.ca = ServerC()
        self._privkey = self.ca.gen_newkeys(self.name)
        assert self._privkey, "Failed to generate keys"
        # NOTE(review): the server starts (and blocks) inside __init__.
        self.start_server()
    def start_server(self):
        """Bind to (ip, port), accept one connection and run the chat loop.

        Blocks forever until the peer sends 'bye' or key lookup fails.
        """
        sock = socket.socket()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((self.ip, self.port))
        sock.listen(5)
        print('Client A start at {}:{}'.format(self.ip, self.port))
        # begin to talk
        conn, addr = sock.accept()
        while True:
            # accept_data = str(conn.recv(1024), encoding='utf8')
            accept_data = conn.recv(1024)
            # Incoming bytes are ciphertext for our private key.
            accept_data = rsa.decrypt(accept_data, self._privkey).decode()
            print("Receive: ", accept_data)
            if accept_data == 'bye':
                break
            send_data = input("Reply> ")
            # Encrypt the reply for the peer; abort the process if the CA
            # has no key for 'client_b'.
            pubkey = self.ca.get_pubkey('client_b')
            if pubkey:
                send_data = rsa.encrypt(send_data.encode(), pubkey)
            else:
                print("Failed to encrypt.")
                sock.close()
                exit()
            conn.sendall(send_data)
        conn.close()
        sock.close()
# Runs on import: constructing ClientA immediately starts the blocking server.
client_a = ClientA()
|
[
"jiaxi_wu@ieee.org"
] |
jiaxi_wu@ieee.org
|
88e94952e8652a14ab593a057fe5cd8e5c67fecc
|
d9298c3f4634a3523b7a2f5f1e5c7e7c8d9f0272
|
/Xpresso_kipoi/model.py
|
5ee6928c9d23d1daa7c262fea5512d858b01a1bd
|
[] |
no_license
|
Hoeze/Xpresso_kipoi
|
105127185fbf0d4cbbded073298963cb880395f9
|
252c76b6d71bad2ebde34ce7a1035c3f38fed6aa
|
refs/heads/main
| 2023-01-04T01:04:26.418662
| 2020-10-20T20:33:06
| 2020-10-20T20:33:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
from kipoi.model import BaseModel
from keras.models import load_model
from keras.layers import Layer
from keras import backend as K
import tensorflow as tf
import numpy as np
class Xpresso(BaseModel):
    """Kipoi wrapper for the Xpresso gene-expression Keras model.

    One-hot encodes fixed-length (10,500 nt) promoter sequences and predicts
    expression with zeroed ("average") mRNA half-life features.
    """
    def __init__(self, weights):
        """Load the Keras model from the *weights* file path."""
        # One-hot row per nucleotide; unknown base 'N' maps to all zeros.
        # BUG FIX: the original literal had a stray ']' after the 'N' entry
        # ("[0 ,0 ,0 ,0 ]]}"), which made this file a SyntaxError.
        self.nuc_dict = {'A':[1 ,0 ,0 ,0 ],'C':[0 ,1 ,0 ,0 ],'G':[0 ,0 ,1 ,0 ],
                         'T':[0 ,0 ,0 ,1 ],'N':[0 ,0 ,0 ,0 ]}
        self.weights = weights
        self.model = load_model(weights)
    # One-hot encodes a particular sequence
    def encode_seq(self, seq):
        """One-hot encode one sequence into shape (10500, 4).

        Exits the process if the sequence is not exactly 10,500 nt; raises
        ValueError for bases outside A/C/G/T/N.
        """
        import sys  # BUG FIX: sys was used below but never imported anywhere
        # Enforce the fixed input length the model expects.
        if (len(seq) != 10500):
            sys.exit( "Error in sequence %s: length is not equal to the required 10,500 nts. \
            Please fix or pad with Ns if necessary, with intended TSS centered at position 7,000." % seq )
        seq = seq.upper() # force upper case to be sure!
        # One hot encode
        try:
            one_hot = np.array([self.nuc_dict[x] for x in seq]) # get stacked on top of each other
        except KeyError as e:
            raise ValueError('Cant one-hot encode unknown base: {} in seq: {}. \
            Must be A, C, G, T, or N. If so, please filter'.format(str(e), seq))
        return one_hot
    # One-hot encodes the entire tensor
    def encode(self, inputs):
        """Stack per-sequence encodings into a (batch, 10500, 4) array."""
        # One Hot Encode input
        one_hot = np.stack([self.encode_seq(seq)
                            for seq in inputs], axis = 0)
        return one_hot
    # Predicts for a batch of inputs
    def predict_on_batch(self, inputs):
        """Return {"expression_pred": 1-D array} of predictions for a batch.

        # assumes inputs is an ndarray of sequence strings (it must expose
        # .shape[0]) -- TODO confirm against the kipoi dataloader.
        """
        # Encode
        one_hot = self.encode(inputs)
        # In this limited model, treat RNA as having average mRNA features, to ignore half-life contribution
        # For full model with half-life features, see Xpresso Github
        mean_half_life_features = np.zeros((inputs.shape[0],6), dtype='float32')
        pred = self.model.predict_on_batch([one_hot, mean_half_life_features]).reshape(-1)
        return {"expression_pred": pred}
|
[
"vagar@sci-pvm-vagar.calicolabs.local"
] |
vagar@sci-pvm-vagar.calicolabs.local
|
439ae386234b53e7520364ad483113a36b46d116
|
362f8eeb4dad793eb488f4e80acb65ace793c6e8
|
/test/sorry_jpg.py
|
29e738548f5e6dc88f7561bb43d957ddeb9366a8
|
[] |
no_license
|
louxinye/lxybot_v2
|
ab3613fab060b9d8d805482a9705fbdaab776cd9
|
8ac3ce319491d81e2ec5dda54c778e317fd56719
|
refs/heads/master
| 2021-04-15T07:59:17.103282
| 2018-12-03T03:01:29
| 2018-12-03T03:01:29
| 126,855,317
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
# -*- coding: utf-8 -*-
# Work in progress........
from urllib import parse,request
import requests
import json
# Browser-like headers for the POST request below.
headers = {
    'Accept' : '*/*',
    'Accept-Language' : 'zh-CN,zh;q=0.8',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests' : '1',
    'content-type': 'text/plain;charset=UTF-8',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Mobile Safari/537.36'
}
# Caption lines (frame index -> subtitle text) for the "sorry" GIF maker.
textmod={0:"好啊",1:"就算你是一流工程师",2:"就算你出报告再完美",3:"我叫你改报告你就要改",4:"毕竟我是客户",5:"客户了不起啊",6:"sorry 客户真的了不起",7:"以后叫他天天改报告",8:"111111"}
# Placeholder captions actually sent below (textmod is currently unused).
text = {"0": "111", "1": "222", "2": "333", "3": "444", "4": "555", "5": "666", "6": "777", "7": "888", "8": "111111"}
# textmod = json.dumps(textmod).encode(encoding='utf-8')
# textmod = parse.urlencode(textmod).encode(encoding='utf-8')
url='https://sorry.xuty.tk/api/sorry/make'
req = requests.post(url=url,data=text,headers=headers)
print(req.text)
# res = req.json()
# print(res.decode(encoding='utf-8'))
|
[
"1061566571@qq.com"
] |
1061566571@qq.com
|
c4a1000943aabf39b68ff2e45e00ea41562ca42f
|
626fdb9df49fc36fff5018a489e6089b0986ebd8
|
/05/amusement_park.py
|
2871dca198081b63ada57329b96ab2e876707d60
|
[] |
no_license
|
ypan1988/python_work
|
7bbf9ee2badb981bd2b309f39e07d49d761002c0
|
464f86f8e9348f4c604928d7db63c75e83aefd10
|
refs/heads/main
| 2023-03-27T11:12:28.934378
| 2021-03-28T22:00:28
| 2021-03-28T22:00:28
| 352,447,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
"""Admission pricing: four equivalent ways to branch on age (fixed at 12)."""
# Version 1: print the full message from inside each branch.
age = 12
if age < 4:
    print("Your admission cost is $0.")
elif age < 18:
    print("Your admission cost is $25.")
else:
    print("Your admission cost is $40.")
# Version 2: start from the adult price, override for the younger tiers,
# then print once at the end.
age = 12
price = 40
if age < 4:
    price = 0
elif age < 18:
    price = 25
print(f"Your admission cost is ${price}.")
# Version 3: add a senior tier (65 and over) priced at $20.
age = 12
if age < 4:
    price = 0
elif age < 18:
    price = 25
else:
    price = 20 if age >= 65 else 40
print(f"Your admission cost is ${price}.")
# Version 4: spell out every tier, including an explicit final elif.
age = 12
if age < 4:
    price = 0
elif age < 18:
    price = 25
elif age < 65:
    price = 40
elif age >= 65:
    price = 20
print(f"Your admission cost is ${price}.")
|
[
"ypan1988@gmail.com"
] |
ypan1988@gmail.com
|
dad4360db0dbf7aab1cdcc42647bc8853831028d
|
83b938483ed42ede91d46392599161a2f0a01cb0
|
/apps/users/migrations/0001_initial.py
|
8d4ad134e2a9c814f4a31aef6225ce41f8e9ce8d
|
[] |
no_license
|
fshFSH1387/mxshop
|
65fa0eb41f6ac673bbdd4fadc986c5668b040f96
|
e04b8bc0cff6422d7e8e2b39c2a2436c69b51bfe
|
refs/heads/master
| 2020-03-25T19:35:05.981787
| 2018-08-09T02:53:43
| 2018-08-09T02:53:43
| 144,090,040
| 2
| 1
| null | 2018-08-09T02:53:44
| 2018-08-09T02:14:46
| null |
UTF-8
|
Python
| false
| false
| 4,037
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-24 02:48
from __future__ import unicode_literals
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration for the users app: creates a custom user model
    # (UserProfile) whose fields mirror Django's default user model plus
    # extra profile fields, and a VerifyCode table for SMS codes.
    initial = True
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]
    operations = [
        # Custom auth user model (intended for AUTH_USER_MODEL).
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Extra profile fields (verbose_name strings are Chinese:
                # name, date of birth, gender, phone, email).
                ('name', models.CharField(blank=True, max_length=30, null=True, verbose_name='姓名')),
                ('birthday', models.DateField(blank=True, null=True, verbose_name='出生年月')),
                ('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='female', max_length=6, verbose_name='性别')),
                ('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
                ('email', models.EmailField(blank=True, max_length=100, null=True, verbose_name='邮箱')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # SMS verification codes, keyed informally by mobile number;
        # add_time defaults to creation time.
        migrations.CreateModel(
            name='VerifyCode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10, verbose_name='验证码')),
                ('mobile', models.CharField(max_length=11, verbose_name='电话')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
            ],
            options={
                'verbose_name': '短信验证码',
                'verbose_name_plural': '短信验证码',
            },
        ),
    ]
|
[
"1907144407@qq.com"
] |
1907144407@qq.com
|
262e1d9f54429dd7118716daf6cfbc910a323686
|
4fb9150b08a128571ed4a84897c8c95afb76ccb6
|
/healthy/migrations/0002_labdetail.py
|
e6624ae565d211b7af58232ca3a06dfcfe941dd7
|
[] |
no_license
|
eduarde/ChunkyMonkeys
|
815feb7f3e6e2085babb61d12f2255ea2cb46ada
|
34f30e6aaeef6af15aa12e6d599f55d67c6fb7d7
|
refs/heads/master
| 2021-07-09T21:30:49.084584
| 2016-12-05T10:42:04
| 2016-12-05T10:42:04
| 58,738,867
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 11:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the LabDetail model: free-text reason/cause/action notes
    # attached to a healthy.Lab record and the authoring user.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('healthy', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='LabDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reason', models.TextField(blank=True, null=True, verbose_name='Reason')),
                ('cause', models.TextField(blank=True, null=True, verbose_name='Cause')),
                ('action', models.TextField(blank=True, null=True, verbose_name='Action')),
                # CASCADE: deleting the Lab or the user deletes its details.
                ('lab_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='LabDet', to='healthy.Lab')),
                ('user_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"eduard.erja@gmail.com"
] |
eduard.erja@gmail.com
|
c00202055a9f383b7755f979c3d1ba3c997f004e
|
0e9bb8a5f0be4be19ee3c00af6f5532218bc288e
|
/maching_learning/task3/alexandra_vesloguzova.py
|
e6ca6dbdcb1c03c3d7cfe7a16c9f8f78cc892ba8
|
[] |
no_license
|
avesloguzova/spbau_homework
|
82abbb02735af68a5ae5e8580746beacb3d13592
|
6e143c3da0cb1f8a8f6915db1f9135143f9cf5a8
|
refs/heads/master
| 2021-01-23T06:40:16.648288
| 2015-08-11T19:51:31
| 2015-08-11T19:51:31
| 27,387,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,973
|
py
|
__author__ = 'av'
import numpy
def read_fasta(path):
    """Parse a FASTA file.

    :param path: path to a FASTA-formatted text file
    :return: tuple (records, labels) where records[i] is the concatenated
        sequence belonging to header labels[i]; labels keep the raw header
        line including its trailing newline (original behaviour).
    """
    records = list()
    labels = list()
    current_long_string = ""
    current_id = ""
    with open(path, 'r') as f:
        for line in f:
            # startswith() instead of indexing line[0] so a blank line
            # cannot raise IndexError.
            if line.startswith(">"):
                # flush the previous record before starting a new one
                if current_long_string != "":
                    records.append(current_long_string)
                    labels.append(current_id)
                current_id = line
                current_long_string = ""
            else:
                current_long_string += line.replace("\n", "")
    # Bug fix: the final record used to be flushed only when the *next*
    # header appeared, so the last sequence in the file was dropped.
    if current_long_string != "":
        records.append(current_long_string)
        labels.append(current_id)
    return (records, labels)
def is_id(line):
    """Return True if *line* is a FASTA header line (starts with '>').

    Uses str.startswith so an empty line returns False instead of raising
    IndexError (the original indexed line[0]).
    """
    return line.startswith(">")
def get_substring(s, n):
    """Count every length-n substring (n-gram) of s.

    Bug fix: the original iterated range(0, len(s) - n - 1), which skips
    the last two n-grams (e.g. get_substring("abc", 2) returned {}).
    The correct exclusive upper bound is len(s) - n + 1.

    :param s: input string
    :param n: substring length
    :return: dict mapping each n-gram to its occurrence count
    """
    result = dict()
    for i in range(len(s) - n + 1):
        gram = s[i:i + n]
        # dict.get with default replaces the if/else counting branch
        result[gram] = result.get(gram, 0) + 1
    return result
def intersection(multiset1, multiset2):
    """Multiset intersection: keys present in both, with the smaller count.

    Python 3 fix: dict.iteritems() no longer exists; items() works on
    both Python 2 and 3. Keys whose count in multiset2 is falsy (0 or
    missing) are skipped, matching the original .get() truthiness test.
    """
    result = dict()
    for key, count in multiset1.items():
        if multiset2.get(key):
            result[key] = min(count, multiset2[key])
    return result
def union(multiset1, multiset2):
    """Multiset union: counts are summed across both inputs.

    Python 3 fix: dict.iteritems() no longer exists; items() works on
    both Python 2 and 3. dict.get(key, 0) + count collapses the original
    if/else accumulation into one expression with identical results.
    """
    result = dict(multiset1)
    for key, count in multiset2.items():
        result[key] = result.get(key, 0) + count
    return result
def jaccard(s, t, n):
    """Jaccard distance between the length-n n-gram multisets of s and t.

    Returns 1 - |intersection| / |union| (multiset cardinalities), so
    identical strings give 0.0 and disjoint n-gram sets give 1.0.
    """
    grams_s = get_substring(s, n)
    grams_t = get_substring(t, n)
    shared = sum(intersection(grams_s, grams_t).values())
    total = sum(union(grams_s, grams_t).values())
    return 1.0 - float(shared) / float(total)
def levenshtein(s, t):
    """Edit distance between s and t (two-row dynamic programming).

    Rows are numpy vectors so the per-character min/update runs
    vectorised; only the previous row is kept between iterations.
    """
    # keep s as the longer string so the row vectors stay short
    if len(s) < len(t):
        return levenshtein(t, s)
    if len(t) == 0:
        return len(s)
    long_chars = numpy.array(tuple(s))
    short_chars = numpy.array(tuple(t))
    prev_row = numpy.arange(short_chars.size + 1)
    for symbol in long_chars:
        # start from deletion cost, then fold in substitution/insertion
        row = prev_row + 1
        row[1:] = numpy.minimum(row[1:], numpy.add(prev_row[:-1], short_chars != symbol))
        row[1:] = numpy.minimum(row[1:], row[:-1] + 1)
        prev_row = row
    return prev_row[-1]
def find_min(distances, C):
    """Return the pair (i, j) over C x C with the smallest distance.

    The diagonal is expected to hold inf (set by the caller) so (i, i)
    is never selected; returns (-1, -1) if every entry is inf.
    """
    best = numpy.inf
    best_pair = (-1, -1)
    for a in C:
        for b in C:
            if distances[a][b] < best:
                best = distances[a][b]
                best_pair = (a, b)
    return best_pair
def lance_williams(X, dist):
    # Agglomerative hierarchical clustering using the Lance-Williams
    # update with size-proportional weights. Returns an (n-1) x 3
    # MATLAB-style linkage matrix Z: [cluster_a, cluster_b, distance]
    # with 1-based indices, as expected by from_mlab_linkage() in
    # show_dendrogram().
    n = len(X)
    Z = numpy.zeros((n - 1, 3))
    clusters = list(range(0, n))
    # Matrix sized for the n original points plus the n-1 merged
    # clusters created during the run (indices n .. 2n-2).
    distances = numpy.zeros((2 * n - 1, 2 * n - 1))
    cluster_sizes = numpy.zeros((2 * n - 1, 1))
    for i in range(0, len(X)):
        cluster_sizes[i] = 1
        for j in range(0, len(X)):
            if i != j:
                distances[i, j] = dist(X[i], X[j])
            else:
                # inf on the diagonal so find_min never picks (i, i)
                distances[i, j] = numpy.inf
    for t in range(0, len(X) - 1):
        # merge the closest pair among the currently active clusters
        i, j = find_min(distances, clusters)
        Z[t, 0] = i + 1
        Z[t, 1] = j + 1
        Z[t, 2] = distances[i, j]
        clusters.remove(i)
        clusters.remove(j)
        cluster = n + t
        clusters.append(cluster)
        cluster_sizes[n + t] = cluster_sizes[i] + cluster_sizes[j]
        for ind in clusters:
            if ind != cluster:
                # distance from the merged cluster = size-weighted average
                # of the two constituents' distances (kept symmetric)
                distances[cluster, ind] = distances[i, ind] * (cluster_sizes[i] / (cluster_sizes[cluster])) + \
                                          distances[j, ind] * (cluster_sizes[j] / (cluster_sizes[cluster]))
                distances[ind, cluster] = distances[cluster, ind]
            else:
                distances[ind, ind] = numpy.inf
    return Z
def show_dendrogram(Z, **kwargs):
    # Render a MATLAB-format (1-based) linkage matrix Z as a dendrogram.
    # Imports are local so scipy/matplotlib are only needed when plotting;
    # plt.show() blocks until the window is closed.
    from scipy.cluster.hierarchy import dendrogram, from_mlab_linkage
    from matplotlib import pyplot as plt
    dendrogram(from_mlab_linkage(Z), **kwargs)
    plt.show()
if __name__ == "__main__":
params = {16, 8, 1}
X, labels = read_fasta("ribosome.fasta")
show_dendrogram(lance_williams(X, levenshtein), labels=labels)
for n in params:
jac = lambda s, t: jaccard(s, t, n)
show_dendrogram(lance_williams(X, jac), labels=labels)
|
[
"sashickk@gmail.com"
] |
sashickk@gmail.com
|
937ad49bfc2d7aa2201b9927a626479eaea90261
|
8f352a174c6a2ef7787668bf0b9f0a2c5b5668d4
|
/app.py
|
2fc6a1f7635f2efb7e2ff47aed0351b8cd884649
|
[] |
no_license
|
DavidLGoldberg/github-user-stats
|
be82e75528ca15be484db6775aaae25276d4c3cf
|
ceed274504f65b2842eabb021ddd58fd4049e9f5
|
refs/heads/master
| 2020-05-30T23:54:54.623558
| 2012-12-21T12:47:21
| 2012-12-21T12:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
from hero_tmpl import create_app
if __name__ == "__main__":
#TODO: make configurable string
print "Starting Server."
app = create_app()
import os
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
print "App Imported"
|
[
"davidlgoldberg@gmail.com"
] |
davidlgoldberg@gmail.com
|
357248cb94353f91c0ae97139073ed46440fcdc8
|
c5cb5ef8bc136a40ef79628daf06c6cc1f7e44ac
|
/compare_mcx_printing.py
|
777a96051fbbe0b2b82776195f855360ea60fb6d
|
[] |
no_license
|
metasyn/mcx-comparison
|
6e7b3164d9cc2452be0b111425c0b84e19783f58
|
5e3d3988c0c0e97d870af177cb7eabd3551b930a
|
refs/heads/master
| 2020-06-04T01:43:13.415252
| 2014-03-04T19:33:37
| 2014-03-04T19:33:37
| 20,038,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,043
|
py
|
# mcx setting equivalence test
# ajohnson - 2014 - 2 11
from __future__ import division
import sys
# use ard to send a unix command: mcxquery -computerOnly
# to all the computers that you want to compare mcx values for
# export the command results as a text file, that will be the input
# NOTE: Python 2 script (print statements throughout).
# Input (argv[1]): text exported from an ARD "mcxquery -computerOnly" run,
# one record per computer, records separated by a blank line.
infile = open(sys.argv[1], 'r')
# get raw text
raw = infile.read()
infile.close()
# split by means of two new lines (the separator for computers)
computerlist = raw.split('\n\n')
# drop the trailing empty chunk after the final separator
computerlist = computerlist[:-1]
# now we need to only take the relevant parts of the mcxquery to compare
# starting index = 'com.apple.mcxprinting'
# ending index = the beginning of the next com, which is com.microsoft.autoupdate2
mcxlist = []
for computer in computerlist:
    # in case there is just no plist...
    if 'com.apple.mcxprinting' in computer:
        name = computer[:12].strip()
        beg = computer.index('com.apple.mcxprinting')
        # adas mcx settings end at the mcx printing so we can use a try/except to deal with that
        try:
            beg = computer.index('com.apple.mcxprinting')
            end = computer.index('com.microsoft.autoupdate2')
            mcxlist.append(computer[beg:end])
        except ValueError:
            mcxlist.append(computer[beg:])
    else:
        print "{} has no com.apple.mcxprinting plist !".format(computer[:12])
# here we create a dictionary for the name + match key value pairs
match_dict = {}
# NOTE(review): if any computer lacked the plist above, mcxlist is shorter
# than computerlist and the name/setting pairing below drifts out of step;
# verify inputs always contain the plist or pair names at append time.
# loop through the list
for setting in range(len(mcxlist)):
    # slice the computer name
    name = computerlist[setting][:12].strip()
    # create a blank match count for matches
    match_count = 0
    # loop again and see if one setting matches all the others
    # (a computer always matches itself, so the minimum count is 1)
    for setting2 in mcxlist:
        if mcxlist[setting] == setting2:
            match_count += 1
    # add values to the dictionary
    match_dict[name] = match_count
# print results
print "="*25
print "{:15} {:3}".format("Computer", "Matches\n")
for k,v in match_dict.items():
    print "{:15} {:3}".format(k, v)
|
[
"alexx.johnson@gmail.com"
] |
alexx.johnson@gmail.com
|
43170fa8f7fc5a3560607c4b21a1cb123096b586
|
f6c1a4593859ad75000e726414f25fbf02766143
|
/setup.py
|
7edb29cfc794fbf5f917801018c219ab2e44a25c
|
[] |
no_license
|
jbeezley/metadata_extractor
|
b753ce6f9e55e5bc92f16b5decfbab5b992ac621
|
1401127bf572119353e3c504278ff7436e077c9e
|
refs/heads/master
| 2020-03-20T00:57:52.713434
| 2018-06-12T13:49:54
| 2018-06-12T13:49:54
| 137,062,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from setuptools import setup, find_packages
# perform the install
# Standard setuptools packaging for the Girder metadata-extractor plugin;
# the 'girder.plugin' entry point is how Girder discovers the plugin class.
setup(
    name='girder-plugin-metadata-extractor',
    version='0.2.0',
    description='Enables the extraction of metadata from uploaded files',
    author='Kitware, Inc.',
    author_email='kitware@kitware.com',
    url='https://github.com/girder/metadata_extractor',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    # ship the bundled web-client assets alongside the Python package
    package_data={
        '': ['web_client/**']
    },
    packages=find_packages(exclude=['test']),
    zip_safe=False,
    # hachoir does the actual metadata extraction
    install_requires=[
        'girder',
        'hachoir-core',
        'hachoir-metadata',
        'hachoir-parser'
    ],
    entry_points={
        'girder.plugin': [
            'metadata_extractor = girder_plugin_metadata_extractor:MetadataExtractorPlugin'
        ]
    }
)
|
[
"jonathan.beezley@kitware.com"
] |
jonathan.beezley@kitware.com
|
91abc10c50eaf8da3dbf150d0f254a8c6b7a3fc2
|
30c6d52e9d285b358d62f24e971ca20b180ed2ee
|
/tests/tc2.py
|
2a7e618f00bd0831c1dcdcebb0c7d57a96c34c70
|
[
"MIT"
] |
permissive
|
rxse/python-demoshop
|
c4c3e79f5f67e697bf221e241dc0e7978a0c9586
|
b7b8c2ef59a63350433c9d50b4730c7344cbbcc6
|
refs/heads/master
| 2020-08-01T15:42:55.537210
| 2020-03-30T07:25:50
| 2020-03-30T07:25:50
| 211,036,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Ranorex Webtestit Test File
from utils.base_test import BaseTest
from pageobjects.items_overview_po import ItemsOverviewPo
class Tc2(BaseTest):
    """End-to-end demo-shop checkout: add three items to the cart, check
    out with guest details, and assert the confirmed order total."""
    def test_fast_checkout(self):
        # Page objects drive the browser; each navigation step returns
        # the page object for the next screen.
        driver = self.get_driver()
        # Open the page
        overview = ItemsOverviewPo(driver).open(
            "https://demoshop.webtestit.com/")
        # Add items to the cart
        overview.add_item1_to_cart()
        overview.add_item2_to_cart()
        overview.add_item3_to_cart()
        # View cart
        cart = overview.click_on_cart()
        # Perform checkout
        checkout = cart.click_proceed_to_checkout()
        # Fill out the form
        checkout.set_first_name("Chuck")
        checkout.set_last_name("Norris")
        checkout.set_email("chuck.norris@test.com")
        # Place the order
        confirmation = checkout.place_order()
        # Assert that the ordered amount is correct
        self.assertEqual(confirmation.get_total_amount(), "€3,700.00")
|
[
"smatijas@Sasas-MacBook-Pro.local"
] |
smatijas@Sasas-MacBook-Pro.local
|
eb53a990da835beaca9e9cc878481161831bfb1f
|
1bb2a9150de01c618163bbb8f872bdce6f14df4f
|
/BaekJoon/2981_검문.py
|
acbbec8e742ffdac47cb7a67e0dc300dcd8ab895
|
[] |
no_license
|
whyj107/Algorithm
|
a1c9a49a12a067366bd0f93abf9fa35ebd62102e
|
aca83908cee49ba638bef906087ab3559b36b146
|
refs/heads/master
| 2023-04-14T12:59:52.761752
| 2021-05-01T03:53:31
| 2021-05-01T03:53:31
| 240,014,212
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
# Problem
# "Checkpoint" (BOJ 2981)
# https://www.acmicpc.net/problem/2981
# Solution: all numbers share the same remainder mod d exactly when d
# divides every pairwise difference, so it suffices to test the divisors
# (>= 2) of max - min, in increasing order.
from sys import stdin
N = int(stdin.readline())
M = [int(stdin.readline()) for i in range(N)]
M.sort()
tmp = M[-1] - M[0]
y = []
# collect all divisors >= 2 of the largest difference
for i in range(2, int(tmp**0.5)+1):
    if tmp % i == 0:
        y.append(i)
        if tmp//i not in y: y.append(tmp//i)
y.sort()
y.append(tmp)
# print a divisor only if every adjacent (sorted) pair agrees modulo it
for i in y:
    for n in range(N):
        if n == N-1:
            print(i, end=" ")
        elif M[n] % i != M[n+1] % i:
            break
# Another person's solution (kept verbatim for reference)
"""
import sys
input = sys.stdin.readline
def gcd(a, b):
    return gcd(b, a % b) if a % b else b
n = int(input())
num = sorted([int(input()) for _ in range(n)])
get = num[1] - num[0]
for i in range(2, n):
    get = gcd(get, num[i]-num[i-1])
res = set()
for i in range(2, int(get**0.5)+1):
    if get % i == 0:
        res.add(i)
        res.add(get//i)
res.add(get)
res = sorted(list(res))
print(' '.join(map(str, res)))
"""
|
[
"noreply@github.com"
] |
noreply@github.com
|
af7e89df385ab20dc1f91bac730a8ca9b629cf3f
|
1316cd6763e784811c769c1de577235c921af0de
|
/Apps/qscan/pyth2p7/scanner.py
|
fe77520db52aa55bf64dc1ebb7679cf1b63d600f
|
[] |
no_license
|
VELA-CLARA-software/Software
|
a6fb6b848584e5893fd6939a447d23134ce636cc
|
2e2a88ac0b2b03a495c868d2e11e6481e05097c3
|
refs/heads/master
| 2023-02-05T07:40:58.260798
| 2023-01-27T09:39:09
| 2023-01-27T09:39:09
| 69,860,536
| 7
| 3
| null | 2021-04-07T14:17:07
| 2016-10-03T10:20:46
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 9,766
|
py
|
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
from PyQt4.QtCore import pyqtSlot
import sys,os
#import view
import numpy as np
sys.path.append("\\\\apclara1\\ControlRoomApps\\Controllers\\bin\\stage\\")
#sys.path.append("\\\\apclara1\\ControlRoomApps\\Controllers\\bin\\stage\\Python3_x64\\")
#sys.path.append("\\\\apclara1.dl.ac.uk\\ControlRoomApps\\Controllers\\bin\\stage\\Python3_x64\\")
#
#for item in sys.path:
# print item
#0# import VELA_CLARA_PILaser_Control as pil
import time
#0# pil_init = pil.init()
#pil_init.setVerbose()
#0# pil_control = pil_init.physical_PILaser_Controller()
#import lasmover as lm
import math as ma
import numpy as np
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
import VELA_CLARA_LLRF_Control as rf
rfinit = rf.init()
therf = rfinit.physical_CLARA_LRRG_LLRF_Controller()
import VELA_CLARA_BPM_Control as bpm
bpminit = bpm.init()
bpminit.setVerbose()
bpms = bpminit.physical_CLARA_PH1_BPM_Controller()
import VELA_CLARA_General_Monitor as mon
monini = mon.init()
charge = monini.connectPV('CLA-S01-DIA-WCM-01:Q')
lasE = monini.connectPV('CLA-LAS-DIA-EM-01:E')
vcsump = monini.connectPV('CLA-VCA-DIA-CAM-01:ANA:Intensity_RBV')
# NEW section to get llrf stuff. Tried copying from Duncan charge app.
therf2 = rfinit.getLLRFController(rf.MACHINE_MODE.PHYSICAL,rf.LLRF_TYPE.CLARA_LRRG)
##therf2.getCavFwdPwr()
##therf2.getCavRevPwr()
##print("hello!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!",therf2.getCavFwdPwr())
#exit()
class chargescanner(QtCore.QObject):
    """Raster-scans the laser spot over the cathode (via the virtual
    cathode camera, VC) and samples readings at each grid point.

    Signals:
        changedval(x, y, charge, laser_energy) -- emitted per scan point.
        changedlogtxt(str) -- log messages for the GUI.
    """
    changedval = pyqtSignal(float, float, float, float)
    changedlogtxt = pyqtSignal(str)
    # lo, hi and the min and max values of the area on the VC to scan
    # values are mm from bottom left of the VC imagecollector
    # nx,y is number of points to stop and measure charge at in x,y
    xlo = 3
    xhi = 7
    ylo = 3
    yhi = 7
    nx = 3
    ny = 3
    xrange = np.linspace(xlo,xhi,nx)
    yrange = np.linspace(ylo,yhi,ny)
    def setxrange(self,dumxlo,dumxhi,dumnx):
        """Set the x scan positions: dumnx points from dumxlo to dumxhi (mm)."""
        self.xrange = np.linspace(dumxlo,dumxhi,dumnx)
    def setyrange(self,dumylo,dumyhi,dumyx):
        """Set the y scan positions: dumyx points from dumylo to dumyhi (mm)."""
        # Bug fix: the body referenced an undefined name 'dumny', so this
        # method always raised NameError; use the actual parameter.
        self.yrange = np.linspace(dumylo,dumyhi,dumyx)
    def doscan(self,xxlo,xxhi,xxn,yylo,yyhi,yyn):
        """Run the raster scan over the given x/y ranges (mm / point counts).

        Alternate rows are traversed in reverse order to minimise travel.
        Opens qscan<HHMMSS>.txt in the work folder for appending (note:
        the actual f.write call is currently commented out) and emits
        changedval/changedlogtxt per point.
        """
        self.xrange = np.linspace(xxlo,xxhi,xxn)
        self.yrange = np.linspace(yylo,yyhi,yyn)
        print('IN DOSCAN',self.xrange)
        print('IN DOSCAN',self.yrange)
        print(therf.getPhiDEG())
        print('***********************************************************')
        print('!!!!!!!!!!!!!!!!!!!!!PLEASE READ!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('***********************************************************\n')
        print('This is a script to scan the laser over the cathode (via VC)')
        print('and measure the charge on the WCM \n')
        print('Please have imagecollector open on the VC and check that the')
        print('mask feedback is switched on, and that the mask follows the VC laser spot')
        print('throughout the scan.\n')
        print('the x locations where charge will be measured are', self.xrange, ' mm')
        print('the y locations where charge will be measured are', self.yrange, ' mm')
        # Bug fix: report the count for the ranges just set from the
        # arguments, not the class-level defaults nx/ny.
        print('the total number of scan points is', len(self.xrange)*len(self.yrange))
        print('the wcm reading at each point will be recorded in the file qscan.txt')
        print('which is created whereever you run this script from')
        # write results to work folder.
        timestr = time.strftime("%H%M%S")
        dir = '\\\\fed.cclrc.ac.uk\\Org\\NLab\\ASTeC\\Projects\\VELA\\Work\\'+time.strftime("%Y\\%m\\%d")+'\\'
        try:
            os.makedirs(dir)
        except OSError:
            if not os.path.isdir(dir):
                # Bug fix: this class has no 'logger' attribute (the old
                # self.logger.emit raised AttributeError); use the
                # changedlogtxt signal the rest of the class emits on.
                self.changedlogtxt.emit('Error creating directory - saving to local directory')
                dir = '.'
        filename = dir+'qscan'+str(timestr)+'.txt'
        f = open(filename,'a')
#        f = open('qscan'+str(timestr)+'.txt','a')
        #exit()
        #0# pil_control.setMaskFeedBackOn_VC()
        #xrange = [5.5]
        #yrange = [4.5]
        # # next section of code to access data from PI Contoller (code c/o Duncan)
        # # from this we can get individual peices of hardware, let's store them in a dictionary called hardware
        # hardware = {}
        # # VC camera image object object
        # vc_image = "vc_image"
        # hardware[vc_image] = pil_control.getImageObj()
        # # to access the vc_image object just call: hardware[vc_image]
        # # image data / anmalaysis
        # vc_image_data= "vc_image_data"
        # hardware[vc_image_data] = pil_control.getVCDataObjConstRef()
        # # the VC camera
        # vc_cam= "vc_cam"
        # hardware[vc_cam] = pil_control.getCameraObj()
        # # laser mirror object
        # mirror = "mirror"
        # # the PI laser object (from here you can get Q, laser energy ...)
        # pil = "pil"
        # hardware[pil] = pil_control.getPILObjConstRef()
        # #number of shots:
        # num_shots_to_average = 12
        # pil_control.setAllRSBufferSize(num_shots_to_average)
        # # Check the buffer size for the Q
        # print("getRSBufferSize = ",hardware[pil].max_buffer_count)
        # # some constants, (probably should save once for each run )
        # x_pix_scale_factor = hardware[vc_image].x_pix_scale_factor
        # y_pix_scale_factor = hardware[vc_image].y_pix_scale_factor
        # x_pix_to_mm = hardware[vc_image].x_pix_to_mm
        # y_pix_to_mm = hardware[vc_image].y_pix_to_mm
        # num_pix_x = hardware[vc_image].num_pix_x
        # num_pix_y = hardware[vc_image].num_pix_y
        # # ++ others??
        ix = 0
        chargebest = 0
        for x in self.xrange:
            # boustrophedon: even rows scan y forward, odd rows reversed
            if ix % 2 == 0:
                dumyrange = self.yrange
                print('going up', dumyrange)
            else:
                dumyrange = self.yrange[::-1]
                print('going up', dumyrange)
            ix = ix + 1
            for y in dumyrange:
                print(x, y, '\n')
                #l a = pil_control.setVCPos(x,y)
                #l # monitor this paramter to know when ity has finished
                #l set_pos_succes = False
                #l# exit()
                #l
                #l while 1:
                #l     set_pos_state = pil_control.getSetVCPosState()
                #l     print 'success status', set_pos_state
                #l     if set_pos_state == pil.VC_SET_POS_STATE.SUCCESS:
                #l         set_pos_succes = True
                #l         break
                #l     else:
                #l         print set_pos_state
                #l     time.sleep(1)
                #l print("Set Position Finished",pil_control.getSetVCPosState())
#            exit()
#            mylasmove.setposition(x,y,5,0.1)
#            raw_input("Press Enter to continue...")0
                # # get the qscan quantities at this point (c.o Duncan for the code)
                # # set next-position
                # # when at next position
                # print("At next_positoin, getting some data")
                # pil_control.clearRunningValues()
                # # wait for buffer to fill, we will just check against the Q buffer
                # while hardware[pil].Q_full == False: # suggest for pil_control, we could do with a isRFBufferNOTFull function(!)
                #     print("Waiting for running stat buffer to fill, ", hardware[pil].Q_n)
                #     time.sleep(0.5)
                # print("Buffer Is Full, ",hardware[pil].Q_n," getting data")
                # # mean and (standard deviation) sd for Q
                # Q_mean = hardware[pil].Q_mean
                # Q_sd = hardware[pil].Q_sd
                # # mean and sd for energy
                # energy_mean = hardware[pil].energy_mean
                # energy_sd = hardware[pil].energy_sd
                # # laser x position mean and sd
                # x_pix_mean = hardware[vc_image_data].x_pix_mean
                # x_pix_sd = hardware[vc_image_data].x_pix_sd
                # # laser y position mean and sd
                # y_pix_mean = hardware[vc_image_data].y_pix_mean
                # y_pix_sd = hardware[vc_image_data].y_pix_sd
                # # laser x width mean and sd
                # sig_x_pix_mean = hardware[vc_image_data].sig_x_pix_mean
                # sig_x_pix_sd = hardware[vc_image_data].sig_x_pix_sd
                # # y position mean and sd
                # sig_y_pix_mean = hardware[vc_image_data].sig_y_pix_mean
                # sig_y_pix_sd = hardware[vc_image_data].sig_y_pix_sd
                # placeholder values while the hardware reads are commented out
                chargenow = 1.1
#                chargenow = monini.getValue(charge)
                lasEnow = 1.1
#                lasEnow = monini.getValue(lasE)
                vcsumpnow = monini.getValue(vcsump)
#                f.write('str(x)+' '+str(x_pix_mean)+' '+str(x_pix_sd)+' '+str(y)+' '+str(y_pix_mean)+' '+str(y_pix_sd)+' '+str(Q_mean)+' '+str(Q_sd)+' ' '+'str(Q_mean)+' '+str(Q_sd)')
#                f.write('RF phase '+str(therf.getPhiDEG())+' vcx '+str(x)+' vcy '+str(y)+' charge '+str(chargenow)+' laserE '+str(lasEnow)+' VCintens '+str(vcsumpnow)+'\n')
                f.flush()
                self.changedval.emit(x,y,chargenow,lasEnow)
                iterstring = "hello string signal"
                print(iterstring)
                self.changedlogtxt.emit(iterstring)
                print("charge now", chargenow, " best charge ", chargebest)
                if chargenow > chargebest:
                    chargebest = chargenow
                    print("got a higher charge")
        print('finished the scan, the higher charge was', chargebest)
        f.close()
|
[
"duncan.scott@stfc.ac.uk"
] |
duncan.scott@stfc.ac.uk
|
c56a3f8d77a5d05be57428bbda596c5e31709503
|
241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777
|
/examples/boundary_conditions.py
|
a73111c7860a10c82ddfefc46005d3f0954a7718
|
[
"MIT"
] |
permissive
|
xuanxu/py-pde
|
d8be358ab76d4060b14afc74bc7d836591c6188e
|
de33d938aea8680eff872ae1b64569895662a248
|
refs/heads/master
| 2021-03-09T21:37:13.920717
| 2020-03-10T12:18:03
| 2020-03-10T12:18:03
| 246,382,909
| 0
| 0
|
MIT
| 2020-03-10T18:54:22
| 2020-03-10T18:54:22
| null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
#!/usr/bin/env python3
# Diffusion example for the `pde` package: 16x16 unit grid, periodic in
# the y axis only, with mixed boundary conditions in x (fixed derivative
# on the left, fixed value on the right). plot(show=True) opens a window.
from pde import UnitGrid, ScalarField, DiffusionPDE
grid = UnitGrid([16, 16], periodic=[False, True])  # generate grid
state = ScalarField.random_uniform(grid, 0.2, 0.3)  # generate initial condition
# set boundary conditions `bc` for all axes
bc_x_left = {'type': 'derivative', 'value': 0.1}
bc_x_right = {'type': 'value', 'value': 0}
bc_x = [bc_x_left, bc_x_right]
bc_y = 'periodic'
eq = DiffusionPDE(bc=[bc_x, bc_y])
# integrate to t=10 with a fixed time step of 0.005
result = eq.solve(state, t_range=10, dt=0.005)
result.plot(show=True)
|
[
"david.zwicker@ds.mpg.de"
] |
david.zwicker@ds.mpg.de
|
98d22f1272cf3b173a633a6aa4f96343b8ca83c7
|
1683cd92471c08a6e6fb4a2913a78ba2658d1fc8
|
/tests/abell2218_high_red_huber.py
|
cff3fc8793439591c84261f8971c84abe91ff249
|
[
"CECILL-B",
"LicenseRef-scancode-cecill-b-en"
] |
permissive
|
pombredanne/csh
|
04ed74007d3725782871ef7a4fb52ba6b4a0a45c
|
6e36fea6ccb9fdd6e79f92d9fd1840f98f4c9805
|
refs/heads/master
| 2021-01-01T00:06:15.637419
| 2011-07-18T15:28:04
| 2011-07-18T15:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
import os
import csh
import lo
# define data set
# Map-making driver: runs csh.rls once per compression mode over two
# prepared-frames FITS scans and writes each solution map to FITS.
datadir = os.getenv('CSH_DATA')
#filenames = [datadir + '/1342184598_red_PreparedFrames.fits[5954:67614]',
#             datadir + '/1342184599_red_PreparedFrames.fits[5954:67615]']
filenames = [datadir + '/1342184598_red_PreparedFrames.fits',
             datadir + '/1342184599_red_PreparedFrames.fits']
# no compression
output_path = os.path.join(os.getenv('HOME'), 'data', 'csh', 'output',)
# compression modes ("" means uncompressed)
compressions = ["", "ca", "cs"]
#compressions = ["ca"]
# median filter length
deglitch=True
covariance=False
filtering = True
filter_length = 100
#hypers = (1e9, 1e9)
# NOTE(review): this assignment is dead -- hypers is reassigned inside
# the loop below before every csh.rls call.
hypers = (1e0, 1e0)
wavelet='haar'
deltas = (None, 1e-8, 1e-8)
ext = ".fits"
pre = "abell2218_high_red_huber_"
# to store results
sol = []
# define same header for all maps
tod, projection, header, obs = csh.load_data(filenames)
# get the weight map
weights = projection.transpose(tod.ones(tod.shape))
weights.writefits(os.path.join(output_path, pre + 'weights' + ext))
# free the large arrays before the reconstruction loop
del tod, projection, obs
# find a map for each compression and save it
for comp in compressions:
    # uncompressed runs use weaker regularisation than compressed ones
    if comp == "":
        hypers = (1/8., 1/8.)
    else:
        hypers = (1e0, 1e0)
    sol.append(csh.rls(filenames, compression=comp, hypers=hypers,
                       header=header, deltas=deltas,
                       deglitch=deglitch, covariance=covariance,
                       filtering=filtering, filter_length=filter_length,
                       algo=lo.hacg, tol=1e-8, wavelet=wavelet
                       ))
    fname = os.path.join(output_path, pre + comp + ext)
    sol[-1].writefits(fname)
|
[
"nbarbey@sapherschel4.extra.cea.fr"
] |
nbarbey@sapherschel4.extra.cea.fr
|
2ce9a1b049459c79da30b6a1c77b1b59475eaa01
|
2f260fa01c744d93aacfe592b62b1cee08b469de
|
/sphinx/tello/source/_static/code/python/control-program/tello.py
|
5b7390453ddf2deefede00ae55943829785944fd
|
[
"CC-BY-4.0"
] |
permissive
|
oneoffcoder/books
|
2c1b9b5c97d3eaaf47bafcb1af884b1adcc23bba
|
35c69915a2a54f62c2c3a542045719cf5540f6ba
|
refs/heads/master
| 2023-06-25T16:00:10.926072
| 2023-06-20T03:40:09
| 2023-06-20T03:40:09
| 216,915,443
| 50
| 3
| null | 2023-03-07T01:27:50
| 2019-10-22T21:46:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 8,757
|
py
|
import socket
import threading
import time
class Tello(object):
"""
Wrapper class to interact with the Tello drone.
"""
    def __init__(self, local_ip, local_port, imperial=False,
                 command_timeout=.3,
                 tello_ip='192.168.10.1',
                 tello_port=8889):
        """
        Binds to the local IP/port and puts the Tello into command mode.

        :param local_ip: Local IP address to bind.
        :param local_port: Local port to bind.
        :param imperial: If True, speed is MPH and distance is feet.
                         If False, speed is KPH and distance is meters.
        :param command_timeout: Number of seconds to wait for a response to a command.
        :param tello_ip: Tello IP.
        :param tello_port: Tello port.
        """
        self.abort_flag = False
        self.command_timeout = command_timeout
        self.imperial = imperial
        # last raw reply bytes; written by the receiver thread, consumed
        # (and reset to None) by send_command()
        self.response = None
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tello_address = (tello_ip, tello_port)
        # cache used by get_height() when a reply cannot be parsed
        self.last_height = 0
        self.socket.bind((local_ip, local_port))
        # thread for receiving cmd ack
        # daemon thread so it does not keep the process alive on exit
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        # sending 'command' switches the Tello into command (SDK) mode
        self.socket.sendto(b'command', self.tello_address)
        print ('sent: command')
def __del__(self):
"""
Closes the local socket.
:return: None.
"""
self.socket.close()
    def _receive_thread(self):
        """
        Listen to responses from the Tello.

        Runs as a thread, sets self.response to whatever the Tello last returned.
        Loops forever; as a daemon thread it ends when the process exits.

        :return: None.
        """
        while True:
            try:
                # blocking receive; 3000-byte buffer for one UDP datagram
                self.response, _ = self.socket.recvfrom(3000)
            except socket.error as exc:
                # report and keep listening instead of killing the thread
                print(f'Caught exception socket.error : {exc}')
    def send_command(self, command):
        """
        Send a command to the Tello and wait for a response.

        Busy-waits until the receiver thread fills self.response or until
        command_timeout elapses, whichever comes first.

        :param command: Command to send.
        :return: Response from Tello, or 'none_response' on timeout.
        """
        print(f'>> send cmd: {command}')
        self.abort_flag = False
        # timer flips abort_flag after command_timeout to break the wait loop
        timer = threading.Timer(self.command_timeout, self.set_abort_flag)
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
        timer.start()
        # spin until the receiver thread posts a reply or the timer fires
        while self.response is None:
            if self.abort_flag is True:
                break
        timer.cancel()
        if self.response is None:
            response = 'none_response'
        else:
            response = self.response.decode('utf-8')
        # clear the shared slot so the next command starts clean
        self.response = None
        return response
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
Used by the timer in Tello.send_command() to indicate to that a response
timeout has occurred.
:return: None.
"""
self.abort_flag = True
def takeoff(self):
"""
Initiates take-off.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('takeoff')
def set_speed(self, speed):
"""
Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
:param speed: Speed.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command(f'speed {speed}')
def rotate_cw(self, degrees):
"""
Rotates clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return:Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'cw {degrees}')
def rotate_ccw(self, degrees):
"""
Rotates counter-clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'ccw {degrees}')
def flip(self, direction):
"""
Flips.
:param direction: Direction to flip, 'l', 'r', 'f', 'b'.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'flip {direction}')
def get_response(self):
    """
    Return the most recently received raw response.

    :return: Raw response from Tello (bytes), or None if nothing has
        been received since the last command completed.
    """
    return self.response
def get_height(self):
    """
    Return the height of the Tello in decimeters.

    Queries the drone with 'height?'; the reply is typically of the form
    '6dm'.  The digits are extracted and cached in ``self.last_height``
    so that a failed query (e.g. a 'none_response' timeout) falls back
    to the most recent good reading.

    :return: Height in decimeters (int).
    """
    height = self.send_command('height?')
    # BUG FIX: on Python 3 filter() returns an iterator, so the original
    # int(filter(...)) always raised TypeError and this method could
    # only ever return the cached value.  Join the digits back into a
    # string first.
    digits = ''.join(filter(str.isdigit, str(height)))
    try:
        height = int(digits)
        self.last_height = height
    except ValueError:
        # No digits in the reply (e.g. 'none_response') -> use the cache.
        height = self.last_height
    return height
def get_battery(self):
    """
    Return percent battery life remaining.

    :return: Battery percentage as an int, or the raw response string
        unchanged if it could not be parsed (e.g. 'none_response').
    """
    battery = self.send_command('battery?')
    try:
        battery = int(battery)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only parse failures are expected
        # here; anything else (KeyboardInterrupt etc.) should propagate.
        pass
    return battery
def get_flight_time(self):
    """
    Return the number of seconds elapsed during flight.

    :return: Seconds elapsed as an int, or the raw response string
        unchanged if it could not be parsed (e.g. 'none_response').
    """
    flight_time = self.send_command('time?')
    try:
        flight_time = int(flight_time)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only parse failures are expected.
        pass
    return flight_time
def get_speed(self):
    """
    Return the current speed.

    The Tello reports cm/s; the value is converted to KPH (metric) or
    MPH (imperial) and rounded to one decimal place.

    :return: Current speed as a float, or the raw response string
        unchanged if it could not be parsed (e.g. 'none_response').
    """
    speed = self.send_command('speed?')
    try:
        speed = float(speed)
    except (TypeError, ValueError):
        # Narrowed from a bare except: return the unparsed reply as-is.
        return speed
    if self.imperial is True:
        return round(speed / 44.704, 1)
    return round(speed / 27.7778, 1)
def land(self):
    """
    Command the drone to land.

    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    cmd = 'land'
    return self.send_command(cmd)
def move(self, direction, distance):
    """
    Move in a direction for a distance.

    Accepts meters (metric) or feet (imperial) and converts to the
    centimeters the Tello API expects (20-500 cm, i.e. .02-5 m or
    .7-16.4 ft).

    :param direction: 'forward', 'back', 'right', 'left', 'up' or 'down'.
    :param distance: Distance to move in the configured unit system.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    # Unit-conversion factors to centimeters.
    feet_to_cm = 30.48
    meters_to_cm = 100
    factor = feet_to_cm if self.imperial is True else meters_to_cm
    cm = int(round(float(distance) * factor))
    return self.send_command(f'{direction} {cm}')
def move_backward(self, distance):
    """
    Fly backward for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'back'
    return self.move(heading, distance)
def move_down(self, distance):
    """
    Descend for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'down'
    return self.move(heading, distance)
def move_forward(self, distance):
    """
    Fly forward for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'forward'
    return self.move(heading, distance)
def move_left(self, distance):
    """
    Fly left for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'left'
    return self.move(heading, distance)
def move_right(self, distance):
    """
    Fly right for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'right'
    return self.move(heading, distance)
def move_up(self, distance):
    """
    Ascend for a distance (units per Tello.move()).

    :param distance: Distance to move.
    :return: Response from Tello, 'OK' or 'FALSE'.
    """
    heading = 'up'
    return self.move(heading, distance)
|
[
"vangjee@gmail.com"
] |
vangjee@gmail.com
|
184b7da7cb5866239070d75f25a4f648b2c21ba3
|
d83a3ee1084311df70626fb2ce0be483f5631472
|
/histVarPng.py
|
3386cfd4a875f26af378b42735047ac756a1b6cf
|
[
"MIT"
] |
permissive
|
AineNicD/pands-project
|
1ea0e758dbcbab0f72958712672d12da36878294
|
55f7dbab4f106f4776be4e0248c941a4246e64ac
|
refs/heads/master
| 2021-04-19T03:03:12.837525
| 2020-04-30T22:50:45
| 2020-04-30T22:50:45
| 249,573,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# Saves a histogram of each variable to PNG files, one figure per species.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Read the iris data set.
data = pd.read_csv("irisDataSet.csv")

# Names of the variables in the data set.
names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']

# Separate the rows by species.
setosa = data[data['species'] == 'setosa']
versicolor = data[data['species'] == 'versicolor']
virginica = data[data['species'] == 'virginica']

# BUG FIX: plt.savefig() saves the *current* figure, so drawing all three
# histograms first and then calling savefig() three times wrote the
# virginica figure into all three files.  Save each figure right after
# it is drawn instead.
setosa.hist()
plt.savefig("setosa.png")
versicolor.hist()
plt.savefig("versicolor.png")
virginica.hist()
plt.savefig("virginica.png")
plt.show()
# Ian Mc Loughlin lecture on plots
# data frame code for each species from https://www.kaggle.com/abhishekkrg/python-iris-data-visualization-and-explanation
|
[
"noreply@github.com"
] |
noreply@github.com
|
8ebd15e0eeac0255627e3a4bca3146244f8e6cb3
|
425c28656cceb9f4098f86f4e53d810beb7e1f89
|
/Store/migrations/0007_remove_orders_user_profile.py
|
665699cc56e8e72fa67f5850761e248eb85a171b
|
[] |
no_license
|
Arpan555/KGN_BAZAR
|
4a06c564828125cce6e32faff1984fbdaab43149
|
b7e84ceea4b17de0f62566ca9f3d2d96948a09ec
|
refs/heads/main
| 2023-08-15T07:33:42.886654
| 2021-10-06T16:09:46
| 2021-10-06T16:09:46
| 414,282,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
# Generated by Django 3.1.1 on 2020-09-30 04:47
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``user_profile`` field from ``Orders``."""

    # Must run after 0006, which last modified the Orders model.
    dependencies = [
        ('Store', '0006_orders_email'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='orders',
            name='user_profile',
        ),
    ]
|
[
"arpanpatidar65@gmail.com"
] |
arpanpatidar65@gmail.com
|
b7f1a23ed37c6438acc3ebc50d0a908d8e2e6287
|
67dd57d1a529a05405f201ce0d0d04829d9f3e2f
|
/Practica2/ejercicio2.py
|
e54ccae5469b177d631aac7f552e53f7ff1b2470
|
[] |
no_license
|
chema969/IMD
|
72b7fe82cec54181e3670bc5827b36bdd656a8f8
|
742ddcb7fdbbea4da602b96c20188d87ea96f340
|
refs/heads/master
| 2020-08-01T05:27:30.153815
| 2019-12-21T15:40:57
| 2019-12-21T15:40:57
| 210,880,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Figure with two histograms side by side, sharing the x axis.
fig, axs = plt.subplots(ncols=2, sharex=True)

# Read the diabetes data set.
nombre_fichero = "basesDeDatos/diabetes.csv"
data = pd.read_csv(nombre_fichero)

# Right panel: raw (non-normalized) histogram of plasma glucose by class.
data.groupby("class").plas.hist(alpha=0.4, ax=axs[1])
axs[1].set_title('Histograma sin normalizar')

# Left panel: density-normalized histogram of the same variable.
data.groupby("class").plas.hist(alpha=0.4, density=True, ax=axs[0])
axs[0].set_title('Histograma normalizado')

# Axis labels and legend (user-facing strings kept in the original Spanish).
plt.xlabel("Glucosa plasmática tras el test")
plt.ylabel("Frecuencia")
plt.legend(["tested_negative", "tested_positive"])
plt.show()
|
[
"chema969@hotmail.com"
] |
chema969@hotmail.com
|
1804e27f2aaf570252b3379d08e9789ad92856ba
|
f0e53d4eb9517db4a62070590c7a3256f3bd7743
|
/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py
|
93bc06fe3665ccf9bfc4785fd4ea9ea2bff8a8b9
|
[
"Apache-2.0"
] |
permissive
|
lookmee/neutron
|
064e58aef246dd95edd391bb07ee34acf97019e6
|
313fdd16140e9a42eb0f21d4a45406ae1c7adf25
|
refs/heads/master
| 2020-12-31T01:36:18.938921
| 2013-11-16T04:07:51
| 2013-11-16T04:07:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,042
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests in this module will be skipped unless:
- ovsdb-client is installed
- ovsdb-client can be invoked via password-less sudo
- OS_SUDO_TESTING is set to '1' or 'True' in the test execution
environment
The jenkins gate does not allow direct sudo invocation during test
runs, but configuring OS_SUDO_TESTING ensures that developers are
still able to execute tests that require the capability.
"""
import os
import random
import eventlet
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import ovsdb_monitor
from neutron.agent.linux import utils
from neutron.tests import base
def get_rand_name(name='test'):
    """Return *name* suffixed with a random positive 31-bit integer."""
    suffix = random.randint(1, 0x7fffffff)
    return '%s%s' % (name, suffix)
def create_ovs_resource(name_prefix, creation_func):
    """Create a new ovs resource that does not already exist.

    Keeps generating random names until creation_func succeeds; a
    RuntimeError is assumed to indicate a name collision.

    :param name_prefix: The prefix for a randomly generated name
    :param creation_func: A function taking the name of the resource
        to be created.
    :return: Whatever creation_func returns for the first free name.
    """
    # NOTE: the original loop ended with an unreachable `break` after
    # `continue`; every path either returns or retries, so it is removed.
    while True:
        name = get_rand_name(name_prefix)
        try:
            return creation_func(name)
        except RuntimeError:
            # Name collision - try another random name.
            continue
class BaseMonitorTest(base.BaseTestCase):
    """Shared setup for ovsdb monitor functional tests.

    Creates a throw-away OVS bridge (destroyed on cleanup) and skips the
    test unless ovsdb-client is installed and usable via password-less
    sudo (see the module docstring).
    """

    def setUp(self):
        super(BaseMonitorTest, self).setUp()
        self._check_test_requirements()
        # These tests drive real ovs commands, hence sudo.
        self.root_helper = 'sudo'
        self.ovs = ovs_lib.BaseOVS(self.root_helper)
        # A random bridge name avoids collisions with other test runs.
        self.bridge = create_ovs_resource('test-br-', self.ovs.add_bridge)

        def cleanup_bridge():
            self.bridge.destroy()
        self.addCleanup(cleanup_bridge)

    def _check_command(self, cmd, error_text, skip_msg):
        """Run *cmd*; skip the test if it fails with *error_text* in the error."""
        try:
            utils.execute(cmd)
        except RuntimeError as e:
            # Expected failure (tool missing / sudo denied) -> skip test.
            if error_text in str(e):
                self.skipTest(skip_msg)
            raise

    def _check_test_requirements(self):
        """Skip unless sudo testing is enabled and ovsdb-client is usable."""
        if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
            self.skipTest('testing with sudo is not enabled')
        self._check_command(['which', 'ovsdb-client'],
                            'Exit code: 1',
                            'ovsdb-client is not installed')
        self._check_command(['sudo', '-n', 'ovsdb-client', 'list-dbs'],
                            'Exit code: 1',
                            'password-less sudo not granted for ovsdb-client')
class TestOvsdbMonitor(BaseMonitorTest):
    """Functional tests for OvsdbMonitor against a live ovsdb."""

    def setUp(self):
        super(TestOvsdbMonitor, self).setUp()
        # Monitor the Bridge table of the running ovsdb.
        self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge',
                                                  root_helper=self.root_helper)
        self.addCleanup(self.monitor.stop)
        self.monitor.start()

    def collect_initial_output(self):
        """Poll until the monitor emits its first line of stdout output."""
        while True:
            output = list(self.monitor.iter_stdout())
            if output:
                return output[0]
            # Yield to other greenthreads while waiting.
            eventlet.sleep(0.01)

    def test_killed_monitor_respawns(self):
        """Killing the monitored process must result in a respawned monitor."""
        with self.assert_max_execution_time():
            # Respawn immediately rather than waiting the default interval.
            self.monitor.respawn_interval = 0
            old_pid = self.monitor._process.pid
            output1 = self.collect_initial_output()
            pid = self.monitor._get_pid_to_kill()
            self.monitor._reset_queues()
            self.monitor._kill_process(pid)
            # Wait until a new process (different pid) has been spawned.
            while (self.monitor._process.pid == old_pid):
                eventlet.sleep(0.01)
            output2 = self.collect_initial_output()
            # Initial output should appear twice
            self.assertEqual(output1, output2)
class TestSimpleInterfaceMonitor(BaseMonitorTest):
    """Functional tests for SimpleInterfaceMonitor."""

    def setUp(self):
        super(TestSimpleInterfaceMonitor, self).setUp()
        self.monitor = ovsdb_monitor.SimpleInterfaceMonitor(
            root_helper=self.root_helper)
        self.addCleanup(self.monitor.stop)
        # block=True: do not proceed until the monitor is actually active.
        self.monitor.start(block=True)

    def test_has_updates(self):
        self.assertTrue(self.monitor.has_updates,
                        'Initial call should always be true')
        # NOTE(review): has_updates appears to be consumed on read (the
        # second read is expected False) — confirm in SimpleInterfaceMonitor.
        self.assertFalse(self.monitor.has_updates,
                         'has_updates without port addition should be False')
        create_ovs_resource('test-port-', self.bridge.add_port)
        with self.assert_max_execution_time():
            # has_updates after port addition should become True
            while not self.monitor.has_updates:
                eventlet.sleep(0.01)
|
[
"marun@redhat.com"
] |
marun@redhat.com
|
056c947683ddb8f5d4853e367c173f5af567bb82
|
1818bd3151bb1a04e669918d3d59579709136df3
|
/coconut/setup.py
|
f7ada82ff62f9ca23719096c54a1cb078fadb0a2
|
[
"Apache-2.0"
] |
permissive
|
MatthewBrien/PythonCorpus
|
3db489cf1f0e8245d2c284af7439f7d69e623a87
|
526c1f78cdf17f9151552bcbfef5582cf8655446
|
refs/heads/master
| 2021-04-27T10:34:45.880859
| 2016-07-02T02:17:48
| 2016-07-02T02:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,787
|
py
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------------------------------------------------
# INFO:
#-----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Installer for the Coconut Programming Language.
"""
#-----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
#-----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "coconut"))
from root import *
import setuptools
#-----------------------------------------------------------------------------------------------------------------------
# MAIN:
#-----------------------------------------------------------------------------------------------------------------------
# Strip the Sphinx-only ``.. toctree::`` directive (and its indented body)
# out of the README so the long_description renders cleanly on PyPI.
with open("README.rst", "r") as opened:
    readme_raw = opened.read()
readme_lines = []
in_toc = False
for line in readme_raw.splitlines():
    # An unindented, non-empty line marks the end of the toctree block.
    if in_toc and line and not line.startswith(" "):
        in_toc = False
    if line == ".. toctree::":
        in_toc = True
    if not in_toc:
        readme_lines.append(line)
readme = "\n".join(readme_lines)

# Package metadata for the Coconut compiler and CLI.
setuptools.setup(
    name = "coconut",
    version = VERSION,
    description = "Simple, elegant, Pythonic functional programming.",
    long_description = readme,
    url = "http://coconut-lang.org",
    author = "Evan Hubinger",
    author_email = "evanjhub@gmail.com",
    install_requires = [
        "pyparsing==2.1.5"
        ],
    packages = setuptools.find_packages(),
    include_package_data = True,
    entry_points = {
        "console_scripts": [
            "coconut = coconut.__main__:main"
            ],
        "pygments.lexers": [
            "coconut_python = coconut.highlighter:pylexer",
            "coconut_pycon = coconut.highlighter:pyconlexer",
            "coconut = coconut.highlighter:coclexer"
            ]
        },
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Topic :: Software Development",
        "Topic :: Software Development :: Code Generators",
        "Topic :: Software Development :: Compilers",
        "Topic :: Software Development :: Interpreters",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
        "Environment :: Console",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Other",
        "Programming Language :: Other Scripting Engines",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Framework :: IPython"
        ],
    keywords = [
        "functional programming language",
        "functional programming",
        "functional",
        "programming language",
        "compiler",
        "match",
        "matches",
        "matching",
        "pattern-matching",
        "pattern matching",
        "algebraic data type",
        "algebraic data types",
        "data",
        "data type",
        "data types",
        "lambda",
        "lambdas",
        "lazy list",
        "lazy lists",
        "lazy evaluation",
        "lazy",
        "tail recursion",
        "tail call",
        "recursion",
        "recursive",
        "infix",
        "function composition",
        "partial application",
        "currying",
        "curry",
        "pipeline",
        "pipe",
        "unicode operator",
        "unicode operators",
        "frozenset literal",
        "frozenset literals",
        "destructuring",
        "destructuring assignment",
        "reduce",
        "takewhile",
        "dropwhile",
        "tee",
        "consume",
        "count",
        "parallel_map",
        "MatchError",
        "datamaker",
        "data keyword",
        "match keyword",
        "case keyword"
        ]
    )
|
[
"luke.inkster@hotmail.com"
] |
luke.inkster@hotmail.com
|
8a6a7e50e68280271d06ba16a0a7c6fcd77c4f9e
|
6950eb30f8fa3b62ab11abe8ee1f70609f486fbc
|
/shell/pbomb.py
|
960b06d8ea17fc351cd93c45bcaadb2c65010a62
|
[
"MIT"
] |
permissive
|
pbl64k/icfpc2016
|
ca347139f06323f1de9ca7f559a2d432e7ef283f
|
57edac99b0f21f48e14dfdb4742783ba8abb2da1
|
refs/heads/master
| 2021-01-19T03:22:15.738647
| 2016-08-08T19:35:34
| 2016-08-08T19:35:34
| 65,162,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
#!/usr/bin/env python
# Python 2 script: read a contest problem list (JSON) from stdin, run the
# solver binary on each problem spec, and submit the resulting solution.
import sys
import os
import time
import simplejson as json

blob = json.loads(sys.stdin.read())

for prob in blob['problems']:
    print 'Problem', str(prob['problem_id'])
    # NOTE(review): command lines are built by string concatenation and run
    # through the shell via os.system; fine for trusted local data, but a
    # shell-injection risk if this JSON ever came from an untrusted source.
    os.system('.cabal-sandbox/bin/icfpc2016-zhmppxm ' + ' <probs/' + str(prob['problem_id']) + '-' + prob['problem_spec_hash'] + '.txt' + ' >sols/' + str(prob['problem_id']) + '.txt')
    os.system('shell/sol-submit.sh ' + str(prob['problem_id']) + ' sols/' + str(prob['problem_id']) + '.txt')
    print ' ...Done.'
    # Throttle submissions to avoid hammering the contest server.
    time.sleep(5)

sys.exit(0)
|
[
"pbl64k@gmail.com"
] |
pbl64k@gmail.com
|
e9658d417076aadb516ad617c0dd66220ee1e14e
|
e394eb85637bb2683aee3cfa1004b2a6e22ffeea
|
/Zy/learn/drawTogether.py
|
239c9aecd43fbd9d655973e909f3241385285949
|
[] |
no_license
|
icyeyes1999/DataScience
|
d55d08ca808ccc315d6548f388cef142b1e454d8
|
2c10a720663d7ee31d72677cfc93e44ae43c72bf
|
refs/heads/master
| 2022-11-24T04:53:45.485883
| 2020-07-28T15:47:19
| 2020-07-28T15:47:19
| 281,598,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import json

# Line chart.
# 20 days total (6.20-7.5); some songs drop off the chart on some days.
with open("test_data.json", "r", encoding='UTF-8') as f:
    temp = json.loads(f.read())
    index_1 = 1
    for i in temp.keys():
        x = []
        y = []
        print(i)
        print(temp[i])
        # Skip songs that charted on fewer than 10 days.
        if(len(temp[i])<10):
            continue
        numlist = temp[i]
        index = 1
        for j in numlist:
            # Each entry is a whitespace-separated pair; num[1] is the heat value.
            num = list(map(float,j.split()))
            #print(num)
            #x.append(num[0]*100)
            x.append(index)
            index = index + 1
            y.append(num[1])
        plt.plot(x, y, 's-', color='r', label="song"+str(index_1))  # s-: square markers
        index_1=index_1+1
        print(index_1)
    plt.xlabel("data")  # x-axis label
    plt.ylabel("hot")  # y-axis label
    plt.legend(loc="best")  # legend placement
    # plt.savefig('.../image_learn/'+i+'.png')
    # plt.close()
    plt.show()
    #plt.savefig('image1/《微微》.pdf')
|
[
"181250188@nju.edu.cn"
] |
181250188@nju.edu.cn
|
a8753bfe34916e7be2706d5e78dbd5c973275542
|
d9652ac6f107747da9f57b47d054dc6862d98119
|
/XiAnalyzer/XiMassPt/python/ximasspt_cfi.py
|
81f8be50247f77e04001bb73f1df10db21520fc7
|
[] |
no_license
|
BenjaminTran/XI_CMSSW5_3_20
|
306f9ae51c5505a6ae2a4488142d78da4d13fcec
|
cfdc4fb6810a7ad9fc2a1fdac8e52c2f3199ae30
|
refs/heads/master
| 2021-01-21T10:59:54.595125
| 2017-05-18T16:34:39
| 2017-05-18T16:34:39
| 91,715,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi

# High-multiplicity HLT filter: accept events matching any of these paths.
hltHM = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
hltHM.HLTPaths = [
    'HLT_PAPixelTracks_Multiplicity100_v*',
    'HLT_PAPixelTracks_Multiplicity130_v*',
    'HLT_PAPixelTracks_Multiplicity160_v*'
    #'HLT_PAPixelTracks_Multiplicity190_v',
    #'HLT_PAPixelTracks_Multiplicity220_v'
]

# XiMassPt analyzer configuration: Xi candidate mass/pt study restricted to a
# primary-vertex z window and a track-multiplicity window.
xiMassPt = cms.EDAnalyzer('XiMassPt',
    trkSrc = cms.InputTag('generalTracks'),
    xiCollection = cms.InputTag('selectV0CandidatesLowXi:Xi'),
    vertexCollName = cms.InputTag('offlinePrimaryVertices'),
    # z-vertex acceptance window (presumably cm — TODO confirm units).
    zVtxHigh = cms.double(15.0),
    zVtxLow = cms.double(-15.0),
    # Multiplicity selection window.
    multHigh = cms.double(220),
    multLow = cms.double(185)
)
|
[
"blt1@192.168.1.10"
] |
blt1@192.168.1.10
|
9de55bb18ec1a43da35cc70b126f10dac1654751
|
60d47d279a007adbef10f3f808c6497f6c4a328d
|
/views.py
|
86c08d4dc42eba82f6d51b520c0691367a306f72
|
[] |
no_license
|
tmaszkiewicz/app
|
97ca1a13f4c9128d4c2ca8413d617de08db4881b
|
f4adbed1aad6cbdb949f949f308268ff67580f86
|
refs/heads/main
| 2023-08-23T15:46:57.598122
| 2021-11-05T14:15:11
| 2021-11-05T14:15:11
| 424,933,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request, *args, **kwargs):
    """Render the app home page with a small demo context.

    :param request: The incoming HttpRequest.
    :return: Rendered 'app/home.html' HttpResponse.
    """
    url = 'app/home.html'
    # Context handed to the template; 'tekst' is a placeholder demo value.
    context = {
        'tekst': "dasdasd",
    }
    return render(request, url, context)
|
[
"t.maszkiewicz@gmail.com"
] |
t.maszkiewicz@gmail.com
|
eed790ecc6634099cbae4eed0343224ecf168876
|
bcba30701c3b09995e4bfcfdd7cdcafd81635187
|
/mp.py
|
df6f8b68fe8e93d84c09569a8cd5f621c93f8d42
|
[] |
no_license
|
guotechfin/istock
|
f7376f04c74097afcb96a925a6823036b037c9f7
|
3eae7a58cf36c69a1e410d3647841f1e14c81cc6
|
refs/heads/master
| 2021-01-13T13:57:21.598684
| 2016-08-15T08:17:36
| 2016-08-15T08:17:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
#!/usr/bin/env python
#coding=utf-8
from multiprocessing import Pool
from time import sleep
from ga1 import *
def main():
    """Fan get_result out over every item from sc_get() on a process pool."""
    pool = Pool(processes=20)  # at most 20 worker processes
    sc = sc_get()
    # BUG FIX: the original kept only the *last* AsyncResult (and raised
    # NameError when sc was empty); collect them all and check every one.
    results = [pool.apply_async(get_result, i) for i in sc]
    pool.close()
    pool.join()
    if results and all(r.successful() for r in results):
        print('successful')

if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
c9eb862aeb4359055031d9ae873ff6ea6800bbe0
|
eeca249274299a5acc495126eeda237c7317d40a
|
/THE/yt_lib_opt/try.py
|
4ec3a2e3972fb2a41de14c49422fec5b7a46e6b4
|
[] |
no_license
|
nickmillr/yt-ontogenesis
|
ab44891b115b32d87c758ae8552715603c316a14
|
8fa9795f1e8861e3a809a6237851697512576cd8
|
refs/heads/master
| 2016-09-06T09:09:44.008411
| 2014-11-12T20:41:11
| 2014-11-12T20:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
import yt
from yt.units import *
from yt.mods import *
import load_dataset as ld
import nearest_halo as nh
import star as s

# Python 2 script: load an enzo dataset at redshift 1 and build a sphere
# data object around halo 54.
code='enzo'
z=1.0
halo=54
ds = ld.load_data(code, z)
# Center of mass and radius of the chosen halo (project helper modules).
sphere_center=nh._center_of_mass(code,z,halo)
radius=nh._radius(code,z,halo)
sp = ds.sphere(sphere_center, (int(radius), "kpc"))
# NOTE(review): in yt, `sp.quantities` is normally an accessor object, not a
# plain callable — confirm that calling it with no arguments is intended.
dd = sp.quantities()
print dd
|
[
"nam002@marietta.edu"
] |
nam002@marietta.edu
|
7a6c2a7dc5a319d417e4d9d4710514d22f3313ff
|
48548fa087bb3b8f69fdf2b5d4af5d3703ea755c
|
/Django_new/apprenti/python/developcom/listes.py
|
7047e8f7258b1d2c32c8eb42b3f16a13dbe914ca
|
[] |
no_license
|
BKIBONZI/DJANGO
|
168a69190e0b26796d33b50abc7f4e6b45a64693
|
c34d6dcd243e31226bb99da6bf260601f98966fc
|
refs/heads/master
| 2020-09-06T14:25:22.837546
| 2019-11-08T11:15:41
| 2019-11-08T11:15:41
| 220,450,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
numbers = [1,2,3,4,5]

if __name__ == '__main__' :
    # Print the list.
    print(numbers)
    # Print the type of the list.
    print(type(numbers))
    # Print the element at index 3 (the fourth element, 4).
    # (The original comment said index 0, which did not match the code.)
    print(numbers[3])
|
[
"bkibonzi@e1r1p14.42.fr"
] |
bkibonzi@e1r1p14.42.fr
|
6daa5e177776fee9980d7e78a7104777855e9d2e
|
e3c72896c9379425cc06e8ff1c1b35a0b755169b
|
/Analysis/plot.py
|
8c05c90d3cfeaaf9b72b3590b54e96f95cb686c9
|
[] |
no_license
|
geracobo/PyPiano
|
ced350558c40d202be3a85c0a111cebb6c109b20
|
e5e840738eef11751fd3e536ce5a4d37945ecedf
|
refs/heads/master
| 2020-05-06T12:19:09.257066
| 2014-01-05T18:55:01
| 2014-01-05T18:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
#!/usr/bin/env python
# coding=utf-8
# Python 2 script: plot sampled data from a CSV and overlay a 50-term
# Fourier-series reconstruction.
from matplotlib import pyplot
import numpy as np
import csv

samplerate = 44100
period = 1/float(samplerate)  # seconds per sample

f = open('sample-data.csv')
y = np.empty((1))
time = np.empty((1))
at = float(0)
# Build sample and time arrays row by row.
# NOTE(review): np.append per row is O(n^2); fine for small files only.
for row in f:
    data = row
    y=np.append(y,float(data))
    time=np.append(time, at)
    at = at+period
pyplot.plot(time, y)

def cn(n):
    # n-th complex Fourier coefficient of y.
    # NOTE(review): uses `period` (a single sample interval) as the signal
    # period in the exponent — confirm this is intended rather than the
    # full record length.
    c = y*np.exp(-1j*2*n*np.pi*time/period)
    return c.sum()/c.size

def f(x, Nh):
    # Partial Fourier reconstruction of the signal with Nh harmonics.
    # NOTE(review): this def rebinds the name `f`, shadowing the open file
    # object above (the file is never closed).
    f = np.array([2*cn(i)*np.exp(1j*2*i*np.pi*x/period) for i in range(1,Nh+1)])
    return f.sum()

y2 = np.array([f(t,50).real for t in time])
#pyplot.xlim(0,600)
pyplot.plot(time, y2)
pyplot.show()
print y
|
[
"gera.cobo@gmail.com"
] |
gera.cobo@gmail.com
|
ec909014a75777f9c98e33e6bfc8a8965ec22fec
|
4448001f31d1f7a56915c620d7a8a12a137b29a2
|
/PySpedNFSe/pysped_nfse/rj/xmldsig-core-schema_v01.py
|
631217de4b952bfb536cc2c467ca6018907958bf
|
[] |
no_license
|
DITIntl/lets-keep-open
|
c7d639a0de9f1fc4778864e74a304ef6facf7506
|
61a6b5b9500b4d4da1799099995176b594a27fb7
|
refs/heads/master
| 2021-09-07T20:32:46.587547
| 2018-02-28T16:37:56
| 2018-02-28T16:37:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168,964
|
py
|
# -*- coding: utf-8 -*-
# © 2016 Danimar Ribeiro, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with whichever ElementTree backend was imported.

    When lxml is the active backend and no parser was supplied, substitute
    lxml's ElementTree-compatible parser so behavior matches the stdlib.
    """
    if (XMLParser_import_library == XMLParser_import_lxml and
        'parser' not in kwargs):
        # Use the lxml ElementTree compatible parser so that, e.g.,
        # we ignore comments.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    doc = etree_.parse(*args, **kwargs)
    return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
    @classmethod
    def gds_parse_time(cls, input_data):
        """Parse an xs:time string into a ``datetime.time``.

        Accepts an optional trailing 'Z' or '+HH:MM'/'-HH:MM' offset and
        an optional fractional-seconds part; the offset is attached as a
        _FixedOffsetTZ tzinfo on the returned time.
        """
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                # Offset in minutes east of UTC; negated for '-HH:MM'.
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # Drop the 6-character '+HH:MM' suffix before strptime.
                input_data = input_data[:-6]
        if len(input_data.split('.')) > 1:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Codec used when exported text/attribute values are .encode()-ed.
ExternalEncoding = 'ascii'
# Splits a Clark-notation tag into optional '{uri}' prefix and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Matches runs of whitespace (newlines, returns, spaces) for cleanup.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a '{uri}local' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces per indentation *level* to *outfile*; a no-op
    when pretty-printing is disabled."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape *inStr* for use as XML element content.

    Falsy input yields ''. Non-string values are coerced via '%s'
    formatting. Restores the XML entity replacements ('&amp;', '&lt;',
    '&gt;') that were corrupted to no-op replaces (e.g.
    ``replace('&', '&')``) by an HTML-unescaping pass over this file;
    '&' is escaped first so existing text is not double-escaped later.
    """
    if not inStr:
        return ''
    # isinstance(str) keeps byte strings as-is on Py2; other values
    # (ints, Py2 unicode, ...) go through '%s' formatting unchanged.
    s1 = inStr if isinstance(inStr, str) else '%s' % inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
    """Return *inStr* rendered as a quoted Python string literal,
    picking single/double/triple quoting so the content needs minimal
    escaping (only embedded '\"' is backslash-escaped, and only when
    the value also contains a single quote)."""
    s1 = inStr
    if "'" not in s1:
        template = "'%s'" if '\n' not in s1 else "'''%s'''"
        return template % s1
    if '"' in s1:
        s1 = s1.replace('"', '\\"')
    template = '"%s"' if '\n' not in s1 else '"""%s"""'
    return template % s1
def get_all_text_(node):
    """Collect all text directly inside *node*: its leading text plus
    the tail text following each child element, in document order."""
    parts = [node.text if node.text is not None else '']
    for child in node:
        if child.tail is not None:
            parts.append(child.tail)
    return ''.join(parts)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*.

    Plain names are fetched directly; a 'prefix:name' form is resolved
    through node.nsmap into Clark notation '{uri}name'. Returns None
    when the attribute, prefix, or a valid name shape is missing.
    """
    attrs = node.attrib
    if ':' not in attr_name:
        return attrs.get(attr_name)
    parts = attr_name.split(':')
    if len(parts) != 2:
        # More than one colon: not a resolvable qualified name.
        return None
    prefix, name = parts
    namespace = node.nsmap.get(prefix)
    if namespace is None:
        return None
    return attrs.get('{%s}%s' % (namespace, name))
class GDSParseError(Exception):
    """Raised (via raise_parse_error) when an XML instance document
    does not match what the generated bindings expect."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, annotating *msg* with the element
    tag and — when the lxml parser is in use — the source line number."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml elements carry their source line in .sourceline.
        details = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        details = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(details)
class MixedContainer:
    """One fragment of mixed XML content — plain text, a simple-typed
    element, or a complex element — with enough metadata (category,
    content_type, name, value) to re-serialize it in document order.
    """
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    # NOTE(review): the content_type parameter is unused (generated
    # accessor quirk); callers get the stored content_type regardless.
    def getContenttype(self, content_type):
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        """Write this fragment to *outfile* according to its category."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        """Write a simple-typed element, formatting the value per its
        declared content_type."""
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        """Attach this fragment to an ElementTree *element*, merging text
        into .text / the previous sibling's .tail as appropriate."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        """Return the value formatted as text per content_type.

        NOTE(review): for TypeNone/TypeText no branch assigns ``text``,
        so those cases would raise NameError — confirm they never occur
        for CategorySimple content.
        """
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        """Write this fragment as a Python-literal constructor call."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its XML
    Schema data type (a scalar or a restriction chain as a list), and
    whether it is a container (repeated) member."""

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        # Raw value: may be a list representing a restriction chain.
        return self.data_type

    def get_data_type(self):
        """Return the effective type: the last entry of a non-empty
        chain, 'xs:string' for an empty chain, or the scalar itself."""
        if not isinstance(self.data_type, list):
            return self.data_type
        if self.data_type:
            return self.data_type[-1]
        return 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class SignatureType(GeneratedsSuper):
    """Generated binding for the XML digital signature ``Signature``
    element: an optional Id attribute plus SignedInfo, SignatureValue,
    KeyInfo children and a repeated list of Object children.
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, SignedInfo=None, SignatureValue=None, KeyInfo=None, Object=None):
        self.Id = _cast(None, Id)
        self.SignedInfo = SignedInfo
        self.SignatureValue = SignatureValue
        self.KeyInfo = KeyInfo
        # Object is a repeated child; default to a fresh list per instance.
        if Object is None:
            self.Object = []
        else:
            self.Object = Object
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one is installed, so
        # applications can override the generated class.
        if SignatureType.subclass:
            return SignatureType.subclass(*args_, **kwargs_)
        else:
            return SignatureType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_SignedInfo(self): return self.SignedInfo
    def set_SignedInfo(self, SignedInfo): self.SignedInfo = SignedInfo
    def get_SignatureValue(self): return self.SignatureValue
    def set_SignatureValue(self, SignatureValue): self.SignatureValue = SignatureValue
    def get_KeyInfo(self): return self.KeyInfo
    def set_KeyInfo(self, KeyInfo): self.KeyInfo = KeyInfo
    def get_Object(self): return self.Object
    def set_Object(self, Object): self.Object = Object
    def add_Object(self, value): self.Object.append(value)
    def insert_Object(self, index, value): self.Object[index] = value
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def hasContent_(self):
        # True when any child element must be serialized.
        if (
            self.SignedInfo is not None or
            self.SignatureValue is not None or
            self.KeyInfo is not None or
            self.Object
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='SignatureType', namespacedef_='', pretty_print=True):
        """Serialize this element (and children) as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignatureType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignatureType'):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='SignatureType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.SignedInfo is not None:
            self.SignedInfo.export(outfile, level, namespace_, name_='SignedInfo', pretty_print=pretty_print)
        if self.SignatureValue is not None:
            self.SignatureValue.export(outfile, level, namespace_, name_='SignatureValue', pretty_print=pretty_print)
        if self.KeyInfo is not None:
            self.KeyInfo.export(outfile, level, namespace_, name_='KeyInfo', pretty_print=pretty_print)
        for Object_ in self.Object:
            Object_.export(outfile, level, namespace_, name_='Object', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='SignatureType'):
        """Serialize this element as Python constructor-literal text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.SignedInfo is not None:
            showIndent(outfile, level)
            outfile.write('SignedInfo=model_.SignedInfo(\n')
            self.SignedInfo.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.SignatureValue is not None:
            showIndent(outfile, level)
            outfile.write('SignatureValue=model_.SignatureValue(\n')
            self.SignatureValue.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.KeyInfo is not None:
            showIndent(outfile, level)
            outfile.write('KeyInfo=model_.KeyInfo(\n')
            self.KeyInfo.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Object=[\n')
        level += 1
        for Object_ in self.Object:
            showIndent(outfile, level)
            outfile.write('model_.Object(\n')
            Object_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree/lxml *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the (namespace-stripped) child tag name.
        if nodeName_ == 'SignedInfo':
            obj_ = SignedInfoType.factory()
            obj_.build(child_)
            self.SignedInfo = obj_
        elif nodeName_ == 'SignatureValue':
            obj_ = SignatureValueType.factory()
            obj_.build(child_)
            self.SignatureValue = obj_
        elif nodeName_ == 'KeyInfo':
            obj_ = KeyInfoType.factory()
            obj_.build(child_)
            self.KeyInfo = obj_
        elif nodeName_ == 'Object':
            obj_ = ObjectType.factory()
            obj_.build(child_)
            self.Object.append(obj_)
# end class SignatureType
class SignatureValueType(GeneratedsSuper):
    """Generated binding for ``SignatureValue``: an optional Id
    attribute plus simple text content kept in ``valueOf_``.
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, valueOf_=None):
        self.Id = _cast(None, Id)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one is installed.
        if SignatureValueType.subclass:
            return SignatureValueType.subclass(*args_, **kwargs_)
        else:
            return SignatureValueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='SignatureValueType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignatureValueType')
        if self.hasContent_():
            outfile.write('>')
            # NOTE(review): .encode() yields bytes — this generated
            # module targets Python 2 file objects.
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignatureValueType'):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='SignatureValueType', fromsubclass_=False, pretty_print=True):
        # Simple content only — no child elements.
        pass
    def exportLiteral(self, outfile, level, name_='SignatureValueType'):
        """Serialize this element as Python constructor-literal text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an ElementTree/lxml *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class SignatureValueType
class SignedInfoType(GeneratedsSuper):
    """Generated binding for ``SignedInfo``: an optional Id attribute
    plus CanonicalizationMethod, SignatureMethod and a repeated list of
    Reference children.
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, CanonicalizationMethod=None, SignatureMethod=None, Reference=None):
        self.Id = _cast(None, Id)
        self.CanonicalizationMethod = CanonicalizationMethod
        self.SignatureMethod = SignatureMethod
        # Reference is a repeated child; default to a fresh list.
        if Reference is None:
            self.Reference = []
        else:
            self.Reference = Reference
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one is installed.
        if SignedInfoType.subclass:
            return SignedInfoType.subclass(*args_, **kwargs_)
        else:
            return SignedInfoType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_CanonicalizationMethod(self): return self.CanonicalizationMethod
    def set_CanonicalizationMethod(self, CanonicalizationMethod): self.CanonicalizationMethod = CanonicalizationMethod
    def get_SignatureMethod(self): return self.SignatureMethod
    def set_SignatureMethod(self, SignatureMethod): self.SignatureMethod = SignatureMethod
    def get_Reference(self): return self.Reference
    def set_Reference(self, Reference): self.Reference = Reference
    def add_Reference(self, value): self.Reference.append(value)
    def insert_Reference(self, index, value): self.Reference[index] = value
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def hasContent_(self):
        # True when any child element must be serialized.
        if (
            self.CanonicalizationMethod is not None or
            self.SignatureMethod is not None or
            self.Reference
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='SignedInfoType', namespacedef_='', pretty_print=True):
        """Serialize this element (and children) as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignedInfoType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignedInfoType'):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='SignedInfoType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.CanonicalizationMethod is not None:
            self.CanonicalizationMethod.export(outfile, level, namespace_, name_='CanonicalizationMethod', pretty_print=pretty_print)
        if self.SignatureMethod is not None:
            self.SignatureMethod.export(outfile, level, namespace_, name_='SignatureMethod', pretty_print=pretty_print)
        for Reference_ in self.Reference:
            Reference_.export(outfile, level, namespace_, name_='Reference', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='SignedInfoType'):
        """Serialize this element as Python constructor-literal text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.CanonicalizationMethod is not None:
            showIndent(outfile, level)
            outfile.write('CanonicalizationMethod=model_.CanonicalizationMethod(\n')
            self.CanonicalizationMethod.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.SignatureMethod is not None:
            showIndent(outfile, level)
            outfile.write('SignatureMethod=model_.SignatureMethod(\n')
            self.SignatureMethod.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Reference=[\n')
        level += 1
        for Reference_ in self.Reference:
            showIndent(outfile, level)
            outfile.write('model_.Reference(\n')
            Reference_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree/lxml *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the (namespace-stripped) child tag name.
        if nodeName_ == 'CanonicalizationMethod':
            obj_ = CanonicalizationMethodType.factory()
            obj_.build(child_)
            self.CanonicalizationMethod = obj_
        elif nodeName_ == 'SignatureMethod':
            obj_ = SignatureMethodType.factory()
            obj_.build(child_)
            self.SignatureMethod = obj_
        elif nodeName_ == 'Reference':
            obj_ = ReferenceType.factory()
            obj_.build(child_)
            self.Reference.append(obj_)
# end class SignedInfoType
class CanonicalizationMethodType(GeneratedsSuper):
    """Generated binding for ``CanonicalizationMethod``: an Algorithm
    attribute plus mixed wildcard (xs:any) content, preserved in
    document order in ``content_`` as MixedContainer fragments.

    Fix: ``insert_anytypeobjs_`` indexed the non-existent attribute
    ``self._anytypeobjs_`` (leading underscore) and always raised
    AttributeError; it now indexes ``self.anytypeobjs_``.
    """
    subclass = None
    superclass = None
    def __init__(self, Algorithm=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
        self.Algorithm = _cast(None, Algorithm)
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.valueOf_ = valueOf_
        # mixedclass_ wraps mixed-content fragments (MixedContainer by
        # default); content_ keeps them in document order.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one is installed.
        if CanonicalizationMethodType.subclass:
            return CanonicalizationMethodType.subclass(*args_, **kwargs_)
        else:
            return CanonicalizationMethodType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: was 'self._anytypeobjs_[index] = value' (no such attribute).
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_Algorithm(self): return self.Algorithm
    def set_Algorithm(self, Algorithm): self.Algorithm = Algorithm
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        if (
            self.anytypeobjs_ or
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='CanonicalizationMethodType', namespacedef_='', pretty_print=True):
        """Serialize this element (and mixed content) as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CanonicalizationMethodType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='CanonicalizationMethodType'):
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            outfile.write(' Algorithm=%s' % (self.gds_format_string(quote_attrib(self.Algorithm).encode(ExternalEncoding), input_name='Algorithm'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='CanonicalizationMethodType', fromsubclass_=False, pretty_print=True):
        if not fromsubclass_:
            # Emit mixed content fragments in document order.
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='CanonicalizationMethodType'):
        """Serialize this element as Python constructor-literal text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            showIndent(outfile, level)
            outfile.write('Algorithm="%s",\n' % (self.Algorithm,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object (attributes + mixed content) from *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                                    MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Algorithm', node)
        if value is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            self.Algorithm = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == '':
            # NOTE(review): '__ANY__' is a generateDS placeholder for
            # wildcard (xs:any) content and is undefined at runtime —
            # confirm how xs:any children should actually be built.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                                    MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            # Preserve trailing text after the child element.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                                    MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class CanonicalizationMethodType
class SignatureMethodType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Algorithm=None, HMACOutputLength=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
self.Algorithm = _cast(None, Algorithm)
self.HMACOutputLength = HMACOutputLength
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if SignatureMethodType.subclass:
return SignatureMethodType.subclass(*args_, **kwargs_)
else:
return SignatureMethodType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_HMACOutputLength(self): return self.HMACOutputLength
def set_HMACOutputLength(self, HMACOutputLength): self.HMACOutputLength = HMACOutputLength
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_Algorithm(self): return self.Algorithm
def set_Algorithm(self, Algorithm): self.Algorithm = Algorithm
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def validate_HMACOutputLengthType(self, value):
# Validate type HMACOutputLengthType, a restriction on integer.
pass
def hasContent_(self):
if (
self.HMACOutputLength is not None or
self.anytypeobjs_ or
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='SignatureMethodType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignatureMethodType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignatureMethodType'):
if self.Algorithm is not None and 'Algorithm' not in already_processed:
already_processed.add('Algorithm')
outfile.write(' Algorithm=%s' % (self.gds_format_string(quote_attrib(self.Algorithm).encode(ExternalEncoding), input_name='Algorithm'), ))
def exportChildren(self, outfile, level, namespace_='ds:', name_='SignatureMethodType', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SignatureMethodType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.Algorithm is not None and 'Algorithm' not in already_processed:
already_processed.add('Algorithm')
showIndent(outfile, level)
outfile.write('Algorithm="%s",\n' % (self.Algorithm,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('Algorithm', node)
if value is not None and 'Algorithm' not in already_processed:
already_processed.add('Algorithm')
self.Algorithm = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Convert one child element into a MixedContainer entry.

        An HMACOutputLength child must carry an integer value; any other
        child (matched by the empty node name) is built via __ANY__.
        """
        if nodeName_ == 'HMACOutputLength' and child_.text is not None:
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError), exp:  # Python 2 except syntax
                raise_parse_error(child_, 'requires integer: %s' % exp)
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeInteger, 'HMACOutputLength', ival_)
            self.content_.append(obj_)
        elif nodeName_ == '':
            # NOTE(review): __ANY__ looks like an unresolved wildcard
            # placeholder left by the code generator -- confirm it is
            # actually defined elsewhere in this module.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        # Trailing text after any child element is kept as a text item.
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class SignatureMethodType
class ReferenceType(GeneratedsSuper):
    """Binding for ds:ReferenceType: optional Transforms, a DigestMethod,
    and a base64-encoded DigestValue, plus Type/Id/URI attributes.

    Appears to be generateDS-style generated code; the accessor and
    export/build interface follows that generator's conventions.
    """
    subclass = None
    superclass = None
    def __init__(self, Type=None, Id=None, URI=None, Transforms=None, DigestMethod=None, DigestValue=None):
        self.Type = _cast(None, Type)
        self.Id = _cast(None, Id)
        self.URI = _cast(None, URI)
        self.Transforms = Transforms
        self.DigestMethod = DigestMethod
        self.DigestValue = DigestValue
    def factory(*args_, **kwargs_):
        """Instantiate ReferenceType or its registered subclass, if any."""
        if ReferenceType.subclass:
            return ReferenceType.subclass(*args_, **kwargs_)
        else:
            return ReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Transforms(self): return self.Transforms
    def set_Transforms(self, Transforms): self.Transforms = Transforms
    def get_DigestMethod(self): return self.DigestMethod
    def set_DigestMethod(self, DigestMethod): self.DigestMethod = DigestMethod
    def get_DigestValue(self): return self.DigestValue
    def set_DigestValue(self, DigestValue): self.DigestValue = DigestValue
    def get_Type(self): return self.Type
    def set_Type(self, Type): self.Type = Type
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def get_URI(self): return self.URI
    def set_URI(self, URI): self.URI = URI
    def hasContent_(self):
        """Return True when any child element is set."""
        if (
            self.Transforms is not None or
            self.DigestMethod is not None or
            self.DigestValue is not None
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='ReferenceType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='ReferenceType'):
        """Write the Type, Id and URI attributes when set."""
        if self.Type is not None and 'Type' not in already_processed:
            already_processed.add('Type')
            outfile.write(' Type=%s' % (self.gds_format_string(quote_attrib(self.Type).encode(ExternalEncoding), input_name='Type'), ))
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
        if self.URI is not None and 'URI' not in already_processed:
            already_processed.add('URI')
            outfile.write(' URI=%s' % (self.gds_format_string(quote_attrib(self.URI).encode(ExternalEncoding), input_name='URI'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='ReferenceType', fromsubclass_=False, pretty_print=True):
        """Write Transforms, DigestMethod and DigestValue children."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Transforms is not None:
            self.Transforms.export(outfile, level, namespace_, name_='Transforms', pretty_print=pretty_print)
        if self.DigestMethod is not None:
            self.DigestMethod.export(outfile, level, namespace_, name_='DigestMethod', pretty_print=pretty_print)
        if self.DigestValue is not None:
            # DigestValue is serialized base64-encoded.
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDigestValue>%s</%sDigestValue>%s' % (namespace_, self.gds_format_base64(self.DigestValue, input_name='DigestValue'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='ReferenceType'):
        """Write this instance in Python-literal (constructor-call) form."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Emit Type, Id and URI as keyword arguments, at most once each."""
        if self.Type is not None and 'Type' not in already_processed:
            already_processed.add('Type')
            showIndent(outfile, level)
            outfile.write('Type="%s",\n' % (self.Type,))
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
        if self.URI is not None and 'URI' not in already_processed:
            already_processed.add('URI')
            showIndent(outfile, level)
            outfile.write('URI="%s",\n' % (self.URI,))
    def exportLiteralChildren(self, outfile, level, name_):
        """Emit each set child as a nested constructor call."""
        if self.Transforms is not None:
            showIndent(outfile, level)
            outfile.write('Transforms=model_.Transforms(\n')
            self.Transforms.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.DigestMethod is not None:
            showIndent(outfile, level)
            outfile.write('DigestMethod=model_.DigestMethod(\n')
            self.DigestMethod.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.DigestValue is not None:
            showIndent(outfile, level)
            outfile.write('DigestValue=model_.base64Binary(\n')
            self.DigestValue.exportLiteral(outfile, level, name_='DigestValue')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this instance from an ElementTree node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read Type, Id and URI XML attributes from node."""
        value = find_attr_value_('Type', node)
        if value is not None and 'Type' not in already_processed:
            already_processed.add('Type')
            self.Type = value
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
        value = find_attr_value_('URI', node)
        if value is not None and 'URI' not in already_processed:
            already_processed.add('URI')
            self.URI = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build one child: Transforms, DigestMethod, or base64 DigestValue."""
        if nodeName_ == 'Transforms':
            obj_ = TransformsType.factory()
            obj_.build(child_)
            self.Transforms = obj_
        elif nodeName_ == 'DigestMethod':
            obj_ = DigestMethodType.factory()
            obj_.build(child_)
            self.DigestMethod = obj_
        elif nodeName_ == 'DigestValue':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:  # Python 2 except syntax
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'DigestValue')
            else:
                bval_ = None
            self.DigestValue = bval_
# end class ReferenceType
class TransformsType(GeneratedsSuper):
    """Container element holding an ordered sequence of Transform children.

    Behaviorally identical rewrite of the generated binding; the public
    accessor and export/build interface is unchanged.
    """
    subclass = None
    superclass = None
    def __init__(self, Transform=None):
        self.Transform = [] if Transform is None else Transform
    def factory(*args_, **kwargs_):
        """Instantiate TransformsType or its registered subclass, if any."""
        target_cls = TransformsType.subclass or TransformsType
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Transform(self): return self.Transform
    def set_Transform(self, Transform): self.Transform = Transform
    def add_Transform(self, value): self.Transform.append(value)
    def insert_Transform(self, index, value): self.Transform[index] = value
    def hasContent_(self):
        """Return True when at least one Transform child is present."""
        return bool(self.Transform)
    def export(self, outfile, level, namespace_='ds:', name_='TransformsType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TransformsType')
        if not self.hasContent_():
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='TransformsType'):
        # This element carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='ds:', name_='TransformsType', fromsubclass_=False, pretty_print=True):
        """Serialize each Transform child in document order."""
        for transform_ in self.Transform:
            transform_.export(outfile, level, namespace_, name_='Transform', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='TransformsType'):
        """Write this instance in Python-literal (constructor-call) form."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No attributes to emit.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Emit the Transform list as a Python literal."""
        showIndent(outfile, level)
        outfile.write('Transform=[\n')
        inner_level = level + 1
        for transform_ in self.Transform:
            showIndent(outfile, inner_level)
            outfile.write('model_.Transform(\n')
            transform_.exportLiteral(outfile, inner_level)
            showIndent(outfile, inner_level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_elem in node:
            child_name = Tag_pattern_.match(child_elem.tag).groups()[-1]
            self.buildChildren(child_elem, node, child_name)
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build one child element; only Transform children are recognized."""
        if nodeName_ == 'Transform':
            transform_obj = TransformType.factory()
            transform_obj.build(child_)
            self.Transform.append(transform_obj)
# end class TransformsType
class TransformType(GeneratedsSuper):
    """Binding for a Transform element: an Algorithm attribute plus mixed
    content (XPath strings, wildcard child elements, interleaved text).

    Fixes over the generated original: the duplicated
    ``self.valueOf_ = valueOf_`` assignment in __init__ and the duplicated
    ``content_`` emission in exportLiteralChildren (which produced a
    repeated keyword argument in the literal output) have been removed.
    """
    subclass = None
    superclass = None
    def __init__(self, Algorithm=None, anytypeobjs_=None, XPath=None, valueOf_=None, mixedclass_=None, content_=None):
        self.Algorithm = _cast(None, Algorithm)
        self.anytypeobjs_ = anytypeobjs_
        if XPath is None:
            self.XPath = []
        else:
            self.XPath = XPath
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        """Instantiate TransformType or its registered subclass, if any."""
        if TransformType.subclass:
            return TransformType.subclass(*args_, **kwargs_)
        else:
            return TransformType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_XPath(self): return self.XPath
    def set_XPath(self, XPath): self.XPath = XPath
    def add_XPath(self, value): self.XPath.append(value)
    # NOTE(review): generated "insert" overwrites the item at index; it
    # does not shift elements like list.insert.
    def insert_XPath(self, index, value): self.XPath[index] = value
    def get_Algorithm(self): return self.Algorithm
    def set_Algorithm(self, Algorithm): self.Algorithm = Algorithm
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when any child, XPath entry, or text is present."""
        if (
            self.anytypeobjs_ is not None or
            self.XPath or
            self.valueOf_
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='TransformType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TransformType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='TransformType'):
        """Write the Algorithm attribute when set."""
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            outfile.write(' Algorithm=%s' % (self.gds_format_string(quote_attrib(self.Algorithm).encode(ExternalEncoding), input_name='Algorithm'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='TransformType', fromsubclass_=False, pretty_print=True):
        """Write every stored mixed-content item to outfile as XML."""
        if not fromsubclass_:
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='TransformType'):
        """Write this instance in Python-literal (constructor-call) form."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Emit the Algorithm attribute as a keyword argument, once only."""
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            showIndent(outfile, level)
            outfile.write('Algorithm="%s",\n' % (self.Algorithm,))
    def exportLiteralChildren(self, outfile, level, name_):
        """Emit self.content_ once as a Python literal list (the original
        emitted the identical list twice)."""
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node (mixed content)."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the Algorithm XML attribute from node, if present."""
        value = find_attr_value_('Algorithm', node)
        if value is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            self.Algorithm = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Convert one child element into a MixedContainer entry."""
        if nodeName_ == '':
            # NOTE(review): __ANY__ looks like an unresolved wildcard
            # placeholder from the code generator -- confirm it is defined
            # elsewhere in this module.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        elif nodeName_ == 'XPath' and child_.text is not None:
            valuestr_ = child_.text
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'XPath', valuestr_)
            self.content_.append(obj_)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class TransformType
class DigestMethodType(GeneratedsSuper):
    """Binding for a DigestMethod element: an Algorithm attribute plus
    mixed content (wildcard child elements and interleaved text).

    Fixes over the generated original: ``insert_anytypeobjs_`` referenced
    the non-existent attribute ``self._anytypeobjs_`` (AttributeError at
    call time), and ``self.valueOf_`` was assigned twice in __init__.
    """
    subclass = None
    superclass = None
    def __init__(self, Algorithm=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
        self.Algorithm = _cast(None, Algorithm)
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        """Instantiate DigestMethodType or its registered subclass, if any."""
        if DigestMethodType.subclass:
            return DigestMethodType.subclass(*args_, **kwargs_)
        else:
            return DigestMethodType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: original indexed self._anytypeobjs_ (undefined attribute).
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_Algorithm(self): return self.Algorithm
    def set_Algorithm(self, Algorithm): self.Algorithm = Algorithm
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when any wildcard child or text content is present."""
        if (
            self.anytypeobjs_ or
            self.valueOf_
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='DigestMethodType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='DigestMethodType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='DigestMethodType'):
        """Write the Algorithm attribute when set."""
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            outfile.write(' Algorithm=%s' % (self.gds_format_string(quote_attrib(self.Algorithm).encode(ExternalEncoding), input_name='Algorithm'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='DigestMethodType', fromsubclass_=False, pretty_print=True):
        """Write every stored mixed-content item to outfile as XML."""
        if not fromsubclass_:
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='DigestMethodType'):
        """Write this instance in Python-literal (constructor-call) form."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Emit the Algorithm attribute as a keyword argument, once only."""
        if self.Algorithm is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            showIndent(outfile, level)
            outfile.write('Algorithm="%s",\n' % (self.Algorithm,))
    def exportLiteralChildren(self, outfile, level, name_):
        """Emit self.content_ as a Python literal list."""
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node (mixed content)."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the Algorithm XML attribute from node, if present."""
        value = find_attr_value_('Algorithm', node)
        if value is not None and 'Algorithm' not in already_processed:
            already_processed.add('Algorithm')
            self.Algorithm = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Convert one child element into a MixedContainer entry."""
        if nodeName_ == '':
            # NOTE(review): __ANY__ looks like an unresolved wildcard
            # placeholder from the code generator -- confirm it is defined
            # elsewhere in this module.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class DigestMethodType
class KeyInfoType(GeneratedsSuper):
    """Binding for ds:KeyInfoType: mixed content holding KeyName,
    KeyValue, RetrievalMethod, X509Data, PGPData, SPKIData, MgmtData and
    wildcard children, plus an Id attribute.

    Fixes over the generated original: exportLiteralChildren emitted the
    identical ``content_ = [...]`` list EIGHT times (one copy per schema
    child), producing repeated keyword arguments in the literal output;
    it now emits the list once. The duplicated ``self.valueOf_``
    assignment in __init__ is also removed.
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, KeyName=None, KeyValue=None, RetrievalMethod=None, X509Data=None, PGPData=None, SPKIData=None, MgmtData=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
        self.Id = _cast(None, Id)
        if KeyName is None:
            self.KeyName = []
        else:
            self.KeyName = KeyName
        if KeyValue is None:
            self.KeyValue = []
        else:
            self.KeyValue = KeyValue
        if RetrievalMethod is None:
            self.RetrievalMethod = []
        else:
            self.RetrievalMethod = RetrievalMethod
        if X509Data is None:
            self.X509Data = []
        else:
            self.X509Data = X509Data
        if PGPData is None:
            self.PGPData = []
        else:
            self.PGPData = PGPData
        if SPKIData is None:
            self.SPKIData = []
        else:
            self.SPKIData = SPKIData
        if MgmtData is None:
            self.MgmtData = []
        else:
            self.MgmtData = MgmtData
        self.anytypeobjs_ = anytypeobjs_
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
    def factory(*args_, **kwargs_):
        """Instantiate KeyInfoType or its registered subclass, if any."""
        if KeyInfoType.subclass:
            return KeyInfoType.subclass(*args_, **kwargs_)
        else:
            return KeyInfoType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_KeyName(self): return self.KeyName
    def set_KeyName(self, KeyName): self.KeyName = KeyName
    def add_KeyName(self, value): self.KeyName.append(value)
    def insert_KeyName(self, index, value): self.KeyName[index] = value
    def get_KeyValue(self): return self.KeyValue
    def set_KeyValue(self, KeyValue): self.KeyValue = KeyValue
    def add_KeyValue(self, value): self.KeyValue.append(value)
    def insert_KeyValue(self, index, value): self.KeyValue[index] = value
    def get_RetrievalMethod(self): return self.RetrievalMethod
    def set_RetrievalMethod(self, RetrievalMethod): self.RetrievalMethod = RetrievalMethod
    def add_RetrievalMethod(self, value): self.RetrievalMethod.append(value)
    def insert_RetrievalMethod(self, index, value): self.RetrievalMethod[index] = value
    def get_X509Data(self): return self.X509Data
    def set_X509Data(self, X509Data): self.X509Data = X509Data
    def add_X509Data(self, value): self.X509Data.append(value)
    def insert_X509Data(self, index, value): self.X509Data[index] = value
    def get_PGPData(self): return self.PGPData
    def set_PGPData(self, PGPData): self.PGPData = PGPData
    def add_PGPData(self, value): self.PGPData.append(value)
    def insert_PGPData(self, index, value): self.PGPData[index] = value
    def get_SPKIData(self): return self.SPKIData
    def set_SPKIData(self, SPKIData): self.SPKIData = SPKIData
    def add_SPKIData(self, value): self.SPKIData.append(value)
    def insert_SPKIData(self, index, value): self.SPKIData[index] = value
    def get_MgmtData(self): return self.MgmtData
    def set_MgmtData(self, MgmtData): self.MgmtData = MgmtData
    def add_MgmtData(self, value): self.MgmtData.append(value)
    def insert_MgmtData(self, index, value): self.MgmtData[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when any child or text content is present."""
        if (
            self.KeyName or
            self.KeyValue or
            self.RetrievalMethod or
            self.X509Data or
            self.PGPData or
            self.SPKIData or
            self.MgmtData or
            self.anytypeobjs_ is not None or
            self.valueOf_
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='KeyInfoType', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='KeyInfoType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='KeyInfoType'):
        """Write the Id attribute when set."""
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='KeyInfoType', fromsubclass_=False, pretty_print=True):
        """Write every stored mixed-content item to outfile as XML."""
        if not fromsubclass_:
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='KeyInfoType'):
        """Write this instance in Python-literal (constructor-call) form."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Emit the Id attribute as a keyword argument, at most once."""
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        """Emit self.content_ once as a Python literal list (the original
        emitted the identical list eight times)."""
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node (mixed content)."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the Id XML attribute from node, if present."""
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Convert one child element into a MixedContainer entry, keyed by
        the child's local tag name."""
        if nodeName_ == 'KeyName' and child_.text is not None:
            valuestr_ = child_.text
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'KeyName', valuestr_)
            self.content_.append(obj_)
        elif nodeName_ == 'KeyValue':
            obj_ = KeyValue.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'KeyValue', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_KeyValue'):
                self.add_KeyValue(obj_.value)
            elif hasattr(self, 'set_KeyValue'):
                self.set_KeyValue(obj_.value)
        elif nodeName_ == 'RetrievalMethod':
            obj_ = RetrievalMethod.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'RetrievalMethod', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_RetrievalMethod'):
                self.add_RetrievalMethod(obj_.value)
            elif hasattr(self, 'set_RetrievalMethod'):
                self.set_RetrievalMethod(obj_.value)
        elif nodeName_ == 'X509Data':
            obj_ = X509Data.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'X509Data', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_X509Data'):
                self.add_X509Data(obj_.value)
            elif hasattr(self, 'set_X509Data'):
                self.set_X509Data(obj_.value)
        elif nodeName_ == 'PGPData':
            obj_ = PGPData.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'PGPData', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_PGPData'):
                self.add_PGPData(obj_.value)
            elif hasattr(self, 'set_PGPData'):
                self.set_PGPData(obj_.value)
        elif nodeName_ == 'SPKIData':
            obj_ = SPKIData.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'SPKIData', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_SPKIData'):
                self.add_SPKIData(obj_.value)
            elif hasattr(self, 'set_SPKIData'):
                self.set_SPKIData(obj_.value)
        elif nodeName_ == 'MgmtData' and child_.text is not None:
            valuestr_ = child_.text
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'MgmtData', valuestr_)
            self.content_.append(obj_)
        elif nodeName_ == '':
            # NOTE(review): __ANY__ looks like an unresolved wildcard
            # placeholder from the code generator -- confirm it is defined
            # elsewhere in this module.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class KeyInfoType
class KeyValueType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, DSAKeyValue=None, RSAKeyValue=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
self.DSAKeyValue = DSAKeyValue
self.RSAKeyValue = RSAKeyValue
self.anytypeobjs_ = anytypeobjs_
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if KeyValueType.subclass:
return KeyValueType.subclass(*args_, **kwargs_)
else:
return KeyValueType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_DSAKeyValue(self): return self.DSAKeyValue
def set_DSAKeyValue(self, DSAKeyValue): self.DSAKeyValue = DSAKeyValue
def get_RSAKeyValue(self): return self.RSAKeyValue
def set_RSAKeyValue(self, RSAKeyValue): self.RSAKeyValue = RSAKeyValue
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.DSAKeyValue is not None or
self.RSAKeyValue is not None or
self.anytypeobjs_ is not None or
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='KeyValueType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='KeyValueType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='KeyValueType'):
        # This element carries no XML attributes.
        pass
def exportChildren(self, outfile, level, namespace_='ds:', name_='KeyValueType', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='KeyValueType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No attributes are defined for KeyValueType.
        pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
pass
    def build(self, node):
        """Populate this object from an element-tree node.

        Collects attributes, the node's aggregate text, any leading text
        as a mixed-content item, then recurses into each child element.
        """
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            # Preserve leading text in document order for mixed content.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            # Local element name with any namespace prefix stripped.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # KeyValueType declares no XML attributes; nothing to read.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one parsed child element into the mixed-content list.

        Each recognized child is built, wrapped in a mixed-content
        container, appended to content_, and -- when a matching add_/set_
        method exists on this object -- also stored in the named member.
        Trailing text after the child is preserved as a text item.
        """
        if nodeName_ == 'DSAKeyValue':
            obj_ = DSAKeyValue.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'DSAKeyValue', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_DSAKeyValue'):
                self.add_DSAKeyValue(obj_.value)
            elif hasattr(self, 'set_DSAKeyValue'):
                self.set_DSAKeyValue(obj_.value)
        elif nodeName_ == 'RSAKeyValue':
            obj_ = RSAKeyValue.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, 'RSAKeyValue', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_RSAKeyValue'):
                self.add_RSAKeyValue(obj_.value)
            elif hasattr(self, 'set_RSAKeyValue'):
                self.set_RSAKeyValue(obj_.value)
        elif nodeName_ == '':
            # NOTE(review): generated wildcard branch -- `__ANY__` is not a
            # name visible in this chunk; if an unnamed child ever matched,
            # this line would raise NameError.  Confirm against the
            # generator's output for xs:any handling.
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            # Keep text that follows this child, in document order.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class KeyValueType
class RetrievalMethodType(GeneratedsSuper):
    """ds:RetrievalMethodType -- a reference to key data identified by a
    URI and optional Type, optionally routed through a Transforms chain.

    Type and URI are XML attributes; Transforms is the only child element.
    """
    subclass = None
    superclass = None
    def __init__(self, Type=None, URI=None, Transforms=None):
        self.Type = _cast(None, Type)
        self.URI = _cast(None, URI)
        self.Transforms = Transforms
    @staticmethod
    def factory(*args_, **kwargs_):
        # Honor an installed subclass override, if any.
        cls = RetrievalMethodType.subclass or RetrievalMethodType
        return cls(*args_, **kwargs_)
    # Simple accessors; no validation or side effects.
    def get_Transforms(self): return self.Transforms
    def set_Transforms(self, Transforms): self.Transforms = Transforms
    def get_Type(self): return self.Type
    def set_Type(self, Type): self.Type = Type
    def get_URI(self): return self.URI
    def set_URI(self, URI): self.URI = URI
    def hasContent_(self):
        # Only the Transforms child contributes element content
        # (Type and URI are attributes, not children).
        return bool(self.Transforms is not None)
    def export(self, outfile, level, namespace_='ds:', name_='RetrievalMethodType', namespacedef_='', pretty_print=True):
        """Write this node and its children as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        sep_ = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, sep_))
        seen_ = set()
        self.exportAttributes(outfile, level, seen_, namespace_, name_='RetrievalMethodType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='RetrievalMethodType'):
        # Write each populated attribute exactly once.
        for attr_ in ('Type', 'URI'):
            val_ = getattr(self, attr_)
            if val_ is not None and attr_ not in already_processed:
                already_processed.add(attr_)
                outfile.write(' %s=%s' % (attr_, self.gds_format_string(quote_attrib(val_).encode(ExternalEncoding), input_name=attr_)))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='RetrievalMethodType', fromsubclass_=False, pretty_print=True):
        if self.Transforms is not None:
            self.Transforms.export(outfile, level, namespace_, name_='Transforms', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='RetrievalMethodType'):
        """Write this node as Python constructor-literal source."""
        level += 1
        seen_ = set()
        self.exportLiteralAttributes(outfile, level, seen_, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_ in ('Type', 'URI'):
            val_ = getattr(self, attr_)
            if val_ is not None and attr_ not in already_processed:
                already_processed.add(attr_)
                showIndent(outfile, level)
                outfile.write('%s="%s",\n' % (attr_, val_))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Transforms is not None:
            showIndent(outfile, level)
            outfile.write('Transforms=model_.Transforms(\n')
            self.Transforms.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an element-tree node."""
        seen_ = set()
        self.buildAttributes(node, node.attrib, seen_)
        for child in node:
            # Local element name with any namespace prefix stripped.
            tag_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        for attr_ in ('Type', 'URI'):
            value = find_attr_value_(attr_, node)
            if value is not None and attr_ not in already_processed:
                already_processed.add(attr_)
                setattr(self, attr_, value)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Transforms':
            obj_ = TransformsType.factory()
            obj_.build(child_)
            self.Transforms = obj_
# end class RetrievalMethodType
class X509DataType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, X509IssuerSerial=None, X509SKI=None, X509SubjectName=None, X509Certificate=None, X509CRL=None, anytypeobjs_=None):
self.X509IssuerSerial = X509IssuerSerial
self.X509SKI = X509SKI
self.X509SubjectName = X509SubjectName
self.X509Certificate = X509Certificate
self.X509CRL = X509CRL
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if X509DataType.subclass:
return X509DataType.subclass(*args_, **kwargs_)
else:
return X509DataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_X509IssuerSerial(self): return self.X509IssuerSerial
def set_X509IssuerSerial(self, X509IssuerSerial): self.X509IssuerSerial = X509IssuerSerial
def get_X509SKI(self): return self.X509SKI
def set_X509SKI(self, X509SKI): self.X509SKI = X509SKI
def get_X509SubjectName(self): return self.X509SubjectName
def set_X509SubjectName(self, X509SubjectName): self.X509SubjectName = X509SubjectName
def get_X509Certificate(self): return self.X509Certificate
def set_X509Certificate(self, X509Certificate): self.X509Certificate = X509Certificate
def get_X509CRL(self): return self.X509CRL
def set_X509CRL(self, X509CRL): self.X509CRL = X509CRL
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def hasContent_(self):
if (
self.X509IssuerSerial is not None or
self.X509SKI is not None or
self.X509SubjectName is not None or
self.X509Certificate is not None or
self.X509CRL is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='X509DataType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='X509DataType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='X509DataType'):
pass
def exportChildren(self, outfile, level, namespace_='ds:', name_='X509DataType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.X509IssuerSerial is not None:
self.X509IssuerSerial.export(outfile, level, namespace_, name_='X509IssuerSerial', pretty_print=pretty_print)
if self.X509SKI is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509SKI>%s</%sX509SKI>%s' % (namespace_, self.gds_format_base64(self.X509SKI, input_name='X509SKI'), namespace_, eol_))
if self.X509SubjectName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509SubjectName>%s</%sX509SubjectName>%s' % (namespace_, self.gds_format_string(quote_xml(self.X509SubjectName).encode(ExternalEncoding), input_name='X509SubjectName'), namespace_, eol_))
if self.X509Certificate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509Certificate>%s</%sX509Certificate>%s' % (namespace_, self.gds_format_base64(self.X509Certificate, input_name='X509Certificate'), namespace_, eol_))
if self.X509CRL is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509CRL>%s</%sX509CRL>%s' % (namespace_, self.gds_format_base64(self.X509CRL, input_name='X509CRL'), namespace_, eol_))
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='X509DataType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.X509IssuerSerial is not None:
showIndent(outfile, level)
outfile.write('X509IssuerSerial=model_.X509IssuerSerialType(\n')
self.X509IssuerSerial.exportLiteral(outfile, level, name_='X509IssuerSerial')
showIndent(outfile, level)
outfile.write('),\n')
if self.X509SKI is not None:
showIndent(outfile, level)
outfile.write('X509SKI=model_.base64Binary(\n')
self.X509SKI.exportLiteral(outfile, level, name_='X509SKI')
showIndent(outfile, level)
outfile.write('),\n')
if self.X509SubjectName is not None:
showIndent(outfile, level)
outfile.write('X509SubjectName=%s,\n' % quote_python(self.X509SubjectName).encode(ExternalEncoding))
if self.X509Certificate is not None:
showIndent(outfile, level)
outfile.write('X509Certificate=model_.base64Binary(\n')
self.X509Certificate.exportLiteral(outfile, level, name_='X509Certificate')
showIndent(outfile, level)
outfile.write('),\n')
if self.X509CRL is not None:
showIndent(outfile, level)
outfile.write('X509CRL=model_.base64Binary(\n')
self.X509CRL.exportLiteral(outfile, level, name_='X509CRL')
showIndent(outfile, level)
outfile.write('),\n')
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'X509IssuerSerial':
obj_ = X509IssuerSerialType.factory()
obj_.build(child_)
self.X509IssuerSerial = obj_
elif nodeName_ == 'X509SKI':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'X509SKI')
else:
bval_ = None
self.X509SKI = bval_
elif nodeName_ == 'X509SubjectName':
X509SubjectName_ = child_.text
X509SubjectName_ = self.gds_validate_string(X509SubjectName_, node, 'X509SubjectName')
self.X509SubjectName = X509SubjectName_
elif nodeName_ == 'X509Certificate':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'X509Certificate')
else:
bval_ = None
self.X509Certificate = bval_
elif nodeName_ == 'X509CRL':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'X509CRL')
else:
bval_ = None
self.X509CRL = bval_
else:
obj_ = self.gds_build_any(child_, 'X509DataType')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class X509DataType
class X509IssuerSerialType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, X509IssuerName=None, X509SerialNumber=None):
self.X509IssuerName = X509IssuerName
self.X509SerialNumber = X509SerialNumber
def factory(*args_, **kwargs_):
if X509IssuerSerialType.subclass:
return X509IssuerSerialType.subclass(*args_, **kwargs_)
else:
return X509IssuerSerialType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_X509IssuerName(self): return self.X509IssuerName
def set_X509IssuerName(self, X509IssuerName): self.X509IssuerName = X509IssuerName
def get_X509SerialNumber(self): return self.X509SerialNumber
def set_X509SerialNumber(self, X509SerialNumber): self.X509SerialNumber = X509SerialNumber
def hasContent_(self):
if (
self.X509IssuerName is not None or
self.X509SerialNumber is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='X509IssuerSerialType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='X509IssuerSerialType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='X509IssuerSerialType'):
pass
def exportChildren(self, outfile, level, namespace_='ds:', name_='X509IssuerSerialType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.X509IssuerName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509IssuerName>%s</%sX509IssuerName>%s' % (namespace_, self.gds_format_string(quote_xml(self.X509IssuerName).encode(ExternalEncoding), input_name='X509IssuerName'), namespace_, eol_))
if self.X509SerialNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sX509SerialNumber>%s</%sX509SerialNumber>%s' % (namespace_, self.gds_format_integer(self.X509SerialNumber, input_name='X509SerialNumber'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='X509IssuerSerialType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.X509IssuerName is not None:
showIndent(outfile, level)
outfile.write('X509IssuerName=%s,\n' % quote_python(self.X509IssuerName).encode(ExternalEncoding))
if self.X509SerialNumber is not None:
showIndent(outfile, level)
outfile.write('X509SerialNumber=%d,\n' % self.X509SerialNumber)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'X509IssuerName':
X509IssuerName_ = child_.text
X509IssuerName_ = self.gds_validate_string(X509IssuerName_, node, 'X509IssuerName')
self.X509IssuerName = X509IssuerName_
elif nodeName_ == 'X509SerialNumber':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'X509SerialNumber')
self.X509SerialNumber = ival_
# end class X509IssuerSerialType
class PGPDataType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, PGPKeyID=None, PGPKeyPacket=None, anytypeobjs_=None):
self.PGPKeyID = PGPKeyID
self.PGPKeyPacket = PGPKeyPacket
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
self.PGPKeyPacket = PGPKeyPacket
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if PGPDataType.subclass:
return PGPDataType.subclass(*args_, **kwargs_)
else:
return PGPDataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_PGPKeyID(self): return self.PGPKeyID
def set_PGPKeyID(self, PGPKeyID): self.PGPKeyID = PGPKeyID
def get_PGPKeyPacket(self): return self.PGPKeyPacket
def set_PGPKeyPacket(self, PGPKeyPacket): self.PGPKeyPacket = PGPKeyPacket
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_PGPKeyPacket(self): return self.PGPKeyPacket
def set_PGPKeyPacket(self, PGPKeyPacket): self.PGPKeyPacket = PGPKeyPacket
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def hasContent_(self):
if (
self.PGPKeyID is not None or
self.PGPKeyPacket is not None or
self.anytypeobjs_ or
self.PGPKeyPacket is not None or
self.anytypeobjs_
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='PGPDataType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PGPDataType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='PGPDataType'):
pass
def exportChildren(self, outfile, level, namespace_='ds:', name_='PGPDataType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PGPKeyID is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sPGPKeyID>%s</%sPGPKeyID>%s' % (namespace_, self.gds_format_base64(self.PGPKeyID, input_name='PGPKeyID'), namespace_, eol_))
if self.PGPKeyPacket is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sPGPKeyPacket>%s</%sPGPKeyPacket>%s' % (namespace_, self.gds_format_base64(self.PGPKeyPacket, input_name='PGPKeyPacket'), namespace_, eol_))
if self.PGPKeyPacket is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sPGPKeyPacket>%s</%sPGPKeyPacket>%s' % (namespace_, self.gds_format_base64(self.PGPKeyPacket, input_name='PGPKeyPacket'), namespace_, eol_))
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='PGPDataType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.PGPKeyID is not None:
showIndent(outfile, level)
outfile.write('PGPKeyID=model_.base64Binary(\n')
self.PGPKeyID.exportLiteral(outfile, level, name_='PGPKeyID')
showIndent(outfile, level)
outfile.write('),\n')
if self.PGPKeyPacket is not None:
showIndent(outfile, level)
outfile.write('PGPKeyPacket=model_.base64Binary(\n')
self.PGPKeyPacket.exportLiteral(outfile, level, name_='PGPKeyPacket')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.PGPKeyPacket is not None:
showIndent(outfile, level)
outfile.write('PGPKeyPacket=model_.base64Binary(\n')
self.PGPKeyPacket.exportLiteral(outfile, level, name_='PGPKeyPacket')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'PGPKeyID':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'PGPKeyID')
else:
bval_ = None
self.PGPKeyID = bval_
elif nodeName_ == 'PGPKeyPacket':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'PGPKeyPacket')
else:
bval_ = None
self.PGPKeyPacket = bval_
elif nodeName_ == 'PGPKeyPacket':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'PGPKeyPacket')
else:
bval_ = None
self.PGPKeyPacket = bval_
else:
obj_ = self.gds_build_any(child_, 'PGPDataType')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
# end class PGPDataType
class SPKIDataType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, SPKISexp=None, anytypeobjs_=None):
self.SPKISexp = SPKISexp
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if SPKIDataType.subclass:
return SPKIDataType.subclass(*args_, **kwargs_)
else:
return SPKIDataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_SPKISexp(self): return self.SPKISexp
def set_SPKISexp(self, SPKISexp): self.SPKISexp = SPKISexp
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def hasContent_(self):
if (
self.SPKISexp is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='SPKIDataType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SPKIDataType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SPKIDataType'):
pass
def exportChildren(self, outfile, level, namespace_='ds:', name_='SPKIDataType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.SPKISexp is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sSPKISexp>%s</%sSPKISexp>%s' % (namespace_, self.gds_format_base64(self.SPKISexp, input_name='SPKISexp'), namespace_, eol_))
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='SPKIDataType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.SPKISexp is not None:
showIndent(outfile, level)
outfile.write('SPKISexp=model_.base64Binary(\n')
self.SPKISexp.exportLiteral(outfile, level, name_='SPKISexp')
showIndent(outfile, level)
outfile.write('),\n')
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'SPKISexp':
sval_ = child_.text
if sval_ is not None:
try:
bval_ = base64.b64decode(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
bval_ = self.gds_validate_base64(bval_, node, 'SPKISexp')
else:
bval_ = None
self.SPKISexp = bval_
else:
obj_ = self.gds_build_any(child_, 'SPKIDataType')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class SPKIDataType
class ObjectType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, MimeType=None, Id=None, Encoding=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
self.MimeType = _cast(None, MimeType)
self.Id = _cast(None, Id)
self.Encoding = _cast(None, Encoding)
self.anytypeobjs_ = anytypeobjs_
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if ObjectType.subclass:
return ObjectType.subclass(*args_, **kwargs_)
else:
return ObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_MimeType(self): return self.MimeType
def set_MimeType(self, MimeType): self.MimeType = MimeType
def get_Id(self): return self.Id
def set_Id(self, Id): self.Id = Id
def get_Encoding(self): return self.Encoding
def set_Encoding(self, Encoding): self.Encoding = Encoding
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.anytypeobjs_ is not None or
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='ds:', name_='ObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ObjectType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='ObjectType'):
if self.MimeType is not None and 'MimeType' not in already_processed:
already_processed.add('MimeType')
outfile.write(' MimeType=%s' % (self.gds_format_string(quote_attrib(self.MimeType).encode(ExternalEncoding), input_name='MimeType'), ))
if self.Id is not None and 'Id' not in already_processed:
already_processed.add('Id')
outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
if self.Encoding is not None and 'Encoding' not in already_processed:
already_processed.add('Encoding')
outfile.write(' Encoding=%s' % (self.gds_format_string(quote_attrib(self.Encoding).encode(ExternalEncoding), input_name='Encoding'), ))
def exportChildren(self, outfile, level, namespace_='ds:', name_='ObjectType', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ObjectType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.MimeType is not None and 'MimeType' not in already_processed:
already_processed.add('MimeType')
showIndent(outfile, level)
outfile.write('MimeType="%s",\n' % (self.MimeType,))
if self.Id is not None and 'Id' not in already_processed:
already_processed.add('Id')
showIndent(outfile, level)
outfile.write('Id="%s",\n' % (self.Id,))
if self.Encoding is not None and 'Encoding' not in already_processed:
already_processed.add('Encoding')
showIndent(outfile, level)
outfile.write('Encoding="%s",\n' % (self.Encoding,))
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
pass
    def build(self, node):
        """Populate this instance from an etree element node.

        Captures attributes, the node's full text via get_all_text_, the
        leading text run as a mixed-content item, then recurses into children.
        """
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the MimeType, Id and Encoding XML attributes from *node*."""
        value = find_attr_value_('MimeType', node)
        if value is not None and 'MimeType' not in already_processed:
            already_processed.add('MimeType')
            self.MimeType = value
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
        value = find_attr_value_('Encoding', node)
        if value is not None and 'Encoding' not in already_processed:
            already_processed.add('Encoding')
            self.Encoding = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build mixed content from child elements and their trailing text.

        NOTE(review): ``__ANY__`` looks like an unresolved generateDS
        placeholder for an ``xs:any`` wildcard (and ``nodeName_ == ''`` can
        never match a real tag) -- confirm against the schema/generator.
        """
        if nodeName_ == '':
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class ObjectType
class ManifestType(GeneratedsSuper):
    """Generated binding for XML-DSig ``ManifestType``: an optional ``Id``
    attribute plus a list of ``Reference`` children.  Methods follow the
    standard generateDS pattern (factory / accessors / export* / build*).
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, Reference=None):
        self.Id = _cast(None, Id)
        if Reference is None:
            self.Reference = []
        else:
            self.Reference = Reference
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if ManifestType.subclass:
            return ManifestType.subclass(*args_, **kwargs_)
        else:
            return ManifestType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Reference(self): return self.Reference
    def set_Reference(self, Reference): self.Reference = Reference
    def add_Reference(self, value): self.Reference.append(value)
    def insert_Reference(self, index, value): self.Reference[index] = value
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def hasContent_(self):
        # True when there is at least one Reference child to serialize.
        if (
            self.Reference
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='ManifestType', namespacedef_='', pretty_print=True):
        """Serialize this instance as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ManifestType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='ManifestType'):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='ManifestType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Reference_ in self.Reference:
            Reference_.export(outfile, level, namespace_, name_='Reference', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ManifestType'):
        """Write this instance as Python constructor-call source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('Reference=[\n')
        level += 1
        for Reference_ in self.Reference:
            showIndent(outfile, level)
            outfile.write('model_.Reference(\n')
            Reference_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an etree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Reference':
            obj_ = ReferenceType.factory()
            obj_.build(child_)
            self.Reference.append(obj_)
# end class ManifestType
class SignaturePropertiesType(GeneratedsSuper):
    """Generated binding for XML-DSig ``SignaturePropertiesType``: an optional
    ``Id`` attribute plus a list of ``SignatureProperty`` children.  Methods
    follow the standard generateDS pattern.
    """
    subclass = None
    superclass = None
    def __init__(self, Id=None, SignatureProperty=None):
        self.Id = _cast(None, Id)
        if SignatureProperty is None:
            self.SignatureProperty = []
        else:
            self.SignatureProperty = SignatureProperty
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if SignaturePropertiesType.subclass:
            return SignaturePropertiesType.subclass(*args_, **kwargs_)
        else:
            return SignaturePropertiesType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_SignatureProperty(self): return self.SignatureProperty
    def set_SignatureProperty(self, SignatureProperty): self.SignatureProperty = SignatureProperty
    def add_SignatureProperty(self, value): self.SignatureProperty.append(value)
    def insert_SignatureProperty(self, index, value): self.SignatureProperty[index] = value
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def hasContent_(self):
        # True when there is at least one SignatureProperty child.
        if (
            self.SignatureProperty
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='SignaturePropertiesType', namespacedef_='', pretty_print=True):
        """Serialize this instance as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignaturePropertiesType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignaturePropertiesType'):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='SignaturePropertiesType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for SignatureProperty_ in self.SignatureProperty:
            SignatureProperty_.export(outfile, level, namespace_, name_='SignatureProperty', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='SignaturePropertiesType'):
        """Write this instance as Python constructor-call source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('SignatureProperty=[\n')
        level += 1
        for SignatureProperty_ in self.SignatureProperty:
            showIndent(outfile, level)
            outfile.write('model_.SignatureProperty(\n')
            SignatureProperty_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an etree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'SignatureProperty':
            obj_ = SignaturePropertyType.factory()
            obj_.build(child_)
            self.SignatureProperty.append(obj_)
# end class SignaturePropertiesType
class SignaturePropertyType(GeneratedsSuper):
    """Generated binding for XML-DSig ``SignaturePropertyType``: mixed content
    with a required ``Target`` and optional ``Id`` attribute.  Mixed text and
    child elements are kept in document order in ``content_``.
    """
    subclass = None
    superclass = None
    def __init__(self, Target=None, Id=None, anytypeobjs_=None, valueOf_=None, mixedclass_=None, content_=None):
        self.Target = _cast(None, Target)
        self.Id = _cast(None, Id)
        self.anytypeobjs_ = anytypeobjs_
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if SignaturePropertyType.subclass:
            return SignaturePropertyType.subclass(*args_, **kwargs_)
        else:
            return SignaturePropertyType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_Target(self): return self.Target
    def set_Target(self, Target): self.Target = Target
    def get_Id(self): return self.Id
    def set_Id(self, Id): self.Id = Id
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        # True when there is wildcard content or captured text.
        if (
            self.anytypeobjs_ is not None or
            self.valueOf_
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='SignaturePropertyType', namespacedef_='', pretty_print=True):
        """Serialize this instance as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='SignaturePropertyType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='SignaturePropertyType'):
        if self.Target is not None and 'Target' not in already_processed:
            already_processed.add('Target')
            outfile.write(' Target=%s' % (self.gds_format_string(quote_attrib(self.Target).encode(ExternalEncoding), input_name='Target'), ))
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            outfile.write(' Id=%s' % (self.gds_format_string(quote_attrib(self.Id).encode(ExternalEncoding), input_name='Id'), ))
    def exportChildren(self, outfile, level, namespace_='ds:', name_='SignaturePropertyType', fromsubclass_=False, pretty_print=True):
        if not fromsubclass_:
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='SignaturePropertyType'):
        """Write this instance as Python constructor-call source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.Target is not None and 'Target' not in already_processed:
            already_processed.add('Target')
            showIndent(outfile, level)
            outfile.write('Target="%s",\n' % (self.Target,))
        if self.Id is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            showIndent(outfile, level)
            outfile.write('Id="%s",\n' % (self.Id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        pass
    def build(self, node):
        """Populate this instance from an etree element node, capturing
        attributes, leading text and children as mixed content."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('Target', node)
        if value is not None and 'Target' not in already_processed:
            already_processed.add('Target')
            self.Target = value
        value = find_attr_value_('Id', node)
        if value is not None and 'Id' not in already_processed:
            already_processed.add('Id')
            self.Id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # NOTE(review): ``__ANY__`` looks like an unresolved generateDS
        # placeholder for an xs:any wildcard -- confirm against the schema.
        if nodeName_ == '':
            obj_ = __ANY__.factory()
            obj_.build(child_)
            obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
                MixedContainer.TypeNone, '', obj_)
            self.content_.append(obj_)
            if hasattr(self, 'add_'):
                self.add_(obj_.value)
            elif hasattr(self, 'set_'):
                self.set_(obj_.value)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class SignaturePropertyType
class DSAKeyValueType(GeneratedsSuper):
    """Generated binding for XML-DSig ``DSAKeyValueType``: the DSA key
    parameters P, Q, G, Y, J, Seed and PgenCounter, each a base64-encoded
    CryptoBinary child element.  Methods follow the standard generateDS
    pattern; this module is Python 2 (``except X, e`` syntax below).
    """
    subclass = None
    superclass = None
    def __init__(self, P=None, Q=None, G=None, Y=None, J=None, Seed=None, PgenCounter=None):
        self.P = P
        self.Q = Q
        self.G = G
        self.Y = Y
        self.J = J
        self.Seed = Seed
        self.PgenCounter = PgenCounter
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if DSAKeyValueType.subclass:
            return DSAKeyValueType.subclass(*args_, **kwargs_)
        else:
            return DSAKeyValueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_P(self): return self.P
    def set_P(self, P): self.P = P
    def get_Q(self): return self.Q
    def set_Q(self, Q): self.Q = Q
    def get_G(self): return self.G
    def set_G(self, G): self.G = G
    def get_Y(self): return self.Y
    def set_Y(self, Y): self.Y = Y
    def get_J(self): return self.J
    def set_J(self, J): self.J = J
    def get_Seed(self): return self.Seed
    def set_Seed(self, Seed): self.Seed = Seed
    def get_PgenCounter(self): return self.PgenCounter
    def set_PgenCounter(self, PgenCounter): self.PgenCounter = PgenCounter
    def validate_CryptoBinary(self, value):
        # Validate type CryptoBinary, a restriction on base64Binary.
        pass
    def hasContent_(self):
        if (
            self.P is not None or
            self.Q is not None or
            self.G is not None or
            self.Y is not None or
            self.J is not None or
            self.Seed is not None or
            self.PgenCounter is not None
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='DSAKeyValueType', namespacedef_='', pretty_print=True):
        """Serialize this instance as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='DSAKeyValueType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='DSAKeyValueType'):
        pass
    def exportChildren(self, outfile, level, namespace_='ds:', name_='DSAKeyValueType', fromsubclass_=False, pretty_print=True):
        # Each key parameter is emitted as a base64-formatted child element.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.P is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sP>%s</%sP>%s' % (namespace_, self.gds_format_base64(self.P, input_name='P'), namespace_, eol_))
        if self.Q is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sQ>%s</%sQ>%s' % (namespace_, self.gds_format_base64(self.Q, input_name='Q'), namespace_, eol_))
        if self.G is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sG>%s</%sG>%s' % (namespace_, self.gds_format_base64(self.G, input_name='G'), namespace_, eol_))
        if self.Y is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sY>%s</%sY>%s' % (namespace_, self.gds_format_base64(self.Y, input_name='Y'), namespace_, eol_))
        if self.J is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sJ>%s</%sJ>%s' % (namespace_, self.gds_format_base64(self.J, input_name='J'), namespace_, eol_))
        if self.Seed is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSeed>%s</%sSeed>%s' % (namespace_, self.gds_format_base64(self.Seed, input_name='Seed'), namespace_, eol_))
        if self.PgenCounter is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPgenCounter>%s</%sPgenCounter>%s' % (namespace_, self.gds_format_base64(self.PgenCounter, input_name='PgenCounter'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='DSAKeyValueType'):
        """Write this instance as Python constructor-call source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.P is not None:
            showIndent(outfile, level)
            outfile.write('P=model_.base64Binary(\n')
            self.P.exportLiteral(outfile, level, name_='P')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Q is not None:
            showIndent(outfile, level)
            outfile.write('Q=model_.base64Binary(\n')
            self.Q.exportLiteral(outfile, level, name_='Q')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.G is not None:
            showIndent(outfile, level)
            outfile.write('G=model_.base64Binary(\n')
            self.G.exportLiteral(outfile, level, name_='G')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Y is not None:
            showIndent(outfile, level)
            outfile.write('Y=model_.base64Binary(\n')
            self.Y.exportLiteral(outfile, level, name_='Y')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.J is not None:
            showIndent(outfile, level)
            outfile.write('J=model_.base64Binary(\n')
            self.J.exportLiteral(outfile, level, name_='J')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Seed is not None:
            showIndent(outfile, level)
            outfile.write('Seed=model_.base64Binary(\n')
            self.Seed.exportLiteral(outfile, level, name_='Seed')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.PgenCounter is not None:
            showIndent(outfile, level)
            outfile.write('PgenCounter=model_.base64Binary(\n')
            self.PgenCounter.exportLiteral(outfile, level, name_='PgenCounter')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this instance from an etree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Decode each base64 child into the matching attribute; malformed
        # base64 raises a parse error via raise_parse_error.
        if nodeName_ == 'P':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'P')
            else:
                bval_ = None
            self.P = bval_
            self.validate_CryptoBinary(self.P)    # validate type CryptoBinary
        elif nodeName_ == 'Q':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Q')
            else:
                bval_ = None
            self.Q = bval_
            self.validate_CryptoBinary(self.Q)    # validate type CryptoBinary
        elif nodeName_ == 'G':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'G')
            else:
                bval_ = None
            self.G = bval_
            self.validate_CryptoBinary(self.G)    # validate type CryptoBinary
        elif nodeName_ == 'Y':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Y')
            else:
                bval_ = None
            self.Y = bval_
            self.validate_CryptoBinary(self.Y)    # validate type CryptoBinary
        elif nodeName_ == 'J':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'J')
            else:
                bval_ = None
            self.J = bval_
            self.validate_CryptoBinary(self.J)    # validate type CryptoBinary
        elif nodeName_ == 'Seed':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Seed')
            else:
                bval_ = None
            self.Seed = bval_
            self.validate_CryptoBinary(self.Seed)    # validate type CryptoBinary
        elif nodeName_ == 'PgenCounter':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'PgenCounter')
            else:
                bval_ = None
            self.PgenCounter = bval_
            self.validate_CryptoBinary(self.PgenCounter)    # validate type CryptoBinary
# end class DSAKeyValueType
class RSAKeyValueType(GeneratedsSuper):
    """Generated binding for XML-DSig ``RSAKeyValueType``: the RSA Modulus
    and Exponent, each a base64-encoded CryptoBinary child element.  Methods
    follow the standard generateDS pattern; this module is Python 2
    (``except X, e`` syntax below).
    """
    subclass = None
    superclass = None
    def __init__(self, Modulus=None, Exponent=None):
        self.Modulus = Modulus
        self.Exponent = Exponent
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if RSAKeyValueType.subclass:
            return RSAKeyValueType.subclass(*args_, **kwargs_)
        else:
            return RSAKeyValueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Modulus(self): return self.Modulus
    def set_Modulus(self, Modulus): self.Modulus = Modulus
    def get_Exponent(self): return self.Exponent
    def set_Exponent(self, Exponent): self.Exponent = Exponent
    def validate_CryptoBinary(self, value):
        # Validate type CryptoBinary, a restriction on base64Binary.
        pass
    def hasContent_(self):
        if (
            self.Modulus is not None or
            self.Exponent is not None
            ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='ds:', name_='RSAKeyValueType', namespacedef_='', pretty_print=True):
        """Serialize this instance as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='RSAKeyValueType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ds:', name_='RSAKeyValueType'):
        pass
    def exportChildren(self, outfile, level, namespace_='ds:', name_='RSAKeyValueType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Modulus is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sModulus>%s</%sModulus>%s' % (namespace_, self.gds_format_base64(self.Modulus, input_name='Modulus'), namespace_, eol_))
        if self.Exponent is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sExponent>%s</%sExponent>%s' % (namespace_, self.gds_format_base64(self.Exponent, input_name='Exponent'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='RSAKeyValueType'):
        """Write this instance as Python constructor-call source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Modulus is not None:
            showIndent(outfile, level)
            outfile.write('Modulus=model_.base64Binary(\n')
            self.Modulus.exportLiteral(outfile, level, name_='Modulus')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Exponent is not None:
            showIndent(outfile, level)
            outfile.write('Exponent=model_.base64Binary(\n')
            self.Exponent.exportLiteral(outfile, level, name_='Exponent')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this instance from an etree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Decode the base64 Modulus/Exponent children; malformed base64
        # raises a parse error via raise_parse_error.
        if nodeName_ == 'Modulus':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Modulus')
            else:
                bval_ = None
            self.Modulus = bval_
            self.validate_CryptoBinary(self.Modulus)    # validate type CryptoBinary
        elif nodeName_ == 'Exponent':
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError), exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Exponent')
            else:
                bval_ = None
            self.Exponent = bval_
            self.validate_CryptoBinary(self.Exponent)    # validate type CryptoBinary
# end class RSAKeyValueType
# Map XML element (local) names to the generated classes that parse them;
# consulted by get_root_tag() when choosing a root class.
GDSClassesMapping = {
    'PGPData': PGPDataType,
    'Transform': TransformType,
    'X509IssuerSerial': X509IssuerSerialType,
    'SignatureMethod': SignatureMethodType,
    'SPKIData': SPKIDataType,
    'SignatureProperty': SignaturePropertyType,
    'Object': ObjectType,
    'X509Data': X509DataType,
    'DigestMethod': DigestMethodType,
    'KeyValue': KeyValueType,
    'CanonicalizationMethod': CanonicalizationMethodType,
    'SignatureProperties': SignaturePropertiesType,
    'KeyInfo': KeyInfoType,
    'Manifest': ManifestType,
    'RSAKeyValue': RSAKeyValueType,
    'Signature': SignatureType,
    'RetrievalMethod': RetrievalMethodType,
    'DSAKeyValue': DSAKeyValueType,
    'Reference': ReferenceType,
    'Transforms': TransformsType,
    'SignedInfo': SignedInfoType,
    'SignatureValue': SignatureValueType,
}
# Command-line help text printed by usage().
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the usage banner and exit with status 1."""
    # Single-argument print() behaves identically under Python 2 and 3,
    # unlike the original Python-2-only ``print USAGE_TEXT`` statement.
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return ``(tag, cls)``: the node's local tag name and the generated
    class registered for it (``None`` when the tag is unknown)."""
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    # Prefer the explicit mapping; fall back to a module-level class of
    # the same name.
    cls = GDSClassesMapping.get(tag)
    return tag, cls if cls is not None else globals().get(tag)
def parse(inFileName, silence=False):
    """Parse *inFileName*, build the object tree and, unless *silence*,
    re-export it to stdout as XML.  Unknown root tags fall back to
    ``SignatureType``.  Returns the root object."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Signature'
        rootClass = SignatureType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='xmlns:ds="http://www.w3.org/2000/09/xmldsig#"',
            pretty_print=True)
    return rootObj
def parseEtree(inFileName, silence=False):
    """Parse *inFileName* and also convert the object tree back to etree.

    Returns ``(rootObj, rootElement, mapping, reverse_mapping)`` where the
    mappings relate generated objects to their etree elements.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Signature'
        rootClass = SignatureType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse an in-memory XML string and return the root object.

    Unlike parse(), the export name is hardcoded to 'Signature' and
    pretty_print is not requested -- this is how the generator emitted it.
    """
    # Python 2 module; the Python 3 equivalent would be io.StringIO.
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    roots = get_root_tag(rootNode)
    rootClass = roots[1]
    if rootClass is None:
        rootClass = SignatureType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_="Signature",
            namespacedef_='xmlns:ds="http://www.w3.org/2000/09/xmldsig#"')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse *inFileName* and, unless *silence*, write the tree back out as
    Python constructor-call source ("literal" export).

    NOTE(review): the emitted ``import xmldsig-core-schema_v01`` line is not
    valid Python (hyphens in a module name) -- the generated script would
    need the module renamed or loaded via importlib.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Signature'
        rootClass = SignatureType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('#from xmldsig-core-schema_v01 import *\n\n')
        sys.stdout.write('import xmldsig-core-schema_v01 as model_\n\n')
        sys.stdout.write('rootObj = model_.rootTag(\n')
        rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
        sys.stdout.write(')\n')
    return rootObj
def main():
    """CLI entry point: parse the single XML file named on the command line,
    or print usage and exit when the argument count is wrong."""
    args = sys.argv[1:]
    if len(args) == 1:
        parse(args[0])
    else:
        usage()
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Explicit public API of this generated module.
__all__ = [
    "CanonicalizationMethodType",
    "DSAKeyValueType",
    "DigestMethodType",
    "KeyInfoType",
    "KeyValueType",
    "ManifestType",
    "ObjectType",
    "PGPDataType",
    "RSAKeyValueType",
    "ReferenceType",
    "RetrievalMethodType",
    "SPKIDataType",
    "SignatureMethodType",
    "SignaturePropertiesType",
    "SignaturePropertyType",
    "SignatureType",
    "SignatureValueType",
    "SignedInfoType",
    "TransformType",
    "TransformsType",
    "X509DataType",
    "X509IssuerSerialType"
]
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
8a9ae51e7fb7f3ad1ad2245e5036b15a9fa4ec17
|
293b290895e97465460905b492901c434b3c6dcf
|
/crime_stats.py
|
d1648992693ae8c12e654e5e9be67a92e4e98177
|
[] |
no_license
|
srujanbelde/EnvisionChicago-DataAnalytics
|
0b5acb68d02c4df6bd36946b7a1da1c0e5d8433b
|
461077d1308a210d79af05015b55b244879ccd41
|
refs/heads/master
| 2020-03-11T11:19:31.268867
| 2018-04-17T21:23:16
| 2018-04-17T21:23:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
import pandas as pd
import re
from difflib import SequenceMatcher
import datetime
# NOTE(review): re, SequenceMatcher and datetime appear unused in this module.
# Input CSVs: crime incidents joined to census tract/blocks, and census age groups.
crimes_file_path = "Crimes_Census_Final_output.csv"
census_ages = "age_groups.csv"
def integrate_ages_crimes():
    """Join the crimes/census CSV with the census age-group CSV on census block.

    Returns the merged DataFrame.  Assumes both input files exist on disk;
    indentation of this block was mangled in the source and has been restored.
    """
    crimes_census = pd.read_csv(crimes_file_path, nrows=50000000)
    # Normalise column names: spaces -> underscores.
    crimes_census.columns = [c.replace(' ', '_') for c in crimes_census.columns]
    # Drop the 3-digit block suffix so tract_block matches the census block id.
    crimes_census['tract_block'] = crimes_census['tract_block'].apply(lambda x: fun(x))
    crimes_census = crimes_census.rename(columns={"tract_block": "census_block"})
    census_demographs = pd.read_csv(census_ages, nrows=50000000)
    census_demographs.columns = [c.replace(' ', '_') for c in census_demographs.columns]
    # Derive the join key and the dominant age group per row.
    census_demographs['census_block'] = census_demographs.apply(lambda row: my_fun(row), axis=1)
    census_demographs['max_age'] = census_demographs.apply(lambda row: my_fun2(row), axis=1)
    integrated = pd.merge(census_demographs, crimes_census, on=['census_block'])
    return integrated


# NOTE(review): stray debug print; the original indentation was lost, so this
# may even have been dead code after the return -- confirm and remove.
print("hello")
def my_fun2(row):
    """Return the age-group label with the largest population in *row*.

    *row* must expose the census population columns by key (a pandas
    Series or a plain dict).  Ties resolve to the first key in insertion
    order, matching ``max()`` semantics.
    """
    # Named mapping instead of the original's `dict`, which shadowed the
    # builtin of the same name.
    populations = {
        "5-17": row['Pop_5_17'],
        "18-24": row['Pop_18_24'],
        "25-44": row['Pop_25_44'],
        "45-64": row['Pop_45_64'],
        "65+": row['Pop_65']
    }
    # max over the keys, comparing by population count.
    return max(populations, key=populations.get)
def my_fun(row):
    """Concatenate the tract and block_group digits into one block id."""
    return int(f"{row['tract']}{row['block_group']}")
def fun(x):
    """Drop the last three characters of x's string form, return as int."""
    return int(str(x)[:-3])
def main():
    """Compute per-block crime counts and write crime_statistics_out.csv."""
    df = integrate_ages_crimes()
    # Crimes per (block, crime type, year).
    no_crimes_data = pd.DataFrame({'crimes_no': df.groupby(['census_block','Primary_Type','year']).size()}).reset_index()
    # Reduce to one (block, dominant age group) row per combination.
    df = pd.DataFrame({'unique': df.groupby(['census_block','max_age']).size()}).reset_index()
    df = df[['census_block', 'max_age']]
    final_frame = pd.merge(df,no_crimes_data,on=['census_block'])
    final_frame.to_csv('crime_statistics_out.csv', encoding='utf-8', index=False)
    print("done")
# Runs unconditionally at import time (no __main__ guard).
main()
|
[
"teja.bhargav@gmail.com"
] |
teja.bhargav@gmail.com
|
7613b87d4f7bb041d44a46de933c8c6fcf6b6ca7
|
7c27302ec89cda9b14a4dc5b2c3b41824e694dd6
|
/class2.py
|
d57b5a73e4bb6d2e85672af5b483edb9ed154948
|
[
"MIT"
] |
permissive
|
ranjithvbr/test
|
583580a8d35ecae64a118c4ab3ae58d28835dc75
|
37cb7a876c402fc68e4624071924b2851ab7b1c4
|
refs/heads/master
| 2020-03-23T03:24:30.013593
| 2018-10-11T12:49:22
| 2018-10-11T12:49:22
| 141,028,651
| 2
| 1
|
MIT
| 2018-10-11T12:49:23
| 2018-07-15T13:15:10
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
# NOTE: Python 2 syntax (print statements) — this file will not run under Python 3.
class Employee:
    'Common base class for all employees'
    # Class-level counter of Employee instances ever constructed.
    empCount = 0
    def __init__(num,name, salary):
        # `num` plays the role of `self` (unconventional naming).
        num.new = name      # employee name
        num.new2 = salary   # employee salary
        Employee.empCount =Employee.empCount+ 1
    def display(num):
        # Print the running instance count.
        print "Total Employee %d" % Employee.empCount
    def displayEmployee(num):
        # Print this employee's name and salary.
        print "Name : ", num.new, ", Salary: ",num.new2
# The bare string literals below act as inline commentary (no runtime effect).
"This would create first object of Employee class"
emp1 = Employee("Zara", 2000)
"This would create second object of Employee class"
emp2 = Employee("Manni", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
# Count reflects both constructions above.
print "Total Employee %d" % Employee.empCount
|
[
"ranjithvbr@gmail.com"
] |
ranjithvbr@gmail.com
|
fb833a786a0d20f87937019f8e9caa12a42bd37f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02859/s282657113.py
|
e434f658756ad3e7e51c07a4f92cf7ee39d78ef2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
# Read an integer from stdin and print its square.
n = int(input())
print(n * n)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9144392619c0c79d7fa7143d83361431229e5a74
|
f8f0263b9febfa4eeccabe920290abd8afbd54c7
|
/app/functions/customer_happiness.py
|
2cd7cc2d7c260cf173c5bfbc17f652a4052874f8
|
[] |
no_license
|
kiamatt/helios-zoho-api
|
8a2949e802f2a5cda4f62f6d31b5fc41112a2709
|
7c24882d7a2fd123440ae347f5aa23310fecbb4d
|
refs/heads/master
| 2022-12-05T22:55:22.151738
| 2020-08-21T11:26:32
| 2020-08-21T11:26:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
from .utils import zoho_auth
import os
import boto3
import json
from datetime import datetime
BUCKET = os.environ['BUCKET']
def main(event, context):
    """AWS Lambda handler: fetch Zoho 'customerHappiness' stats, store in S3.

    `event` and `context` are the standard Lambda arguments (unused here).
    Returns None on success, or the error message string on failure.
    """
    s3 = boto3.client('s3')
    department_id = 'allDepartment'
    params = f'?department={department_id}'
    base_path = 'customerHappiness'
    path = f'{base_path}{params}'
    try:
        # Timestamp keys the S3 object, so each invocation writes a new file.
        now = datetime.now().strftime("%Y/%m/%d/%H:%M:%S")
        result = zoho_auth.main(path)
        response = result['body']['data']
        s3.put_object(
            Body=str(json.dumps(response)),
            Bucket=BUCKET,
            Key=f'{base_path}/departmentId:{department_id}/{now}.json'
        )
    except Exception as e:
        # NOTE(review): errors are returned as strings rather than raised, so
        # the Lambda invocation still reports success — confirm this is intended.
        return str(e)
|
[
"matthewmartinez1003@gmail.com"
] |
matthewmartinez1003@gmail.com
|
aaed72c4c34418066429eb2c96fbe9b95606cdb3
|
de358ba57518d65393c810da20c53e1c41494bff
|
/LRUcache.py
|
49f000a37b16c4cd24efb3415b3888324acb43b6
|
[] |
no_license
|
avirupdandapat/ALGOPROJECT
|
43eef94b13e38452cdc6a506b17b6fee581a07e1
|
55b60a0c6e51cae900e243505f6a4557ad4d7069
|
refs/heads/master
| 2022-12-29T13:02:54.655976
| 2020-10-18T12:23:57
| 2020-10-18T12:23:57
| 305,095,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
from collections import deque
class LRUCache:
    """Least-recently-used cache backed by a dict plus a deque of keys.

    The deque orders keys from most recent (left) to least recent
    (right); evictions pop from the right end.
    """

    def __init__(self, capacity):
        # Maximum number of entries held at once.
        self.capacity = capacity
        self.dic = {}
        self.q = deque()

    def get(self, key):
        """Return the cached value for *key*, or -1 when absent."""
        if key not in self.dic:
            return -1
        # Promote the key to most-recently-used.
        self.q.remove(key)
        self.q.appendleft(key)
        return self.dic[key]

    def set(self, key, value):
        """Insert or update *key*, evicting the LRU entry when full."""
        if key in self.dic:
            # Updating an existing key also refreshes its recency.
            self.q.remove(key)
        elif len(self.dic) == self.capacity:
            evicted = self.q.pop()
            del self.dic[evicted]
        self.q.appendleft(key)
        self.dic[key] = value
if __name__ == '__main__':
    # Quick smoke test with a capacity-2 cache.
    l = LRUCache(2)
    l.set(1, 10)
    l.set(5, 12)
    print(l.get(5))  # expected: 12
    l.get(5)
    l.get(1)
|
[
"avirup.dandapat@mindtree.com"
] |
avirup.dandapat@mindtree.com
|
4d1dc1f084686e22f9f832a79dae3c1d0d56dc01
|
43fe6a9d6875f7524204177a3a68229059133789
|
/social/account/multiforms.py
|
844065a4370c0da415a5df2b271ab382d43f2db9
|
[
"MIT"
] |
permissive
|
MiKueen/Social-Network
|
a011836805ad45228b0031ed1883526b0af02920
|
0b872860f08c3ec6f48a53160128af28787737c7
|
refs/heads/master
| 2023-04-17T15:33:13.212550
| 2019-07-13T04:40:54
| 2019-07-13T04:40:54
| 196,678,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.views.generic.base import ContextMixin, TemplateResponseMixin
from django.views.generic.edit import ProcessFormView
class MultiFormMixin(ContextMixin):
    """Mixin managing several named forms on a single view.

    Subclasses declare ``form_classes`` (name -> Form class) and may add
    per-form prefixes, success URLs, initial data, and optional
    ``<name>_form_valid`` / ``get_<name>_initial`` hooks.
    """
    form_classes = {}
    prefixes = {}
    success_urls = {}
    initial = {}
    prefix = None
    success_url = None

    def get_form_classes(self):
        return self.form_classes

    def get_forms(self, form_classes):
        # Instantiate every configured form, keyed by its name.
        return dict([(key, self._create_form(key, class_name)) \
            for key, class_name in form_classes.items()])

    def get_form_kwargs(self, form_name):
        kwargs = {}
        kwargs.update({'initial':self.get_initial(form_name)})
        kwargs.update({'prefix':self.get_prefix(form_name)})
        if self.request.method in ('POST', 'PUT'):
            # Bind submitted data so validation can run.
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs

    def forms_valid(self, forms, form_name):
        # Prefer a per-form handler such as ``login_form_valid`` when defined.
        form_valid_method = '%s_form_valid' % form_name
        if hasattr(self, form_valid_method):
            return getattr(self, form_valid_method)(forms[form_name])
        else:
            # NOTE(review): HttpResponseRedirect is not imported in this module
            # (only HttpResponseForbidden is) — this branch raises NameError
            # until it is imported from django.http.
            return HttpResponseRedirect(self.get_success_url(form_name))

    def forms_invalid(self, forms):
        return self.render_to_response(self.get_context_data(forms=forms))

    def get_initial(self, form_name):
        # Optional per-form hook: ``get_<name>_initial``.
        initial_method = 'get_%s_initial' % form_name
        if hasattr(self, initial_method):
            return getattr(self, initial_method)()
        else:
            return {'action': form_name}

    def get_prefix(self, form_name):
        return self.prefixes.get(form_name, self.prefix)

    def get_success_url(self, form_name=None):
        return self.success_urls.get(form_name, self.success_url)

    def _create_form(self, form_name, form_class):
        form_kwargs = self.get_form_kwargs(form_name)
        form = form_class(**form_kwargs)
        return form
class ProcessMultipleFormsView(ProcessFormView):
    """Dispatch GET/POST for a view that holds several named forms."""

    def get(self, request, *args, **kwargs):
        # Plain GET: render every configured form unbound.
        forms = self.get_forms(self.get_form_classes())
        return self.render_to_response(self.get_context_data(forms=forms))

    def post(self, request, *args, **kwargs):
        # The submitted form identifies itself via its 'action' field.
        submitted = request.POST.get('action')
        return self._process_individual_form(submitted, self.get_form_classes())

    def _process_individual_form(self, form_name, form_classes):
        forms = self.get_forms(form_classes)
        target = forms.get(form_name)
        if not target:
            # Unknown or missing form name: refuse the request.
            return HttpResponseForbidden()
        if target.is_valid():
            return self.forms_valid(forms, form_name)
        return self.forms_invalid(forms)
# Combines form management (mixin) with GET/POST dispatch.
class BaseMultipleFormsView(MultiFormMixin, ProcessMultipleFormsView):
    """
    A base view for displaying several forms.
    """
# Concrete view: adds template rendering on top of the multi-form base.
class MultiFormsView(TemplateResponseMixin, BaseMultipleFormsView):
    """
    A view for displaying several forms, and rendering a template response.
    """
|
[
"keshvi2298@gmail.com"
] |
keshvi2298@gmail.com
|
92abc9246fc22cd7a32206f48ca38cf04e952ccb
|
a19431dd88abad381368c547648038089e817b4c
|
/venv/Scripts/django-admin.py
|
39555b4993d519d25f778d659814b24dd7381d00
|
[] |
no_license
|
allimpossible/my-first-blog
|
6d3a41d7d2350f21b28699eca8cecf36a1f96cfc
|
63b684cd7723766c1fa607c3d9561fa2cbe873d0
|
refs/heads/master
| 2020-04-13T17:35:53.678133
| 2018-12-28T01:31:57
| 2018-12-28T01:31:57
| 163,351,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
#!C:\Users\shadow\PycharmProjects\WebTest\venv\Scripts\python.exe
# Virtualenv-generated django-admin entry point: delegates to Django's CLI.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"zhuangbidafa@gmail.com"
] |
zhuangbidafa@gmail.com
|
6453b2b087efd59f3078f1b53708a18af9747775
|
055cc43b5eda7bbf4a077fbec7307f40c37cddfa
|
/Part 3 - Classification/Section 17 - Kernel SVM/kernel_svm.py
|
85b8e690d2084c37fb24edb6f5895fd9f81c5b95
|
[] |
no_license
|
sinamahbobi/Machine-Learning-A-Z-Assignments
|
2548244cef99a78c25ed7ba6a9ab3fb915395761
|
e9ba69580e436cdaba819d20b2d53a5a83c325e8
|
refs/heads/master
| 2021-01-16T12:30:13.775359
| 2020-02-25T23:29:10
| 2020-02-25T23:29:10
| 243,122,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
# Classification template
# Trains an RBF-kernel SVM on the Social_Network_Ads data (Age, Salary ->
# Purchased) and plots the decision regions for train and test sets.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values  # columns 2,3: Age, EstimatedSalary
y = dataset.iloc[:, 4].values       # column 4: purchase label

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Feature Scaling (scaler fit on train only, then applied to test)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting classifier to the Training set
# Create your classifier here
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training set results: predict over a dense grid to
# shade the decision regions, then overlay the actual points.
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.figure(1)
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising the Test set results (same grid technique)
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.figure(2)
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e1e3882e61c86c5880e027985494b00f1b8b756e
|
dc6ebd10d352915600a3297606f5c4b9dc558704
|
/Eshop/urls.py
|
53b3b00e059bb407fe8aed956c74a49b3e4338f3
|
[] |
no_license
|
NishaNegi1/Eshop
|
c0bdd82cb9ee556727c1a247776fa473eb84e4b2
|
12dcef76c0697b3344e0ed53a28b43df4ffb892c
|
refs/heads/main
| 2023-03-27T07:22:44.473628
| 2021-03-31T07:19:35
| 2021-03-31T07:19:35
| 353,261,237
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
"""Eshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from mainApp import views
from django.conf import settings
from django.conf.urls.static import static
# Shop URL routes. NOTE(review): 'deletecart/<int:num>/' maps to
# views.deleteProduct — likely a copy-paste slip (a cart-specific view such
# as deleteCart was probably intended); confirm against mainApp.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.home),
    path('cart/', views.cartDetails),
    path('checkout/', views.checkoutDetails),
    path('login/',views.loginDetails),
    path('product/<int:num>/', views.productDetails),
    path('contact/',views.contactDetails),
    path('shop/<str:cat>/<str:br>/', views.shopDetails),
    path('signup/', views.signupUser),
    path('profile/', views.profile),
    path('logout/', views.logout),
    path('addproduct/',views.addProduct),
    path('deleteproduct/<int:num>/',views.deleteProduct),
    path('editproduct/<int:num>/',views.editProduct),
    path('deletecart/<int:num>/',views.deleteProduct),
    path('confirm/',views.confirm),
    path('wishlist/<int:num>/',views.wishlistDetails),
    path('wishlist/',views.wishlistBuyer),
    path('deletewishlist/<int:num>/',views.wishlistDelete),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)  # serve uploaded media in dev
|
[
"nishabun99@gmail.com"
] |
nishabun99@gmail.com
|
da45f7852916d35f50bd49f037a7b3edd42a3e21
|
68d38b305b81e0216fa9f6769fe47e34784c77f2
|
/alascrapy/spiders/amazon_uk_reviews.py
|
15695e7d86cb23644a4dfb659ed43372c84943c0
|
[] |
no_license
|
ADJet1437/ScrapyProject
|
2a6ed472c7c331e31eaecff26f9b38b283ffe9c2
|
db52844411f6dac1e8bd113cc32a814bd2ea3632
|
refs/heads/master
| 2022-11-10T05:02:54.871344
| 2020-02-06T08:01:17
| 2020-02-06T08:01:17
| 237,448,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
__author__ = 'leonardo'
from alascrapy.spiders.base_spiders.amazon import AmazonReviewsSpider
class AmazonUkReviewsSpider(AmazonReviewsSpider):
    """Scrapy spider for amazon.co.uk product reviews, newest first."""
    name = 'amazon_uk_reviews'
    # %s is filled with the product identifier by the base spider.
    start_url_format = "https://www.amazon.co.uk/product-reviews/%s/ref=cm_cr_dp_see_all_btm?ie=UTF8&showViewpoints=1&sortBy=recent"
    # strptime pattern for review dates, e.g. "on 12 January 2019".
    date_format = 'on %d %B %Y'
    amazon_kind = 'amazon_uk_id'
    language = 'en'
|
[
"liangzijie1437@gmail.com"
] |
liangzijie1437@gmail.com
|
008c0cfc5535c8ef65db4e64a7fb8f83c307aed7
|
971b74f845ea064e3880580c5fb8e4a5eb35035a
|
/nlex/lib/abc/structure.py
|
f058bbcc6e68f1815b1897b184b26f237ebb67e6
|
[] |
no_license
|
a710128/nlex
|
44cad95ef198ed19275246cefaa0e21cda8a63ec
|
b9a96d862669b977df43979c234c8edfcb7f2a15
|
refs/heads/main
| 2022-12-27T00:14:22.961008
| 2020-10-12T08:13:44
| 2020-10-12T08:13:44
| 300,345,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from typing import Union, Tuple
class Structure(object):
    """Base class for layout structures; every hook is a no-op default."""

    def __init__(self):
        # Nothing to initialise in the base class.
        pass

    def set_slot(self, slot : 'Slot') -> None:
        """Attach *slot*; the base implementation ignores it."""
        pass

    def is_slot(self, slot : 'Slot') -> bool:
        """Whether *slot* belongs to this structure; always False here."""
        return False

    def get_value(self) -> Tuple[int, int]:
        """Default value pair for the base class."""
        return 0, 0

    def last(self) -> Union[None, 'Structure']:
        """Last child structure; the base class has none."""
        return None
|
[
"qbjooo@qq.com"
] |
qbjooo@qq.com
|
9e30e808b8df340558eca315c2ab6554d53b3622
|
9be6b593f7740dbcbcbf56765b4ba55bc65d3e55
|
/notebooks/import_scripts/import_interrogator_fits.py
|
755927330cc042e0b202e4c38343a74e87e984fb
|
[
"MIT"
] |
permissive
|
camipacifici/art_sedfitting
|
a78b06a36ed277139c581049b7720d9bfa7e3fc1
|
32d23a2bef5a1e0499e8ce225019478d94b28521
|
refs/heads/main
| 2023-04-13T21:36:07.409163
| 2022-12-04T03:31:41
| 2022-12-04T03:31:41
| 440,978,868
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,515
|
py
|
import numpy as np
from import_catalogs import get_cat
cat_small_ids, cat_z1_ids, cat_z3_ids = get_cat()
vb = False
# ID
#BPASS_log10M*_16 BPASS_log10M*_50 BPASS_log10M*_84
#BPASS_SFR10_16 BPASS_SFR10_50 BPASS_SFR10_84
#BPASS_SFR100_16 BPASS_SFR100_50 BPASS_SFR100_84
#BPASS_A_V_16 BPASS_A_V_50 BPASS_A_V_84
#PEGASE_log10M*_16 PEGASE_log10M*_50 PEGASE_log10M*_84
#PEGASE_SFR10_16 PEGASE_SFR10_50 PEGASE_SFR10_84
#PEGASE_SFR100_16 PEGASE_SFR100_50 PEGASE_SFR100_84
#PEGASE_A_V_16 PEGASE_A_V_50 PEGASE_A_V_84
# Currently using SFR10, come back to this if it needs to be changed to SFR100
#---------------------z1-----------------------------
interrogator_mass_z1_lo_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_mass_z1_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_mass_z1_hi_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_mass_z1_lo_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_mass_z1_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_mass_z1_hi_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_lo_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_hi_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_lo_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_sfr_z1_hi_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_lo_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_hi_bpass = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_lo_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_Av_z1_hi_pegase = np.zeros_like(cat_z1_ids)*np.nan
interrogator_z1_cat = np.genfromtxt('../code_outputs/INTERROGATOR_z1_output.dat')
if vb == True:
print(interrogator_z1_cat.shape)
interrogator_id_z1 = interrogator_z1_cat[0:,0]
if vb == True:
print(np.sort(interrogator_id_z1)[1:10])
for i, idtemp in enumerate(cat_z1_ids):
#print(np.sum(interrogator_id_z1 == (idtemp+1)))
if np.sum(interrogator_id_z1 == (idtemp+1)) == 1:
interrogator_mass_z1_lo_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),1]
interrogator_mass_z1_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),2]
interrogator_mass_z1_hi_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),3]
interrogator_mass_z1_lo_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),13]
interrogator_mass_z1_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),14]
interrogator_mass_z1_hi_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),15]
interrogator_sfr_z1_lo_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),4]
interrogator_sfr_z1_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),5]
interrogator_sfr_z1_hi_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),6]
interrogator_sfr_z1_lo_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),16]
interrogator_sfr_z1_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),17]
interrogator_sfr_z1_hi_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),18]
interrogator_Av_z1_lo_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),10]
interrogator_Av_z1_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),11]
interrogator_Av_z1_hi_bpass[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),12]
interrogator_Av_z1_lo_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),22]
interrogator_Av_z1_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),23]
interrogator_Av_z1_hi_pegase[i] = interrogator_z1_cat[interrogator_id_z1 == (idtemp+1),24]
#--------------------z3-----------------------------------------
interrogator_mass_z3_lo_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_mass_z3_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_mass_z3_hi_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_mass_z3_lo_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_mass_z3_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_mass_z3_hi_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_lo_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_hi_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_lo_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_sfr_z3_hi_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_lo_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_hi_bpass = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_lo_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_Av_z3_hi_pegase = np.zeros_like(cat_z3_ids)*np.nan
interrogator_z3_cat = np.genfromtxt('../code_outputs/INTERROGATOR_z3_output.dat')
if vb == True:
print(interrogator_z3_cat.shape)
interrogator_id_z3 = interrogator_z3_cat[0:,0]
if vb == True:
print(np.sort(interrogator_id_z3)[1:10])
for i, idtemp in enumerate(cat_z3_ids):
#print(np.sum(interrogator_id_z3 == (idtemp+1)))
if np.sum(interrogator_id_z3 == (idtemp+1)) == 1:
interrogator_mass_z3_lo_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),1]
interrogator_mass_z3_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),2]
interrogator_mass_z3_hi_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),3]
interrogator_mass_z3_lo_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),13]
interrogator_mass_z3_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),14]
interrogator_mass_z3_hi_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),15]
interrogator_sfr_z3_lo_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),4]
interrogator_sfr_z3_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),5]
interrogator_sfr_z3_hi_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),6]
interrogator_sfr_z3_lo_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),16]
interrogator_sfr_z3_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),17]
interrogator_sfr_z3_hi_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),18]
interrogator_Av_z3_lo_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),10]
interrogator_Av_z3_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),11]
interrogator_Av_z3_hi_bpass[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),12]
interrogator_Av_z3_lo_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),22]
interrogator_Av_z3_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),23]
interrogator_Av_z3_hi_pegase[i] = interrogator_z3_cat[interrogator_id_z3 == (idtemp+1),24]
# Diagnostic plots, only when verbose. NOTE(review): `plt` (matplotlib.pyplot)
# is never imported in this module, so this branch raises NameError if vb is
# ever set True — add the import before enabling it.
if vb == True:
    plt.figure(figsize=(12,4))
    plt.subplot(1,2,1)
    # Mask out the -99 sentinel used for missing PEGASE masses.
    mask = (interrogator_mass_z1_pegase > -99)
    plt.scatter(interrogator_mass_z1_bpass,interrogator_sfr_z1_bpass,c=interrogator_Av_z1_bpass)
    plt.colorbar()
    plt.ylim(-4,3)
    plt.xlim(8,12)
    plt.xlabel('log Stellar Mass')
    plt.ylabel('log SFR')
    plt.subplot(1,2,2)
    plt.scatter(interrogator_mass_z1_pegase[mask],interrogator_sfr_z1_pegase[mask],c=interrogator_Av_z1_pegase[mask])
    plt.colorbar()
    plt.ylim(-4,3)
    plt.xlim(8,12)
    plt.xlabel('log Stellar Mass')
    plt.ylabel('log SFR')
    plt.show()
    plt.figure(figsize=(12,4))
    plt.subplot(1,2,1)
    plt.scatter(interrogator_mass_z3_bpass,interrogator_sfr_z3_bpass,c=interrogator_Av_z3_bpass)
    plt.colorbar()
    plt.ylim(-4,3)
    plt.xlim(8,12)
    plt.xlabel('log Stellar Mass')
    plt.ylabel('log SFR')
    plt.subplot(1,2,2)
    plt.scatter(interrogator_mass_z3_pegase,interrogator_sfr_z3_pegase,c=interrogator_Av_z3_pegase)
    plt.colorbar()
    plt.ylim(-4,3)
    plt.xlim(8,12)
    plt.xlabel('log Stellar Mass')
    plt.ylabel('log SFR')
    plt.show()
print('imported interrogator fits.')
|
[
"cpacifici@stsci.edu"
] |
cpacifici@stsci.edu
|
714e19179bf6cdebe2c8704dd95faf550aad8b20
|
0282ec8733105cbb22af9c1c38aec5d240abeb32
|
/experiments/gluon_rnns/setup.py
|
2fc1a1ee3ca5e5c6b70a4ca86244e405f4bdefbd
|
[
"Apache-2.0"
] |
permissive
|
uwsampl/relay-bench
|
d247f6f3cbc344d1d176c255a2fe7662e2d5ed8d
|
527166e4ca13a2c7a513f9c66584b7ac9201436e
|
refs/heads/master
| 2022-06-19T08:54:20.276771
| 2021-10-25T23:24:17
| 2021-10-25T23:24:17
| 137,016,402
| 8
| 4
|
Apache-2.0
| 2022-05-24T17:06:13
| 2018-06-12T04:20:45
|
Python
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
from common import invoke_main, render_exception, write_status
from mxnet_util import export_mxnet_model
def main(config_dir, setup_dir):
    """Export the rnn/gru/lstm MXNet models into *setup_dir*.

    Writes a success/failure status file either way; never raises.
    config_dir is unused here — presumably required by invoke_main's CLI
    contract (confirm).
    """
    try:
        export_mxnet_model('rnn', setup_dir)
        export_mxnet_model('gru', setup_dir)
        export_mxnet_model('lstm', setup_dir)
        write_status(setup_dir, True, 'success')
    except Exception as e:
        # Record the failure (rendered traceback) instead of crashing the harness.
        write_status(setup_dir, False, render_exception(e))

if __name__ == '__main__':
    invoke_main(main, 'config_dir', 'setup_dir')
|
[
"sslyu@cs.washington.edu"
] |
sslyu@cs.washington.edu
|
56be1372165e22f35b3d54e6e743bf28d328e0c1
|
bb80ce104e916d54d2af020b8a38f6a92bfe9931
|
/home/migrations/0001_initial.py
|
f0030a0816dc9d6da4765f6d47264233312e389c
|
[] |
no_license
|
louis-Fs/Django
|
e16f0f058df92e02f73ae36ca61061faed3b86bc
|
424cdc3d18d5f9f5c7cf2b38c615bbc9ca9ced15
|
refs/heads/main
| 2023-02-20T09:17:12.578596
| 2021-01-21T04:19:29
| 2021-01-21T04:19:29
| 331,511,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
# Generated by Django 2.2.7 on 2019-12-01 05:49
from django.db import migrations, models
# Auto-generated initial migration: creates the `stuinfo` table.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='stuinfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20)),
                ('password', models.CharField(max_length=20)),
                ('email', models.CharField(max_length=50)),
                ('sex', models.CharField(max_length=4)),
                ('grade', models.CharField(max_length=10)),
                ('hobby', models.CharField(max_length=20)),
                ('phone', models.CharField(max_length=20)),
                ('addres', models.CharField(max_length=100)),
            ],
        ),
    ]
|
[
"344735074@qq.com"
] |
344735074@qq.com
|
d2d20572d982ee26e2f12321d0b30cbe73a1f96d
|
1dd687bdb3bb964383c3f4dde7e9eae8a09be5f5
|
/pyleecan/Functions/Optimization/evaluate.py
|
0a1af3f9cf3e0b559605e9893c1c9f85951875f6
|
[
"Apache-2.0"
] |
permissive
|
Kelos-Zhu/pyleecan
|
4daa2c8738cfe8a721ac2bdf883c59a1b52d8570
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
refs/heads/master
| 2022-11-18T14:30:29.787005
| 2020-07-09T16:55:02
| 2020-07-09T16:55:02
| 278,112,321
| 0
| 0
|
Apache-2.0
| 2020-07-08T14:31:39
| 2020-07-08T14:31:38
| null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
# -*- coding: utf-8 -*-
from logging import WARNING
import traceback
import sys
if sys.version_info > (3, 0):
from io import StringIO
else:
from StringIO import StringIO
from os import stat, remove
from datetime import datetime
def evaluate(solver, indiv):
    """Evaluate the individual according to the solver method

    Parameters
    ----------
    solver : Solver
        optimization solver
    indiv : individual
        individual to evaluate

    Returns
    -------
    evaluation_failure : bool
        True when the simulation or an objective function raised
    """
    # Get solver logger
    logger = solver.get_logger()

    tb = StringIO()  # to store the traceback in case of error

    logger.debug("Design variables :")
    for i, design_variable in enumerate(indiv.design_var_name_list):
        logger.debug(design_variable + " : " + str(indiv[i]))
    try:
        # Run the simulation itself, or the user-provided evaluation hook.
        if solver.problem.eval_func is None:  # identity check, not == (PEP 8)
            indiv.output.simu.run()
        else:
            solver.problem.eval_func(indiv.output)

        # Sort the obj_func names so fitness ordering is deterministic
        obj_func_list = list(solver.problem.obj_func.keys())
        obj_func_list.sort()

        # Add the fitness values
        fitness = []
        for of in obj_func_list:
            fitness.append(float(solver.problem.obj_func[of].func(indiv.output)))
        indiv.fitness.values = fitness
        indiv.is_simu_valid = True
        evaluation_failure = False  # Evaluation succeed
    except KeyboardInterrupt:
        raise KeyboardInterrupt("Stopped by the user.")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/GeneratorExit propagate.
        # Logging
        print("The following simulation failed :", file=tb)
        if logger.level > 10:  # Log design variables values if it is not already done
            print("Design variables :", file=tb)
            for i, design_variable in enumerate(indiv.design_var_name_list):
                print(design_variable + " : " + str(indiv[i]), file=tb)

        # Log the simulation error
        traceback.print_exc(file=tb)
        logger.warning(tb.getvalue())

        # Sort the obj_func
        obj_func_list = list(solver.problem.obj_func.keys())
        obj_func_list.sort()

        # Set fitness as inf so the failed individual ranks last
        indiv.fitness.values = [float("inf") for _ in obj_func_list]
        indiv.is_simu_valid = False

        evaluation_failure = True  # Evaluation failed

    return evaluation_failure
|
[
"cedric.marinel@eomys.com"
] |
cedric.marinel@eomys.com
|
f75aad247ab631750bcc8185827295fbc9a08fb3
|
527bdfc1d0e49009d69dd611e74273b8846c4f0e
|
/rithviks_solutions/2017/Prob9/main.py
|
372b4a0964eba150e4c8fe5de46a4fbeabdcfd66
|
[] |
no_license
|
RithvikKasarla/2019-Infinite-Loop-Practice
|
3e0ddfae5c017de62ff2fd7b8522e062167c2f4a
|
c54808e89cbd2a18225100dde6914b3305400d91
|
refs/heads/master
| 2020-04-25T01:17:20.313357
| 2020-03-23T01:39:26
| 2020-03-23T01:39:26
| 172,404,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
def encoder(key, line):
    """Vigenere-style encoding of *line* with *key*.

    Each non-space character is lowercased and shifted by the key letter's
    alphabet position (a=1, b=2, ...), wrapping past 'z'.  Spaces pass
    through unchanged and do not advance the key position.
    """
    out = []
    pos = 0  # index into the key, advanced only on encoded characters
    for ch in line:
        if ch == " ":
            out.append(" ")
            continue
        shift = ord(key[pos % len(key)].lower()) - ord("a") + 1
        code = ord(ch.lower()) + shift - 1
        if code > ord("z"):
            # Wrap around past 'z' back to 'a'.
            code = code - ord("z") + ord("a") - 1
        out.append(chr(code))
        pos += 1
    return "".join(out)
with open("input.txt") as f:
    # First line is a case count (ignored); the rest alternate message / key.
    lines = f.readlines()[1:]
for line in range(0 , len(lines), 2):
    # Pairs are (message, key); encoder takes (key, message).
    print(encoder(lines[line+1].lower().strip(),lines[line].lower().strip()))
|
[
"arnavborborah11@gmail.com"
] |
arnavborborah11@gmail.com
|
1c594c122b75ef4ea73e6f9f6b91493174b4fe64
|
b6a79ae5312dd1a69b043aeb5466bc3b6bffa6aa
|
/fp/test_closure_change.py
|
f8df7a48ec3f5e121603ac85a298e69ab248386d
|
[] |
no_license
|
vchub/daily-python
|
5f54aedf77aba34ddc48ca06f9133990c2e82b0a
|
7d7740f4db4f8500c84eff353420c61242321f2e
|
refs/heads/master
| 2022-12-12T08:35:27.416071
| 2020-08-25T18:26:06
| 2020-08-25T18:26:06
| 290,292,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
def f():
    # Generator whose bound is the LOCAL x = 1, so it yields exactly one
    # value (0) regardless of attributes set on the function object.
    x = 1
    i = 0
    while i < x:
        yield i
        i += 1
def test():
    # Demonstrates that setting an attribute on the function object does
    # NOT change the generator's local `x`; only one item is produced.
    f.x = 5
    ff = f
    ff.x = 5
    g = ff()
    # g.x = 2
    res = list(x for x in g)
    assert len(res) == 1
|
[
"vlad.chub@gmail.com"
] |
vlad.chub@gmail.com
|
87e38073f5de8a642caa8473876b6c85d6fbf76d
|
2306161b6c6504bc3c7b9c1c167f2cf9f655a5f6
|
/test/functional/p2p_pos_fakestake.py
|
960e824e4620406c572aaec798103b1ecd895242
|
[
"MIT"
] |
permissive
|
Zoras2/OPCoinX
|
294081748b308b84a4890da286484c003cfebf60
|
d93b9f6a7f09d48ea661814a94211a9c9fc98309
|
refs/heads/master
| 2021-07-21T23:20:33.699815
| 2020-09-14T00:41:24
| 2020-09-14T00:41:24
| 167,402,324
| 1
| 0
|
MIT
| 2019-01-24T16:48:12
| 2019-01-24T16:48:12
| null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The OPCX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a PoS block where the coinstake input prevout is already spent.
'''
from time import sleep
from fake_stake.base_test import OPCX_FakeStakeTest
class PoSFakeStake(OPCX_FakeStakeTest):
    """Functional test: node must reject PoS blocks staking spent prevouts."""

    def run_test(self):
        self.description = "Covers the scenario of a PoS block where the coinstake input prevout is already spent."
        self.init_test()
        INITAL_MINED_BLOCKS = 150   # First mined blocks (rewards collected to spend)
        MORE_MINED_BLOCKS = 100     # Blocks mined after spending
        STAKE_AMPL_ROUNDS = 2       # Rounds of stake amplification
        self.NUM_BLOCKS = 3         # Number of spammed blocks

        # 1) Starting mining blocks
        self.log.info("Mining %d blocks.." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)

        # 2) Collect the possible prevouts
        self.log.info("Collecting all unspent coins which we generated from mining...")

        # 3) Create 10 addresses - Do the stake amplification
        self.log.info("Performing the stake amplification (%d rounds)..." % STAKE_AMPL_ROUNDS)
        utxo_list = self.node.listunspent()
        address_list = []
        for i in range(10):
            address_list.append(self.node.getnewaddress())
        utxo_list = self.stake_amplification(utxo_list, STAKE_AMPL_ROUNDS, address_list)
        self.log.info("Done. Utxo list has %d elements." % len(utxo_list))
        sleep(2)

        # 4) Start mining again so that spent prevouts get confirmted in a block.
        self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)

        # 5) Create "Fake Stake" blocks and send them
        self.log.info("Creating Fake stake blocks")
        err_msgs = self.test_spam("Main", utxo_list)
        # Any collected error message means a bad block was accepted.
        if not len(err_msgs) == 0:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")
        self.log.info("%s PASSED" % self.__class__.__name__)

if __name__ == '__main__':
    PoSFakeStake().main()
|
[
"knashanas@gmail.com"
] |
knashanas@gmail.com
|
fa51b64316f34902e418a7a004e81edb92be9c88
|
67f7c53d80faac734a96ef95bfe8f8a3927c1c33
|
/Cherry Pie.py
|
4dbfffdb4f6927d98cd50c187cff3efd43d23b9c
|
[] |
no_license
|
Lpadilla2018/Transfer-Photos
|
ad21f9a00ed64542c49260687afeffcab1a6a77a
|
f8d23b014a882a6e81734aa6e755c7914d4f14b4
|
refs/heads/master
| 2020-04-10T01:50:56.397430
| 2018-12-06T20:22:00
| 2018-12-06T20:22:00
| 160,727,333
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
# Cherry Pie.py
# 11/30/2018
# Developed by Louie Padilla
"""
Loop through main folder directory. For each file in a folder rename file to folderName_snapshot_#.jpg
"""
# Import modules
import os
from shutil import move
######################################################################################################
INPUT_DESTINATION = raw_input(
'Enter DESTINATION folder path\nEx. "C:\Users\Download\Destination": ')
INPUT_SOURCE = raw_input(
'Enter SOURCE folder path\nEx. "C:\Users\Download\Source": ')
DESTINATION_FILE_PATH = r"{}".format(INPUT_DESTINATION)
SOURCE_FOLDER_PATH = r"{}".format(INPUT_SOURCE)
def create_folder(destination, name):
new_path = r'{}\{}'.format(destination, name)
if not os.path.exists(new_path):
os.makedirs(new_path)
return new_path
def copy_files(folderName):
folderPath = r"{}\\{}".format(SOURCE_FOLDER_PATH, folderName)
count = 1
f = os.listdir(folderPath)
new_folder = create_folder(DESTINATION_FILE_PATH, folderName)
for name in f:
if name.endswith(".jpg"):
source_path = (os.path.join(folderPath, name))
newName = "{}_snapshot_{}.jpg".format(name.split("_")[0], count)
move(source_path, new_folder + "\\" + newName)
count = count + 1
# Main Function
def transfer_files():
# print(DESTINATION_FILE_PATH)
# gets folder paths
for root, dirs, files in os.walk(SOURCE_FOLDER_PATH):
for folder in dirs:
copy_files(str(folder))
print(folder)
transfer_files()
#############################################################################################################
|
[
"noreply@github.com"
] |
noreply@github.com
|
fd975001732ca43e6a45cbcefd0a09a0cf1fd7fa
|
a37963de31a67c214680d80d9ee3ce4611d28587
|
/mrl/modules/model.py
|
8f93b82dcc75932df0c875e7910016d0b4a2814d
|
[
"MIT"
] |
permissive
|
jingweiz/mrl
|
c4c614877760953b246125688e7df96f9081fc4e
|
c94ab1685aea85b0d328199adefca543227875af
|
refs/heads/master
| 2022-11-12T01:36:05.354935
| 2020-07-10T23:32:38
| 2020-07-10T23:32:38
| 279,804,300
| 0
| 1
|
MIT
| 2020-07-15T07:56:50
| 2020-07-15T07:56:49
| null |
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
import mrl
import torch
from typing import Callable
import os
import pickle
import dill
class PytorchModel(mrl.Module):
"""
Generic wrapper for a pytorch nn.Module (e.g., the actorcritic network).
These live outside of the learning algorithm modules so that they can easily be
shared by different modules (e.g., critic can be used by intrinsic curiosity module).
They are also saved independently of the agent module (which is stateless).
"""
def __init__(self, name : str, model_fn : Callable):
super().__init__(name, required_agent_modules=[], locals=locals())
self.model_fn = model_fn
self.model = self.model_fn()
def _setup(self):
if self.config.get('device'):
self.model = self.model.to(self.config.device)
def save(self, save_folder : str):
path = os.path.join(save_folder, self.module_name + '.pt')
torch.save(self.model.state_dict(), path)
def load(self, save_folder : str):
path = os.path.join(save_folder, self.module_name + '.pt')
self.model.load_state_dict(torch.load(path), strict=False)
def copy(self, new_name):
"""Makes a copy of the Model; e.g., for target networks"""
new_model = dill.loads(dill.dumps(self.model))
model_fn = lambda: new_model
return self.__class__(new_name, model_fn)
def __call__(self, *args, **kwargs):
if self.training:
self.model.train()
else:
self.model.eval()
return self.model(*args, **kwargs)
|
[
"silviu.pitis@gmail.com"
] |
silviu.pitis@gmail.com
|
75218de7038fa55db27ff99920b6fc6874ece6a1
|
d28d8388e4afb5fb2e98ff65feb89c9611a2bc9a
|
/python/python_env/bin/flask
|
8dcdcc335787e74fc1fed0adcd6b7266d3bd994a
|
[] |
no_license
|
baldpixels/DataLith
|
f5c4b384d856126167b7b20ef4ebb89e7ed6cc4a
|
d50052b1165d2162d0b12626d0972e50e9dc6030
|
refs/heads/master
| 2020-03-12T17:40:33.992806
| 2018-04-25T18:17:13
| 2018-04-25T18:17:13
| 130,742,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
#!/Users/Mammoth/github/scraper/spring2018-cp-group-436839-437632/python/python_env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"baldpixels@gmail.com"
] |
baldpixels@gmail.com
|
|
b64db5675c6a12447a0a4f82922098e73c823b62
|
28b513332b7162dbd79f82a28132582401ff28f9
|
/9.Manus & SubMenus.py
|
d86b751ed588b5f51494bfafdf684409b5874ba2
|
[] |
no_license
|
ProtikAcharjay/GUI_Tkinter
|
e19a9931907ab9bfaeff6f93a4dae5276848cb3b
|
6a54062db27ae85a4f2518cb4d1b4d0f9615f3e5
|
refs/heads/main
| 2023-05-12T21:30:02.511847
| 2021-06-01T16:00:05
| 2021-06-01T16:00:05
| 372,886,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
from tkinter import *
root=Tk()
root.geometry("800x500")
root.title("Manus In Tkinter")
#function
def function1():
print("New file created")
def function2():
print("Opening a file")
def function3():
print("File saved")
def function4():
print("File Coppied")
def function5():
print("File Pasted")
def function6():
print("File Deleted")
#Menu _ Basic one(Non drop down menubar)
# normalmenu=Menu(root)
# normalmenu.add_command(label="File",command=function1)
# normalmenu.add_command(label="Exit",command=quit)
# root.config(menu=normalmenu)
#Drop Down Menubar:
dropdownmenu=Menu(root)
#1st drop of a menubar
drop1= Menu(dropdownmenu,tearoff=0)
drop1.add_command(label="New",command=function1)
drop1.add_command(label="Open",command=function2)
drop1.add_separator()
drop1.add_command(label="Save",command=function3)
dropdownmenu.add_cascade(label="File",menu=drop1)
root.config(menu=dropdownmenu)
#2nd drop of a menubar
drop2= Menu(dropdownmenu,tearoff=0)
drop2.add_command(label="Copy",command=function4)
drop2.add_command(label="Paste",command=function5)
drop2.add_separator()
drop2.add_command(label="Delete",command=function6)
dropdownmenu.add_cascade(label="Edit",menu=drop2)
root.config(menu=dropdownmenu)
dropdownmenu.add_command(label="Exit",command=quit)
root.config(menu=dropdownmenu)
root.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
838f055b240dabb83ff3fb5ea75fbdd224a7ed9f
|
510c17f54e8898d9dfff590caa3cbd05417c356f
|
/signer-independent-pytorch/bck-24-01-19/main_vae_celebA.py
|
0716b58f5ad300cee1591e3f46d535cc6fd46c69
|
[] |
no_license
|
pmmf/DeSIRe
|
c65172ce4a0b9d4aaf6317a04c1f33f69d849501
|
c4a68246c9e436481a61879328ff28b70dbb3b11
|
refs/heads/master
| 2022-04-08T18:57:37.099211
| 2020-03-05T11:03:55
| 2020-03-05T11:03:55
| 168,131,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,640
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.utils.data import Dataset
import h5py
import numpy as np
from glob import glob
import os
from skimage import io
from PIL import Image
import matplotlib.pyplot as plt
from skimage import io
from torchsummary import summary
def merge_images(images, size):
# merge all output images(of sample size:8*8 output images of size 64*64) into one big image
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images): # idx=0,1,2,...,63
i = idx % size[1] # column number
j = idx // size[1] # row number
img[j*h:j*h+h, i*w:i*w+w, :] = image
return img
def inverse_transform(x):
x = x.to('cpu').numpy()
x = x.transpose((0, 2, 3, 1))
x = (x+1.)/2.
return x
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
sample = sample.transpose((2, 0, 1))
sample = torch.from_numpy(sample).float()/127.5 - 1.
return sample
class CelebA(Dataset):
def __init__(self, transform=None):
self.transform = transform
self.data_fn = '/data/DB/celebA/img_align_celeba_crop/'
self.imgs_list = sorted(os.listdir(self.data_fn))
def __len__(self):
return len(self.imgs_list)
def __getitem__(self, index):
img = io.imread(os.path.join(*(self.data_fn, self.imgs_list[index])))
if self.transform is not None:
img = self.transform(img)
return img
class KinectLeap(Dataset):
def __init__(self,
data_fn='/data/DB/kinect_leap_dataset_signer_independent/',
n_person=14,
n_gesture=10,
n_repetions=10,
extension='_rgb.png',
data_type='RGB_CROPS_RZE_DISTTR2',
cmap=-1,
validation=None,
transform=None):
self.data_fn = data_fn
self.n_person = n_person
self.n_gesture = n_gesture
self.n_repetions = n_repetions
self.data_type = data_type
self.extension = extension
self.cmap = cmap
self.validation = validation
self.transform = transform
self.imgs_list = sorted(os.listdir(self.data_fn))
def __len__(self):
return self.n_person*self.n_gesture*self.n_repetions
def index2dataset(self, index):
# get person, gesture and repetion indexes from global index
n_rep = index % self.n_repetions + 1
n_gesture = int(np.floor(index/self.n_gesture) % self.n_repetions) + 1
n_person = int(np.floor(index/(self.n_gesture*self.n_repetions))) + 1
return n_person, n_gesture, n_rep
def __getitem__(self, index):
# get indexes
n_person, n_gesture, n_rep = self.index2dataset(index)
# read image
img_fn = os.path.join(*[self.data_fn,
"P" + str(n_person),
"G" + str(n_gesture),
self.data_type,
str(n_rep) + self.extension])
print(index, img_fn)
img = io.imread(img_fn)
# transform
if self.transform is not None:
img = self.transform(img)
return img, n_gesture-1, n_person-1
class VAE(nn.Module):
def __init__(self,
input_shape=(3, 64, 64),
base_filters=64,
z_dim=128,
kernel_size=5,
learning_rate=1e-03,
batch_size=64,
KLD_weight=5e-04):
super(VAE, self).__init__()
self.input_shape = input_shape
self.base_filters = base_filters
self.z_dim = z_dim
self.kernel_size = kernel_size
self.learning_rate = learning_rate
self.batch_size = batch_size
self.KLD_weight = KLD_weight
# encoder
self.encoder = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=self.kernel_size, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.LeakyReLU(negative_slope=0.2),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(128),
nn.LeakyReLU(negative_slope=0.2),
nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(256),
nn.LeakyReLU(negative_slope=0.2),
nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(512),
nn.LeakyReLU(negative_slope=0.2))
# mean and var
self.fc_mean = nn.Linear(4*4*512, self.z_dim)
self.fc_var = nn.Linear(4*4*512, self.z_dim)
self.bn_mean = nn.BatchNorm1d(self.z_dim)
self.bn_var = nn.BatchNorm1d(self.z_dim)
# decoder
self.fc_d1 = nn.Linear(self.z_dim, 8*8*256)
self.bn_d1 = nn.BatchNorm2d(256*8*8)
self.lr_d1 = nn.LeakyReLU(negative_slope=0.2)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(256, 256, kernel_size=self.kernel_size, stride=2, padding=2, output_padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(negative_slope=0.2),
nn.ConvTranspose2d(256, 128, kernel_size=5, stride=2, padding=2, output_padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(negative_slope=0.2),
nn.ConvTranspose2d(128, 32, kernel_size=5, stride=2, padding=2, output_padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(negative_slope=0.2),
nn.ConvTranspose2d(32, 3, kernel_size=5, stride=1, padding=2, output_padding=0),
# nn.BatchNorm2d(512), # NO BNORM ON LAST LAYER
nn.Tanh())
def encode(self, x):
enc = self.encoder(x)
enc = enc.reshape(enc.size(0), -1)
z_mean = self.bn_mean(self.fc_mean(enc))
z_log = F.softplus(self.bn_var(self.fc_var(enc))) + 1e-06
return z_mean, z_log
def reparameterize(self, z_mean, log_var):
std = torch.exp(log_var/2)
eps = torch.randn_like(std)
return z_mean + eps * std
def decode(self, z):
h = self.fc_d1(z)
h = h.view(-1, 256, 8, 8)
h = self.lr_d1(self.bn_d1(h))
h = self.decoder(h)
return h
def forward(self, x):
# encode
z_mean, log_var = self.encode(x)
# reparameterization trick
z = self.reparameterize(z_mean, log_var)
# decode
x_reconst = self.decode(z)
return x_reconst, z_mean, log_var
# def plain_vae_loss(inputs, r_mean, z_mean, z_log_var_sq):
# loss_reconstruction = torch.mean(torch.square(r_mean - inputs), axis=-1)
# loss_KL = torch.mean(- 0.5 * K.sum(1 + z_log_var_sq - K.square(z_mean) - K.exp(z_log_var_sq), axis=1), axis=0)
# return loss_reconstruction + (self.KLD_weight * loss_KL)
if __name__ == '__main__':
dataset = KinectLeap()
print(0, dataset.index2dataset(0))
print(10, dataset.index2dataset(10))
print(100, dataset.index2dataset(100))
print(101, dataset.index2dataset(101))
print(751, dataset.index2dataset(751))
print(1000, dataset.index2dataset(1000))
print(1001, dataset.index2dataset(1001))
print(1399, dataset.index2dataset(1399))
print(len(dataset))
print(dataset[0])
for i in range(len(dataset)):
X, y, group = dataset[i]
print(y, group)
plt.figure()
plt.imshow(X)
plt.axis('off')
plt.show()
# print(tsfrm(celeba_data[i]).shape)
adasd
BATCH_SIZE = 64
num_epochs = 30
KLD_weight = 5e-04
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
## DATA
tsfrm = ToTensor()
celeba_data = CelebA(tsfrm)
# creating data indices for training and validation splits
dataset_size = len(celeba_data) # number of samples in training + validation sets
indices = list(range(dataset_size))
split = int(np.floor(0.2 * dataset_size)) # no. samples in valid set
np.random.seed(42)
np.random.shuffle(indices)
train_indices, valid_indices = indices[split:], indices[:split]
print(len(train_indices), len(valid_indices))
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indices)
train_loader = torch.utils.data.DataLoader(celeba_data, batch_size=BATCH_SIZE,
shuffle=False, num_workers=4,
sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(celeba_data, batch_size=BATCH_SIZE,
shuffle=False, num_workers=4,
sampler=valid_sampler)
## MODEL
model = VAE().to(device)
print(model)
summary(model, (3, 64, 64))
model = VAE().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-03)
# Start training
for epoch in range(num_epochs):
model.train()
for i, x in enumerate(train_loader):
# Forward pass
x = x.to(device)
# print("IN: ", x.shape)
r_mean, z_mean, z_log_var = model(x)
# Compute reconstruction loss and kl divergence
reconst_loss = torch.mean((r_mean - x)**2)
loss_KL = torch.mean(- 0.5 * torch.sum(1 + z_log_var - z_mean**2 - torch.exp(z_log_var), dim=1), dim=0)
loss = reconst_loss + (KLD_weight * loss_KL)
# print(loss)
# Backprop and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 50 == 0:
# print(loss, reconst_loss, loss_KL)
print ("Epoch[{}/{}], Step [{}/{}], VAE Loss: {:.4f}, Reconst Loss: {:.4f}, KL Div: {:.4f}"
.format(epoch+1, num_epochs, i+1, len(train_loader), loss, reconst_loss, loss_KL))
if (i+1) % 500 == 0:
org_images = merge_images(inverse_transform(x), (8, 8))
rec_images = merge_images(inverse_transform(r_mean.detach()), (8, 8))
# var.detach().numpy()
plt.figure()
plt.subplot(121)
plt.imshow(org_images)
plt.axis('off')
plt.subplot(122)
plt.imshow(rec_images)
plt.axis('off')
plt.show()
z = torch.randn(BATCH_SIZE, 128).to(device)
new_rec = model.decode(z)
new_images = merge_images(inverse_transform(new_rec.detach()), (8, 8))
plt.figure()
plt.imshow(new_images)
plt.axis('off')
plt.show()
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
loss = 0
reconst_loss = 0
loss_KL = 0
for i, x in enumerate(valid_loader):
# Forward pass
x = x.to(device)
# print("IN: ", x.shape)
r_mean, z_mean, z_log_var = model(x)
# Compute reconstruction loss and kl divergence
reconst_loss = torch.mean((r_mean - x)**2)
loss_KL = torch.mean(- 0.5 * torch.sum(1 + z_log_var - z_mean**2 - torch.exp(z_log_var), dim=1), dim=0)
loss += reconst_loss + (KLD_weight * loss_KL)
loss_KL += loss_KL
reconst_loss += reconst_loss
# print(i, loss)
print ("[VALID], VAE Loss: {:.4f}, Reconst Loss: {:.4f}, KL Div: {:.4f}".format(loss/(i+1), reconst_loss/(i+1), loss_KL/(i+1)))
ada
tsfrm = ToTensor()
celeba_data = CelebA()
print(len(celeba_data))
print(celeba_data[0])
for i in range(len(celeba_data)):
plt.figure()
plt.imshow(celeba_data[i])
plt.axis('off')
plt.show()
print(tsfrm(celeba_data[i]).shape)
pass
|
[
"noreply@github.com"
] |
noreply@github.com
|
e947e0a17fdf220132f2f1568544ca47d5787acc
|
05fe775032bb3a08c1752421dbb52a8c52a96c85
|
/반복문/range.py
|
c83060123dc106808df5458dcb5857d907fe4c92
|
[] |
no_license
|
nexiom1221/Phyton-Practice
|
58843d507d87a9ac99b20dbf8ca215e5ba25fb64
|
aeff0942a26b990c7891ac11b58f45e40f024b11
|
refs/heads/master
| 2022-12-07T08:59:26.916976
| 2020-09-07T13:14:55
| 2020-09-07T13:14:55
| 270,298,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
n = 10
a = range(0, int(n/2))
print(list(a))
for i in range(5):
print(str(i+1) + "= 반복 변수")
array = [273, 32, 103, 57, 52]
for i in range(len(array)):
print("{} 번쨰 반복 {}".format(i+1, array[i]))
for i in range(4, 0-1 , -1):
print(i)
print()
for i in reversed(range(5)):
print(i)
i =0
while i< 10:
print("{}번쨰 반복".format(i))
i += 1
list_test = [1,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,4,5,6,8,7,9,100]
value = 2
while value in list_test:
list_test.remove(value)
print(list_test)
|
[
"superdizmon1@gmail.com"
] |
superdizmon1@gmail.com
|
630f9556a0812fab978009d4e1ed1064a8794300
|
9824860061cc00b51c7065229bf5ae30f6ea2589
|
/qlpdb/qlpdb/tdse/migrations/0002_remove_tdse_test.py
|
d0b865fb01f487f41f1d90eeee8058a63e5bc0fd
|
[] |
no_license
|
ScorpXOY/quantum_linear_programming
|
0a8c5d2b4e845a559c6fc3fffd83e8e0a2340aed
|
66690b10c3c379a91a73db3e0b8b02a2b8c0de11
|
refs/heads/master
| 2023-03-24T05:58:53.306000
| 2021-02-04T15:22:27
| 2021-02-04T15:22:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# Generated by Django 3.0.3 on 2020-04-22 08:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tdse', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='tdse',
name='test',
),
]
|
[
"pyo0220@gmail.com"
] |
pyo0220@gmail.com
|
1d6f34aff2427b5cf195571a5cbfda9bbdd1d18c
|
46e657af193e8e1c1045b100787c4ed3126ca98b
|
/src/old/jnntools.py
|
3a00ca97f0ffb06e1fa54fd4c6db9cae9c6eb4da
|
[] |
no_license
|
sinancemy/jamnet
|
77487bf73f1cfce6edc0d7294bfcd35fd01e3762
|
045289b39dd4f647131885159ba99ad0e96d978b
|
refs/heads/master
| 2023-03-21T03:06:41.789908
| 2021-02-23T18:06:36
| 2021-02-23T18:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,227
|
py
|
import torch
import mido.midifiles.tracks as mitracks
from mido import MidiFile, MidiTrack, Message, MetaMessage
import copy
import numpy as np
# OLD VERSION OF midi2roll.py. Converts to and from a "note array" format instead of tensors.
# Not deleting in case I find a use for this.
''' MIDI <-> JNN converter for creating JamNet data-sets and playing JamNet outputs. '''
SIGNATURE = "Generated by JamNet Converter"
BASS = [32, 33, 34, 35, 36, 37, 38, 39]
PIANO = [0, 1, 2, 3, 4, 5, 6, 7]
ORGAN = [16, 17, 18, 19, 20, 21, 22, 23]
GUITAR = [24, 25, 26, 27, 28, 29, 30, 31]
SAX = [64, 65, 66, 67, 69] # nice
BRASS = [56, 57, 58, 59, 60, 61, 62, 63]
CLARINET = [71]
FLUTE = [72, 73]
VIBRAPHONE = [11, 12, 13]
# STRINGS = [40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
INSTRUMENTS = {"bass": BASS, "acmp": PIANO + ORGAN + GUITAR,
"lead": SAX + BRASS + CLARINET + FLUTE + VIBRAPHONE}
PERC_NAMES = ["perc", "drum", "snare", "cymb", "bongo", "shake", "bd", "hh", "kick", "hi-hat", "hihat", "hi hat",
"stick", "conga", "tamb", "agogo", "tom", "cabasa", "kit", "brush", "bell", "ride", "crash", "hat"]
PERC_SAFE_NAMES = ["org", "chro"]
LEAD_NAMES = ["lead", "melody", "voice"]
JNN_PIECE_PARTS = ["bass", "acmp", "lead", "tempo"]
# Instruments for output pieces.
JNN_INSTRUMENTS = {"bass": 32, "acmp": 0, "lead": 11} # 26 = jazz guitar
def midi_to_jnn(full_raw_midi_dir):
""" MIDI to JNN """
midif = MidiFile(full_raw_midi_dir)
midi_tracks = parse_midi_tracks(midif)
grouped_tracks = group_midi_tracks(midi_tracks)
jnn_piece = grouped_tracks_to_jnn(grouped_tracks, midif)
jnn_piece["tempo"] = parse_tempo(midif)
return jnn_piece
def jnn_to_tensor(jnn_piece):
np_piece = np.zeros((3, 88, 1))
for i, g in enumerate(jnn_piece):
for n in jnn_piece[g]:
mx = max(mx, n[1] + n[2])
def parse_tempo(midif):
for i, track in enumerate(midif.tracks):
for msg in track:
if msg.type == "set_tempo":
return msg.tempo
def grouped_tracks_to_jnn(grouped_tracks, midif):
# TODO: TORCH
jnn_piece_ = copy.deepcopy(INSTRUMENTS)
starts = []
for g in grouped_tracks:
if grouped_tracks[g]:
jnn_piece_[g], start = parse_notes(
mitracks.merge_tracks([track for i, track in enumerate(midif.tracks) if i in grouped_tracks[g]]))
starts.append(start)
else:
jnn_piece_[g] = [None] # No track
if len(starts) > 1:
start = min(starts)
for g in jnn_piece_:
for n in jnn_piece_[g]:
if not n is None:
n[1] -= start
return jnn_piece_
def parse_notes(track):
# TODO: TORCH
midi_note_messages = []
for msg in mitracks._to_abstime(track):
if msg.type == 'note_on' or msg.type == 'note_off':
midi_note_messages.append(msg)
notes = []
start = None
for i in range(0, len(midi_note_messages)):
if midi_note_messages[i].type == 'note_on':
if start is None:
start = midi_note_messages[i].time
new = [midi_note_messages[i].note, midi_note_messages[i].time, -midi_note_messages[i].time]
for j in range(i + 1, len(midi_note_messages)):
if midi_note_messages[j].type == 'note_off' and midi_note_messages[j].note == new[0]:
new[2] += midi_note_messages[j].time
break
notes.append(new)
return clean_identical_notes(notes, 128), start
def group_midi_tracks(midi_tracks):
grouped_tracks = copy.deepcopy(INSTRUMENTS)
for l in grouped_tracks:
grouped_tracks[l].clear()
for id, name, instrument in midi_tracks:
grouped_tracks[instrument].append(id)
return grouped_tracks
def parse_midi_tracks(midif):
tracks = []
for i, track in enumerate(midif.tracks):
track_name = parse_midi_track_name(track)
instrument_type = parse_midi_track_instrument(track)
if not instrument_type is None:
is_perc = False
for perc_name in PERC_NAMES:
if perc_name in track_name.lower():
is_perc = True
for perc_safe_name in PERC_SAFE_NAMES:
if perc_safe_name in track_name.lower():
is_perc = False
break
is_lead = False
for lead_name in LEAD_NAMES:
if lead_name in track_name.lower():
is_lead = True
if is_lead:
tracks.append((i, track_name, 'lead'))
elif not is_perc:
tracks.append((i, track_name, instrument_type))
return tracks
def parse_midi_track_instrument(track):
for msg in track:
if msg.type == 'program_change':
for inst_type in INSTRUMENTS:
for midi_index in INSTRUMENTS[inst_type]:
if msg.program == midi_index:
return inst_type
return None
def parse_midi_track_name(track):
for msg in track:
if msg.type == 'track_name':
return msg.name
if msg.type == 'text':
return msg.text
return "null"
def jnn_to_midi(jnn_piece, full_midi_save_dir):
""" JNN to MIDI. """
tempo = jnn_piece.pop("tempo")
midif = MidiFile(type=1)
midif.tracks.append(generate_header_track(tempo))
for channel, jnn_instrument in enumerate(jnn_piece):
if not jnn_piece[jnn_instrument][0] is None:
midif.tracks.append(jnn_track_to_midi_track(jnn_piece[jnn_instrument], jnn_instrument, channel))
midif.save(full_midi_save_dir)
return midif
def generate_header_track(tempo):
# TODO: TORCH
midit = MidiTrack()
midit.append(MetaMessage("time_signature", numerator=4, denominator=4,
clocks_per_click=24, notated_32nd_notes_per_beat=8, time=0))
midit.append(MetaMessage("set_tempo", tempo=int(tempo / 2), time=0))
midit.append(MetaMessage("copyright", text=SIGNATURE))
midit.append(MetaMessage("end_of_track", time=1))
return midit
def jnn_track_to_midi_track(jnn_track, jnn_instrument, channel):
# TODO: TORCH
messages = [Message(type='program_change', channel=channel, program=JNN_INSTRUMENTS[jnn_instrument], time=0),
MetaMessage("track_name", name=jnn_instrument, time=0)]
for jnn_note in jnn_track:
note, start, duration = jnn_note
messages.append(Message(type="note_on", channel=channel, note=note, velocity=50, time=start))
messages.append(Message(type="note_off", channel=channel, note=note, velocity=0, time=start + duration))
messages.sort(key=lambda msg: msg.time)
return MidiTrack(mitracks.fix_end_of_track(mitracks._to_reltime(messages)))
def jnnize_midi(full_raw_midi_dir, full_jnnized_midi_save_dir):
""" JNN'ize raw MIDI file. """
jnn_to_midi(midi_to_jnn(full_raw_midi_dir), full_jnnized_midi_save_dir)
def clean_identical_notes(notes, factor, mode=1):
""" mode=0 : only removes completely identical notes
mode=1 : removes all notes with the same pitch & starting time except the one with the longest duration"""
if mode == 0:
return [list(note) for note in {tuple(note) for note in notes}]
elif mode == 1:
note_time_dict = {}
for note in notes:
if int(note[1] / factor) not in note_time_dict.keys():
note_time_dict[int(note[1] / factor)] = list([note])
else:
copy_note = [(i, existing) for i, existing in enumerate(note_time_dict[int(note[1] / factor)]) if
existing[0] == note[0]]
if copy_note:
i, copy_note = copy_note[0]
if copy_note[2] < note[2]:
note_time_dict[int(note[1] / factor)][i] = note
else:
note_time_dict[int(note[1] / factor)].append(note)
notes = list()
for t in note_time_dict:
for note in note_time_dict[t]:
notes.append(note)
return notes
|
[
"sinancem_yucel@hotmail.com"
] |
sinancem_yucel@hotmail.com
|
c3dbd510be0238b80ddc6e66505d342b901e56c1
|
4a3b42ef1d5708979e64b1cebccd4a9335c1b62b
|
/find_fruit.py
|
954d167c9b47a75c15d5b7c3f5fa48fb8d5c075d
|
[] |
no_license
|
grnbeltwarrior/Find-Fruit
|
34cd2297253052ee905f7e16e3f5894855174aed
|
ccc3e2ccac9df11219e5cd19c73d78857654abbf
|
refs/heads/master
| 2020-03-21T15:53:55.321515
| 2018-06-26T17:22:20
| 2018-06-26T17:22:20
| 138,739,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
#!/usr/bin/python
# reworked into python from rvrsh3ll's Find-Fruit.ps1
# https://github.com/rvrsh3ll/Misc-Powershell-Scripts/blob/master/Find-Fruit.ps1
import sys
import socket
import getopt
import threading
import subprocess
import requests
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
ip = ""
ports = ""
https = False
timeOut = 0.5
vuln_links = ['/','jmx-console/',
'web-console/ServerInfo.jsp',
'invoker/JMXInvokerServlet',
'system/console',
'axis2/axis2-admin/',
'manager/html',
'tomcat/manager/html',
'wp-admin',
'workorder/FileDownload.jsp',
'ibm/console/logon.jsp?action=OK',
'data/login',
'script/',
'opennms']
def usage():
print
print "______ _ _ _____ _ ______ _ _"
print "| ___|(_) | | |_ _|| | | ___| (_)| |"
print "| |_ _ _ __ __| | | | | |__ ___ | |_ _ __ _ _ _ | |_"
print "| _| | || '_ \ / _` | | | | '_ \ / _ \ | _|| '__|| | | || || __|"
print "| | | || | | || (_| | | | | | | || __/ | | | | | |_| || || |_"
print "\_| |_||_| |_| \__,_| \_/ |_| |_| \___| \_| |_| \__,_||_| \__|"
print
print "Usage: find_fruit.py -t target_host -p list_of_ports"
print
print "Example: "
print "find_fruit.py -t 10.10.10.10 -p 80,443,8080,8443"
sys.exit()
def urlBuilder(http,portColon,port):
for path in vuln_links:
url = http + '://' + ip + portColon + port + '/' + path
def heavy_lifting():
global ip
global ports
for port in ports:
if int(port) == 443:
http = 'https'
portColon = ''
port = ''
elif int(port) == 80:
http = 'http'
portColon = ''
port = ''
else:
http = 'http'
portColon = ':'
urlBuilder(http,portColon,port)
for path in vuln_links:
url = http + '://' + ip + portColon + port + '/' + path
headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}
try:
data = requests.get(url, headers, timeout=timeOut, verify=False)
if str(data.status_code) == '200':
print "The following URL returned a status of OK: " + url
print data.text + '\r\n'
except requests.exceptions.Timeout:
pass
#print "The following URL timed out: " + url #Uncomment these if you want the errors to go to the console.
except requests.exceptions.RequestException as e:
pass
#print "The following error occurred: " + e #Uncomment these if you want the errors to go to the console.
def main():
global ip
global ports
if not len(sys.argv[1:]):
usage()
try:
opts, args = getopt.getopt(sys.argv[1:],"t:p:",["target","port"])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-t","--target"):
ip = a
print a
elif o in ("-p","--port"):
ports = a.split(',')
print ports
else:
assert False, "Unhandled Option"
heavy_lifting()
main()
|
[
"gabrielthompson@gabrielthompson.com"
] |
gabrielthompson@gabrielthompson.com
|
e96e099b25cfb3fc367f85f23be963095437e653
|
a9fc496e0724866093dbb9cba70a8fdce12b67a9
|
/scripts/quest/q5523e.py
|
65c50af387328753c10ae50e98802bd1ea180dff
|
[
"MIT"
] |
permissive
|
ryantpayton/Swordie
|
b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0
|
ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e
|
refs/heads/master
| 2022-12-01T09:46:47.138072
| 2020-03-24T10:32:20
| 2020-03-24T10:32:20
| 253,997,319
| 2
| 0
|
MIT
| 2022-11-24T08:17:54
| 2020-04-08T05:50:22
|
Java
|
UTF-8
|
Python
| false
| false
| 105
|
py
|
# Tot's reward lv 60
sm.completeQuest(5523)
# Lv. 60 Equipment box
sm.giveItem(2433958, 1)
sm.dispose()
|
[
"tim.blokk@gmail.com"
] |
tim.blokk@gmail.com
|
93161c94b7e980743a53d94aa3a20ea99bd7324b
|
261e6eff44097d9dbe217f061cb4c8ab13f02e37
|
/vce_health_care/wsgi.py
|
1fc8e9c2f1d592cc6d8b36a8730f887877d3e246
|
[
"MIT"
] |
permissive
|
1919kiran/healthcare-portal
|
e83b84b8d47886c8210cf0529674eef9a7202fcc
|
547d734762b6698adf8f5a7a45418e8553ca9397
|
refs/heads/master
| 2022-12-11T08:26:36.743965
| 2019-04-28T19:15:00
| 2019-04-28T19:15:00
| 180,219,725
| 0
| 0
|
MIT
| 2022-12-08T04:57:43
| 2019-04-08T19:34:36
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for vce_health_care project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vce_health_care.settings')
application = get_wsgi_application()
|
[
"iam191911918114@gmail.com"
] |
iam191911918114@gmail.com
|
5b722af7a1935ec56d65f9a898b93a8698f4d229
|
e472f5209789cd50cc9129683c4918d63e3ddb41
|
/ANALYSIS_SCRIPTS/heepElastics_Analysis/hms_heep_summary/hms_electrons/dW_variations.py
|
27e94f9c7809a718568f3ed46dd58176cc8d6fc8
|
[] |
no_license
|
Yero1990/DEUTERON_ANALYSIS
|
a8447557de109302ca19e360c8e5d0f1280b0dd6
|
c0f107ec907a47a0f83b2175222ac7c48d199af2
|
refs/heads/master
| 2023-08-06T12:18:37.639062
| 2021-09-14T18:01:11
| 2021-09-14T18:01:11
| 160,069,441
| 1
| 1
| null | 2019-11-14T15:20:44
| 2018-12-02T16:57:42
|
C
|
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
# Code to determine the variations in E, E', theta_e from the variations
# measured in W_simc - W_data
import LT.box as B
import numpy as np
from numpy import ndarray

f = B.get_file('hms_heep_summary_FINAL.data')

# Define masses
Mp = 0.938272    # proton mass GeV
me = 0.00051099  # electron mass GeV

# conversion factor from deg to radians
dtr = np.pi / 180.

# Get the data from datafile
run = B.get_data(f, 'Run')
nmr_true = B.get_data(f, 'nmr_true')
kf = B.get_data(f, 'nmr_P')           # central electron momentum
theta_e = B.get_data(f, 'hms_Angle')  # e- arm central angle
Pf = B.get_data(f, 'shms_P')          # central proton arm momentum
theta_p = B.get_data(f, 'shms_Angle') # p arm central angle
Eb = B.get_data(f, 'beam_e')          # beam energy
data_W_mean = B.get_data(f, 'data_W_mean')
data_W_mean_err = B.get_data(f, 'data_W_mean_err')
data_W_sigma = B.get_data(f, 'data_W_sigma')
simc_W_mean = B.get_data(f, 'simc_W_mean')
simc_W_mean_err = B.get_data(f, 'simc_W_mean_err')
simc_W_sigma = B.get_data(f, 'simc_W_sigma')

# Declare arrays (22 = number of kinematic groups in the summary file)
dW_mean = ndarray(22)      # W_simc_fit - W_data_fit
dW_mean_err = ndarray(22)  # error in dW_mean
dE_beam = ndarray(22)      # beam energy variation
d_kf = ndarray(22)         # central momentum variation
d_theta = ndarray(22)      # central spec. angle variation
corr_factor = ndarray(22)  # momentum corr. factor
kf_corr = ndarray(22)      # corrected momentum
dW_dkf = ndarray(22)       # derivative dW/dkf = -Eb / kf
dkf_kf = ndarray(22)       # relative uncertainty in cent. momentum dkf / kf
index = [0] * 22           # index to keep track of runs

# Add keys to the existing kin file
f.add_key('dW_meas', 'f')
f.add_key('dW_meas_err', 'f')
f.add_key('dW_dkf', 'f')
f.add_key('dkf', 'f')
f.add_key('dkf_kf', 'f')
f.add_key('kf_corr_factor', 'f')
f.add_key('kf_corr', 'f')

# Loop over all kin groups
# NOTE(review): the loop variable rebinds the 'run' array name; enumerate
# already holds the iterator so this works, but renaming would be safer.
for i, run in enumerate(run):
    # Calculate variations and errors in W
    dW_mean[i] = simc_W_mean[i] - data_W_mean[i]
    dW_mean_err[i] = np.sqrt( (simc_W_mean_err[i])**2 + (data_W_mean_err[i])**2 )
    # Formulas for variations in beam energy, e- angle and e- momentum
    dE_beam[i] = Eb[i] / kf[i] * dW_mean[i]   # variation in beam energy
    d_kf[i] = - kf[i] / Eb[i] * dW_mean[i]    # variation in electron arm momentum
    d_theta[i] = -1. / (2.*Eb[i]*kf[i]*np.sin(0.5*theta_e[i]*dtr)*np.cos(0.5*theta_e[i]*dtr) / Mp ) * dW_mean[i]  # variation in electron arm angle
    dW_dkf[i] = - Eb[i] / kf[i]
    dkf_kf[i] = - dW_mean[i] / Eb[i]
    # Assume the correction is from momentum ONLY
    corr_factor[i] = 1. - (dW_mean[i]/Eb[i])
    kf_corr[i] = kf[i]*corr_factor[i]
    f.data[i]['dW_meas'] = round(dW_mean[i],5)
    f.data[i]['dW_meas_err'] = round(dW_mean_err[i],5)
    f.data[i]['dW_dkf'] = round(dW_dkf[i],5)
    f.data[i]['dkf'] = round(d_kf[i],5)
    f.data[i]['dkf_kf'] = round(dkf_kf[i],5)
    f.data[i]['kf_corr_factor'] = round(corr_factor[i],5)
    f.data[i]['kf_corr'] = round(kf_corr[i],5)
    index[i] = i + 1

# Write the augmented table back out once all groups are filled.
f.save('hms_heep_summary_FINAL_v2.data')
#    print('dW_mean = ', round(dW_mean[i],4), 'dE_beam = ', round(dE_beam[i],4), 'd_kf = ', round(d_kf[i],4), 'd_theta = ', round(d_theta[i],4), 'kf = ', round(kf[i],4), 'corr_fac = ',round(corr_factor[i],4), 'kf_corr = ', round(kf_corr[i],4) )

#B.plot_exp(nmr_true, kf_corr)
#B.plot_exp(dW_mean, corr_factor)
#B.plot_exp(dE_beam, dW_mean, dW_mean_err )
#B.plot.xlabel('Beam Energy Variation [GeV]')
#B.plot.ylabel('Invariant Mass W Variation [GeV]')
#B.plot_exp(d_kf, dW_mean, dW_mean_err )
#B.plot_exp(d_theta/dtr, dW_mean, dW_mean_err )
#B.plot_exp(kf, dkf_kf)
#B.pl.xlabel('HMS Un-Corrected Central Momentum, E\' [GeV]')
#B.pl.ylabel('Relative Uncertainty dE\' / E\' ')
#B.pl.title('HMS dE\'/E\' vs E\' ')
#B.pl.grid(True)
#B.pl.show()
|
[
"jones@jlab.org"
] |
jones@jlab.org
|
0c3976214f8e28555d2e3ff9dd37ab37dd2c712b
|
251e4de91841fc42959e89211d3501ce24c4435e
|
/eventdriven/adapter/base.py
|
253f683289151bfeaaceae339ac6fba3956f10e6
|
[
"Apache-2.0"
] |
permissive
|
ZSAIm/EventDriven
|
df1251c4e9f3f382600159d6626a6c959670c438
|
92bed2b3cde9249724f9cc25f3d19470abda5b9b
|
refs/heads/master
| 2020-12-07T17:04:32.511933
| 2020-02-20T07:51:18
| 2020-02-20T07:51:18
| 232,758,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
# -*- coding: UTF-8 -*-
from abc import ABC
class AbstractAdapter(ABC):
    """Base class for adapters installed on an event-driven controller.

    The controller invokes these lifecycle hooks at well-defined points;
    subclasses override only the hooks they need.
    """

    def __setup__(self, parent, name, **options):
        """Called during adapter installation to initialise the instance."""
        self._parent = parent
        self._instance_name = name
        self._options = options

    def __name__(self):
        """Return the adapter instance name."""
        return self._instance_name

    def __patch__(self):
        """Patch the controller after __setup__."""
        pass

    def __running__(self):
        """Controller is starting (before its thread starts)."""
        pass

    def __run__(self):
        """Called after the controller has started."""
        pass

    def __closing__(self):
        """Called after the controller has raised a close event."""
        pass

    def __closed__(self):
        """Called after the controller's event loop has closed."""
        pass

    def __exception__(self, error):
        """Called when the controller's event handling raises an exception."""
        pass

    def __suspend__(self):
        """Called after the controller raises a suspend event."""
        pass

    def __resume__(self):
        """Called after the controller raises a resume-from-suspend event."""
        pass

    def __mapping__(self):
        """Return the event-handler mapping to register."""
        return {}

    def __context__(self):
        """Return global dynamic context entries to add."""
        return {}

    def __static__(self):
        """Return static context entries to add."""
        return {}

    @staticmethod
    def __unique__():
        """Return whether only a single instance may be installed."""
        return False

    @staticmethod
    def __dependencies__():
        """Return the list of adapter dependencies."""
        return []
|
[
"zzsaim@163.com"
] |
zzsaim@163.com
|
e6fb7f33e217a344b1fb817bb005bc2c3ba1cee9
|
58618ee13a29bb51a27a2e60c2585f9cb1d8c274
|
/app.py
|
5e3cb9b9b823985834a4190189886e20fe22c7ab
|
[] |
no_license
|
abhisharma5/Python_chatbot
|
54f4e526a7d8b24804831901d2469731f3d52c6d
|
19e3805ecbeb26592b4fbb795d62ae4de215a9b2
|
refs/heads/master
| 2022-10-08T18:54:50.884320
| 2020-06-08T04:53:50
| 2020-06-08T04:53:50
| 270,530,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from flask import Flask, render_template, request
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer

app = Flask(__name__)

# create chatbot backed by SQLite storage; training runs once at import time
englishBot = ChatBot("Chatterbot", storage_adapter="chatterbot.storage.SQLStorageAdapter")
trainer = ChatterBotCorpusTrainer(englishBot)
trainer.train("chatterbot.corpus.english")  # train the chatter bot for english


# define app routes
@app.route("/")
def index():
    # Serve the chat UI page.
    return render_template("index.html")


@app.route("/get")
# function for the bot response
def get_bot_response():
    # The 'msg' query parameter carries the user's message text.
    userText = request.args.get('msg')
    return str(englishBot.get_response(userText))


if __name__ == "__main__":
    app.run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7075b62d95d63c0abfdebcac5772e9ce9fff30f4
|
02b460257be33634a5e204c12a22d396c49ec1e8
|
/ch1/ex1_6.py
|
e506176ded89c2a72f238158685c3fe6189a0731
|
[] |
no_license
|
wxhheian/ptcb
|
c5250362d5ab0903498e52c5a5d9cbdccc37853f
|
ae95fb18853f94246b4b1e84371e3f140677c8e8
|
refs/heads/master
| 2020-07-02T08:28:16.867948
| 2019-08-09T18:49:50
| 2019-08-09T18:49:50
| 201,473,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# Mapping one key to multiple values in a dictionary.
#
# Approach 1: keep the values in a container (list or set) per key.
# defaultdict creates the container automatically on first access.
from collections import defaultdict

d = defaultdict(list)
for key, value in (('a', 1), ('b', 2), ('b', 4)):
    d[key].append(value)   # lists preserve insertion order and duplicates

e = defaultdict(set)
for key, value in (('a', 1), ('a', 2), ('b', 4)):
    e[key].add(value)      # sets deduplicate and are unordered

# Approach 2: plain dict with setdefault — same idea, more verbose.
f = {}
for value in (1, 2):
    f.setdefault('a', []).append(value)
|
[
"365194143@qq.com"
] |
365194143@qq.com
|
73788b1d6880269678f000717a68971f4e3f0f80
|
0084e99ca9fcc792ddca9d489f7d55f023199b36
|
/backend_receitas/ll_env/Scripts/django-admin.py
|
aee7e9a3ea3acd2dcef6d3a9c75342ae0eed5e56
|
[
"Apache-2.0"
] |
permissive
|
gugact/backend_web
|
5fd84dd5edefd76541c27f213b0887589286e75f
|
32b72ec460c1b6bae63bfd391c87b0c4bf644821
|
refs/heads/master
| 2021-08-23T01:16:20.789714
| 2017-12-02T03:09:21
| 2017-12-02T03:09:21
| 112,808,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
#!c:\users\gustavo\desktop\backend_receitas\ll_env\scripts\python.exe
# Entry point for Django's command-line utility (virtualenv-local copy).
from django.core import management

if __name__ == "__main__":
    # Dispatch to the management command named on the command line.
    management.execute_from_command_line()
|
[
"gustavo.tremiliosi.usp@gmail.com"
] |
gustavo.tremiliosi.usp@gmail.com
|
142624fbd5f9548c22e3fac769ecbe3ca4657863
|
a960259bafb95fca6aed244fd4698a6dd6669cf8
|
/BooAPI/.ipynb_checkpoints/check_db-checkpoint.py
|
97bb5c6aa62b3c71db59f9b2a00cb280e81fde13
|
[] |
no_license
|
BenAndrew310/Booboo-api
|
b6f523e48e5a1bc4a1e004252f050b047330972d
|
afcec8587b471de2d56d47d72bb85a0819fb6b3c
|
refs/heads/master
| 2022-12-11T12:09:20.981651
| 2020-08-14T08:28:15
| 2020-08-14T08:28:15
| 287,349,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
from API import db
from API.models import User, Device, UserKey
from API.DeviceManager import Device_Manager
from datetime import datetime

if __name__ == "__main__":
    # One-off maintenance script: rename one user key, then dump all tables.
    # The commented blocks below are earlier ad-hoc operations kept for
    # reference (device creation, bulk deletion of users/devices/keys).
    # dev = Device_Manager()
    # db.create_all()
    # d=datetime.utcnow()
    # dev.create_device(name="Lamp desk",value="true",value_type="boolean",date_created=d)
    # devices = Device.query.all()
    # print(devices)
    # users = User.query.all()
    # devices = Device.query.all()
    # for user in users:
    #     print(user)
    #     db.session.delete(user)
    #     db.session.commit()
    # devices = Device.query.all()
    # for device in devices:
    #     print(device)
    #     db.session.delete(device)
    #     db.session.commit()
    # keys = UserKey.query.all()
    # for key in keys:
    #     print(key)
    #     db.session.delete(key)
    #     db.session.commit()
    # NOTE(review): assumes the 'Benchley' row exists; .first() returns None
    # otherwise and the next line would raise AttributeError.
    userkey = UserKey.query.filter_by(username='Benchley').first()
    userkey.username = 'ben_andrew'
    db.session.commit()
    users = User.query.all()
    devices = Device.query.all()
    keys = UserKey.query.all()
    print(users, "\n", devices, "\n", keys)
|
[
"andreben2442@gmail.com"
] |
andreben2442@gmail.com
|
a70448d44e33562edc9126a4b7054b947ed0c41d
|
6f1eabcb1b6cb1aaad4e882f2195b4e6d8bdad92
|
/backend/habdb/migrations/0006_auto_20190619_1508.py
|
ff2807be8ca59a5728fdae3919e7084689db57c5
|
[] |
no_license
|
pulidongz/hab-hub
|
705f47078ad9c5f92d02fbe1cd15cd594f03f06b
|
0a75a7cbec6b1afcc16b6549d2c98a9f924d7ebe
|
refs/heads/master
| 2023-01-23T00:04:39.859890
| 2020-02-05T09:14:02
| 2020-02-05T09:14:02
| 236,428,933
| 1
| 0
| null | 2023-01-06T06:26:52
| 2020-01-27T06:00:35
|
CSS
|
UTF-8
|
Python
| false
| false
| 446
|
py
|
# Generated by Django 2.2.2 on 2019-06-19 07:08
import django.contrib.gis.db.models.fields
from django.db import migrations


class Migration(migrations.Migration):
    """Convert station.location to a GeoDjango PointField (WGS 84)."""

    dependencies = [
        ('habdb', '0005_station_location'),
    ]

    operations = [
        migrations.AlterField(
            model_name='station',
            name='location',
            # srid=4326 is the standard WGS 84 lat/lon reference system.
            field=django.contrib.gis.db.models.fields.PointField(srid=4326),
        ),
    ]
|
[
"paogravoso@gmail.com"
] |
paogravoso@gmail.com
|
220bc39fefb86914c598be64d3011ebb9cdcf34d
|
4f67b9d8b7f4b5cb38a8abbe543ab858b6b8fb43
|
/tests/components/test_counter.py
|
8dc04f0e76ab7b910d7cc8309ff0c8884d579b85
|
[
"Apache-2.0"
] |
permissive
|
FedericoAsi/home-assistant
|
fc5b78b162d71a1eccb122bd3dd4e9e3f842b4c2
|
45609f13262111ddd6a7f865c8e3e290444110fd
|
refs/heads/dev
| 2023-03-10T15:17:46.326157
| 2021-02-11T22:09:33
| 2021-02-11T22:09:33
| 106,481,706
| 1
| 0
|
NOASSERTION
| 2021-02-11T22:09:34
| 2017-10-10T23:23:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,848
|
py
|
"""The tests for the counter component."""
# pylint: disable=protected-access
import asyncio
import unittest
import logging
from homeassistant.core import CoreState, State
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.components.counter import (
DOMAIN, decrement, increment, reset, CONF_INITIAL, CONF_STEP, CONF_NAME,
CONF_ICON)
from homeassistant.const import (ATTR_ICON, ATTR_FRIENDLY_NAME)
from tests.common import (get_test_home_assistant, mock_restore_cache)
# Module-level logger for this test module.
_LOGGER = logging.getLogger(__name__)
class TestCounter(unittest.TestCase):
    """Test the counter component."""

    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_config(self):
        """Test config: every invalid shape must be rejected by setup."""
        invalid_configs = [
            None,
            1,
            {},
            {'name with space': None},
        ]
        for cfg in invalid_configs:
            self.assertFalse(
                setup_component(self.hass, DOMAIN, {DOMAIN: cfg}))

    def test_methods(self):
        """Test increment, decrement, and reset methods."""
        config = {
            DOMAIN: {
                'test_1': {},
            }
        }

        assert setup_component(self.hass, 'counter', config)

        entity_id = 'counter.test_1'

        # Default initial value is 0 with step 1.
        state = self.hass.states.get(entity_id)
        self.assertEqual(0, int(state.state))

        increment(self.hass, entity_id)
        # block_till_done flushes the event loop so the service call lands.
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(1, int(state.state))

        increment(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(2, int(state.state))

        decrement(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(1, int(state.state))

        reset(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(0, int(state.state))

    def test_methods_with_config(self):
        """Test increment, decrement, and reset methods with configuration."""
        config = {
            DOMAIN: {
                'test': {
                    CONF_NAME: 'Hello World',
                    CONF_INITIAL: 10,
                    CONF_STEP: 5,
                }
            }
        }

        assert setup_component(self.hass, 'counter', config)

        entity_id = 'counter.test'

        # Counter starts at the configured initial value and moves by step.
        state = self.hass.states.get(entity_id)
        self.assertEqual(10, int(state.state))

        increment(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(15, int(state.state))

        increment(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(20, int(state.state))

        decrement(self.hass, entity_id)
        self.hass.block_till_done()

        state = self.hass.states.get(entity_id)
        self.assertEqual(15, int(state.state))

    def test_config_options(self):
        """Test configuration options (name, icon, initial, step)."""
        count_start = len(self.hass.states.entity_ids())

        _LOGGER.debug('ENTITIES @ start: %s', self.hass.states.entity_ids())

        config = {
            DOMAIN: {
                'test_1': {},
                'test_2': {
                    CONF_NAME: 'Hello World',
                    CONF_ICON: 'mdi:work',
                    CONF_INITIAL: 10,
                    CONF_STEP: 5,
                }
            }
        }

        assert setup_component(self.hass, 'counter', config)
        self.hass.block_till_done()

        _LOGGER.debug('ENTITIES: %s', self.hass.states.entity_ids())

        # Exactly two counter entities should have been created.
        self.assertEqual(count_start + 2, len(self.hass.states.entity_ids()))
        self.hass.block_till_done()

        state_1 = self.hass.states.get('counter.test_1')
        state_2 = self.hass.states.get('counter.test_2')

        self.assertIsNotNone(state_1)
        self.assertIsNotNone(state_2)

        # test_1 uses defaults: value 0, no icon, no friendly name.
        self.assertEqual(0, int(state_1.state))
        self.assertNotIn(ATTR_ICON, state_1.attributes)
        self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes)

        # test_2 carries the configured attributes.
        self.assertEqual(10, int(state_2.state))
        self.assertEqual('Hello World',
                         state_2.attributes.get(ATTR_FRIENDLY_NAME))
        self.assertEqual('mdi:work', state_2.attributes.get(ATTR_ICON))
@asyncio.coroutine
def test_initial_state_overrules_restore_state(hass):
    """Ensure a configured initial value beats the restored state.

    counter.test1 has no 'initial' so the cached '11' is ignored in favour
    of the default 0; counter.test2 configures initial=10, which wins over
    the cached '-22'.
    """
    mock_restore_cache(hass, (
        State('counter.test1', '11'),
        State('counter.test2', '-22'),
    ))

    hass.state = CoreState.starting

    yield from async_setup_component(hass, DOMAIN, {
        DOMAIN: {
            'test1': {},
            'test2': {
                CONF_INITIAL: 10,
            },
        }})

    state = hass.states.get('counter.test1')
    assert state
    assert int(state.state) == 0

    state = hass.states.get('counter.test2')
    assert state
    assert int(state.state) == 10
@asyncio.coroutine
def test_no_initial_state_and_no_restore_state(hass):
    """Ensure the entity is created without initial and restore features.

    With neither a configured initial value nor a restore cache entry,
    the counter falls back to 0.
    """
    hass.state = CoreState.starting

    yield from async_setup_component(hass, DOMAIN, {
        DOMAIN: {
            'test1': {
                CONF_STEP: 5,
            }
        }})

    state = hass.states.get('counter.test1')
    assert state
    assert int(state.state) == 0
|
[
"pascal.vizeli@syshack.ch"
] |
pascal.vizeli@syshack.ch
|
06d81819ec245e77cec949f12a8b70ffb0617810
|
9431bba2d148f8aef9c0a8f3ca16fcf875890757
|
/scraping/get_html_title.py
|
9f5573db2266ed5c6d715cae3af9936cb85faae6
|
[
"MIT"
] |
permissive
|
terasakisatoshi/pythonCodes
|
fba0b78414b2c85f4a738200354ea583f0516768
|
953210c06e9885a7c885bc01047715a77de08a1a
|
refs/heads/master
| 2023-05-14T12:30:22.201711
| 2023-05-07T13:41:22
| 2023-05-07T13:41:22
| 197,893,702
| 2
| 1
|
MIT
| 2022-11-25T10:59:52
| 2019-07-20T07:09:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 603
|
py
|
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
def get_title(url):
    """Return the first <h1> inside <body> of *url*, or None on failure.

    None is returned both when the HTTP request fails (the HTTPError is
    printed) and when the fetched page has no <body>/<h1> to extract.
    """
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None
    try:
        # Pin the built-in parser so parsing does not depend on which
        # optional parser (lxml, html5lib) happens to be installed.
        bsoup = BeautifulSoup(html.read(), "html.parser")
        title = bsoup.body.h1
    except AttributeError:
        # bsoup.body is None when the document has no <body>.
        return None
    return title
def main():
    """Fetch a known test page and print its title, or a notice if absent."""
    URL = "http://www.pythonscraping.com/pages/page1.html"
    title = get_title(URL)
    # 'is None' is the idiomatic identity check (PEP 8), not '== None'.
    if title is None:
        print("Title could not be found")
    else:
        print(title)


if __name__ == '__main__':
    main()
|
[
"terasakisatoshi.math@gmail.com"
] |
terasakisatoshi.math@gmail.com
|
75ced30ac4e6c4e6bead4ec4daa4862cb8bf164e
|
414493d42cb86fad7e00d1b3ea1eb7a4bc018557
|
/logger/ztest/FileWriterTest.py
|
1e458f014efe7da8313f3fc3e533c2a8322c184c
|
[] |
no_license
|
aman0302/trade-log
|
3f24230b000da8ac1efe2a76fc51ed9a43896017
|
ad90d75dce69ea740f21c612df975a9fca141e2f
|
refs/heads/master
| 2021-01-19T21:33:54.094140
| 2018-09-23T23:23:34
| 2018-09-23T23:23:34
| 88,665,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
from logger.action.FileWriter import FileWriter
from logger.models.SmallcaseModel import smallcase_model


def test_file_insert():
    """Write one dummy smallcase record through FileWriter (smoke test)."""
    sm = smallcase_model(name='TEST_SM', index='107.45', investment='10000.00', value='1007989.00', pnl='-10.6%',
                         actual_pnl='-1982.78', bought_on='20 APril', timestamp='12:45:78 34/34/34')
    sm_list = []
    sm_list.append(sm)
    filewriter = FileWriter()
    filewriter.write(sm_list)


# Executed directly on import/run rather than via a test runner.
test_file_insert()
|
[
"aman.gupta@flipkart.com"
] |
aman.gupta@flipkart.com
|
4d761b6be55161e94f2b66c2946332253be62fd8
|
027dbcdb37ba1c09d06bfc623eaa9134d41a664a
|
/models/spring.py
|
0b8371ce23d1b7577b2598b9c562725ab0742fb4
|
[] |
no_license
|
sjkywalker/physical-modeling-with-python
|
ccada128c4a3aaf6c02b2d0789059dbd73b78307
|
8e00897263d17381e159373bf9762d9274a028e5
|
refs/heads/master
| 2020-06-02T14:08:26.469431
| 2019-06-16T06:37:45
| 2019-06-16T06:37:45
| 191,181,891
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def spring(state, t, m, k, b):
    """Right-hand side of the damped harmonic oscillator ODE.

    state -- [displacement, velocity]; t is unused (autonomous system).
    m, k, b -- mass, spring constant, drag coefficient.
    Returns [dx/dt, dv/dt].
    """
    x, v = state
    # Hooke's law restoring term plus linear drag, both per unit mass.
    accel = -(k / m) * x - (b / m) * v
    return [v, accel]
# coefficients
m = 2.0    # mass
k = 100.0  # spring constant
b = 5.0    # critical damp near 25.0

# initial condition
disp0 = 0.5
vel0 = 0.0
state0 = [disp0, vel0]

# time points: 5 seconds sampled at 100 Hz
t = np.linspace(0, 5, 500)

# solve ODE
soln = odeint(spring, state0, t, args=(m, k, b))

# plot results: column 0 is displacement, column 1 is velocity
plt.plot(t, soln[:, 0], 'r:', label='x(t)')
plt.plot(t, soln[:, 1], 'g-', label='v(t)')
plt.xlabel('time')
plt.ylabel('values')
plt.grid()
plt.show()
|
[
"sjkskywalker@korea.ac.kr"
] |
sjkskywalker@korea.ac.kr
|
df8b92276a90d6247ec6faad10bd8438265fa168
|
968ea030e3439a2ec46fd8424363c9f49f88d3f3
|
/accounts/migrations/0001_initial.py
|
aef742fbeca32d580f166ea286df0f40ff7fb018
|
[] |
no_license
|
busoff/projectime
|
63f7d68dc49a7aba5b5ea4f091f3ec1665f21810
|
db9f10d6fcf65bd50383df00210bc224238ee952
|
refs/heads/master
| 2021-06-11T04:24:00.388820
| 2020-02-29T04:50:09
| 2020-02-29T04:50:09
| 152,294,463
| 0
| 0
| null | 2021-06-10T21:02:41
| 2018-10-09T17:40:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 754
|
py
|
# Generated by Django 2.1.2 on 2018-12-08 15:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial migration: Profile model with a unique myid per user."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('myid', models.CharField(max_length=16, unique=True)),
                # One profile per user; deleting the user deletes the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"busoff@qq.com"
] |
busoff@qq.com
|
c13cea7b42eb939807a49f33b9a9432879a0693b
|
e8dfe0ebcb618ae9ed70072ab28399db572a6890
|
/corp/admin.py
|
136f7c2cc71b8a8330a34c7a984c163ca029e334
|
[] |
no_license
|
wowioboy/popgalaxy
|
3ace319b6abdd9201c8003fbeb8525e969e14a6b
|
a08a76b013672f1481d337277098809548fbaeb2
|
refs/heads/master
| 2021-01-01T06:51:03.721738
| 2011-04-11T22:55:07
| 2011-04-11T22:55:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
from django.contrib import admin
from corp.models import *


class CorporateAdmin(admin.ModelAdmin):
    """Admin list options for the Corporate model."""
    list_display = ('section',)
    search_fields = ['section', 'details_markdown']
    list_filter = ('section',)


admin.site.register(Corporate, CorporateAdmin)
|
[
"lleach@wowio.com"
] |
lleach@wowio.com
|
c822f6ed07953bee56d648fff611aea04680c407
|
366b2ff9cd498808438bf7c48f697c05b361d02c
|
/models.py
|
0606075241f9749a7ff176655dadf12a115be600
|
[] |
no_license
|
c-bata/AngularJS-Bottle-TodoApp
|
1aef6b09fd85fabaa63898ab3fb9a2d586216b93
|
8f03820b7949b0c28477970c58f25ccd1856b2a9
|
refs/heads/master
| 2021-03-12T22:40:32.000758
| 2015-11-04T11:14:47
| 2015-11-04T11:14:47
| 38,732,944
| 2
| 0
| null | 2015-11-04T11:11:39
| 2015-07-08T05:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
from datetime import datetime
from sqlalchemy import (
    Column, Integer, Unicode, UnicodeText, Boolean, DateTime,
    create_engine
)
from sqlalchemy.ext import declarative
from bottle.ext import sqlalchemy

Base = declarative.declarative_base()
engine = create_engine('sqlite:///:memory:', echo=True)

plugin = sqlalchemy.Plugin(
    engine,
    Base.metadata,
    keyword='db',     # variable name injected into route handlers
    create=True,      # create the tables on startup
    commit=True,      # commit the transaction when the handler returns
    use_kwargs=False
)
class Task(Base):
    """A single todo task row."""

    __tablename__ = 'tasks'

    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    memo = Column(UnicodeText)
    done = Column(Boolean, nullable=False, default=False)
    # BUG FIX: pass the callable itself (datetime.now, no parentheses) so
    # SQLAlchemy evaluates it per-row at INSERT time. The original
    # 'default=datetime.now()' froze the timestamp at import time, giving
    # every task the same created_at.
    created_at = Column(DateTime, default=datetime.now, nullable=False)

    def __repr__(self):
        return "<Task (title='%s')>" % self.title

    @property
    def serialize(self):
        """Return a JSON-serialisable dict view of this task."""
        return {
            'id': self.id,
            'title': self.title,
            'memo': self.memo,
            'done': self.done,
            'created_at': self.created_at.strftime('%Y-%m-%d')
        }
|
[
"contact@c-bata.link"
] |
contact@c-bata.link
|
86366bbc3631c073f9792a83537f1104e2a53fc9
|
a684efb1b22a57a2f15c793960dd1ac7a7d1138d
|
/model.py
|
223d1bb01534555c13affa26fab019b9fae2448d
|
[] |
no_license
|
gagz21/MnistDigit-Recognition
|
ca74d7089a0e5ffa2bb90667fc0d852905fc4f02
|
09d767a6bdf9b33c8e7494a42737bc172231dd96
|
refs/heads/master
| 2020-03-28T09:59:09.647904
| 2019-03-06T01:32:48
| 2019-03-06T01:32:48
| 148,073,893
| 1
| 0
| null | 2018-09-09T23:17:52
| 2018-09-09T23:17:52
| null |
UTF-8
|
Python
| false
| false
| 3,778
|
py
|
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Activation
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import numpy as np
from keras.models import model_from_json
import cv2
#load json and create model
def load_model():
    """Load the model architecture from JSON and its weights from HDF5.

    Returns the reconstructed Keras model.
    """
    # Context manager guarantees the file handle is closed even if
    # reading or a later step raises (the original leaked on error).
    with open('model/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model/model.h5")
    print("Loaded model from disk")
    return loaded_model
def save_model(model):
    """Persist *model*: architecture to JSON, weights to HDF5."""
    # Architecture first, as JSON text.
    with open("model/model.json", "w") as json_file:
        json_file.write(model.to_json())
    # Then the learned parameters.
    model.save_weights("model/model.h5")
    print("Saved model to disk")
def build_model():
    """Build and compile a VGG-style CNN for 28x28x1 digit images.

    Architecture (all convs 'same' padding, ReLU):
      64, 128                     -> pool/drop
      128, 128, 128, 256          -> pool/drop
      256, 256, 256, 512          -> pool/drop
      512, 512, 512, 512, 1024(1x1) -> pool/drop
      Flatten -> Dense 4096 -> BN/ReLU/drop -> Dense 1024 -> BN/ReLU/drop
      -> Dense 10 softmax
    Returns the compiled model (categorical cross-entropy, Adadelta).
    """
    model = Sequential()

    def add_conv(filters, kernel_size=(3, 3), **kwargs):
        # Every conv layer shares 'same' padding and ReLU activation;
        # deduplicates the 15 copy-pasted stanzas of the original.
        model.add(Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            padding='same',
            activation='relu',
            **kwargs))

    def add_pool_drop():
        # Spatial downsampling followed by light regularisation.
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

    add_conv(64, input_shape=(28, 28, 1))
    add_conv(128)
    add_pool_drop()

    for filters in (128, 128, 128, 256):
        add_conv(filters)
    add_pool_drop()

    for filters in (256, 256, 256, 512):
        add_conv(filters)
    add_pool_drop()

    for filters in (512, 512, 512, 512):
        add_conv(filters)
    add_conv(1024, kernel_size=(1, 1))  # 1x1 channel-mixing layer
    add_pool_drop()

    model.add(Flatten())
    for units in (4096, 1024):
        model.add(Dense(units))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
|
[
"gentlemanana1@gmail.com"
] |
gentlemanana1@gmail.com
|
6992cf72d105b6ef54bff25adcb8ea24a189c840
|
14c63f5847aafbcf69796ccdab1656f0e5897711
|
/Trump_Clinton_Classifer/CNN.py
|
12d5ec080091b030ad8d0c83c3edb27074dfdee5
|
[
"MIT"
] |
permissive
|
lilyzhi1/fake_news_during_election
|
f12a7a06568077856e71d6147027cd13b6629dff
|
59609f498444a6b37727afa096f2adcaf0ed4ce8
|
refs/heads/master
| 2020-04-29T18:36:26.499995
| 2019-03-18T15:51:54
| 2019-03-18T15:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,276
|
py
|
# 向量化(vectorize)
import sys
from my_weapon import *
from gensim.models import Word2Vec
import word2vecReader
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd, optim
import logging
# logging.basicConfig(filename="log/train-11302018.log", format="%(levelname)s - %(asctime)s - %(message)s", level=logging.INFO)
# Log to stderr; switch to the commented line above to log to a file instead.
logging.basicConfig(format="%(levelname)s - %(asctime)s - %(message)s", level=logging.INFO)
from tensorboardX import SummaryWriter
from sklearn.metrics import classification_report
class Config:
    """Training hyper-parameters and data paths."""

    def __init__(self):
        # Assign all defaults in one place; setattr keeps them ordinary
        # instance attributes, identical to direct assignment.
        defaults = {
            "train_file": "train_data/train_dataset.txt",
            "train_batch_size": 128,
            "learning_rate": 0.001,
            "window_size": 3,
            "num_classes": 2,
            "num_epochs": 10,
            "train_steps": None,   # None = derive from the dataset size
            "summary_interval": 1000,
        }
        for name, value in defaults.items():
            setattr(self, name, value)
class Dataset:
    """Batched training data backed by pre-computed .npy embedding files.

    Each sample is a pair of 40x400 word-embedding matrices (one per
    word2vec model) plus an integer label; batches are returned as torch
    tensors.
    """

    def __init__(self, filepath, batch_size):
        self._file = open(filepath)
        self._batch_size = batch_size
        self._count = 0      # samples remaining in the current buffer
        self._file_num = 1   # next .npy shard index to load
        self._wv1 = None     # gensim word2vec model (lazy-loaded)
        self._wv2 = None     # twitter word2vec model (lazy-loaded)
        self._reset()

    def read_wv1(self):
        """Load the locally-trained gensim word2vec model."""
        print("Loading wv1 ...")
        return Word2Vec.load("model/word2vec.mod")

    def read_wv2(self):
        """Load the pre-trained twitter word2vec model (binary format)."""
        print("Loading wv2 ...")
        return word2vecReader.Word2Vec.load_word2vec_format(
            "/media/alex/data/word2vec_twitter_model/word2vec_twitter_model.bin", binary=True)

    def wv1(self, line):
        """Embed *line* as a 40x400 matrix using wv1; rows beyond the
        40th word are dropped, OOV words leave zero rows."""
        v = np.zeros(40 * 400).reshape(40, 400)
        words = line.strip().split(" ")
        _index = 0
        for w in words:
            if _index >= 40:
                break
            if w in self._wv1.wv:
                v[_index] = self._wv1.wv[w]
                # NOTE(review): indentation was lost in transit — the index is
                # advanced only for in-vocabulary words here; confirm against
                # the original repo.
                _index += 1
        return v

    def wv2(self, line):
        """Embed *line* as a 40x400 matrix using wv2 (same scheme as wv1)."""
        v = np.zeros(40 * 400).reshape(40, 400)
        words = line.strip().split(" ")
        _index = 0
        for w in words:
            if _index >= 40:
                break
            if w in self._wv2:
                v[_index] = self._wv2[w]
                _index += 1
        return v

    # Iteration protocol: __iter__ resets state; __next__ returns a batch;
    # when the buffer is empty _fill_buffer tops it up from the next shard;
    # if it is still empty after refilling, iteration stops.
    def _save(self):
        """Vectorise the raw text file and write X/Y .npy shards of
        batch_size*100 samples each (one-off preprocessing step)."""
        if not self._wv1:
            self._wv1 = self.read_wv1()
        if not self._wv2:
            self._wv2 = self.read_wv2()
        self._reset()
        count = 0
        labels = []
        X = []
        for line in self._file:
            try:
                label, sentence = line.strip().split("\t")
            except ValueError:
                # Skip malformed lines without a tab separator.
                continue
            label = int(label.strip())
            sequence1 = self.wv1(sentence)
            sequence2 = self.wv2(sentence)
            labels.append(label)
            X.append([sequence1, sequence2])
            count += 1
            if count % (self._batch_size * 100) == 0:
                np.save("/media/alex/data/train_data/X_{}.npy".format(int(count /
                        self._batch_size / 100)), np.array(X))
                np.save("/media/alex/data/train_data/Y_{}.npy".format(int(count /
                        self._batch_size / 100)), np.array(labels))
                labels = []
                X = []
                print(count)

    def __iter__(self):
        self._reset()
        return self

    def _fill_buffer(self):
        """Load the next .npy shard into the buffer (shards 1..189)."""
        if self._count == 0 and self._file_num <= 189:
            self._buffer = []
            # print("load file {} ...".format(self._file_num))
            X = np.load("/media/alex/data/train_data/X_{}.npy".format(self._file_num))
            Y = np.load("/media/alex/data/train_data/Y_{}.npy".format(self._file_num))
            self._file_num += 1
            self._count += Y.shape[0]
            for i in range(Y.shape[0]):
                self._buffer.append((Y[i], X[i]))
            self._buffer_iter = iter(self._buffer)
            # print("loading finished.")

    def __next__(self):
        self._fill_buffer()  # original note: refill the buffer each time
        if self._count == 0:  # After filling, still empty, stop iter!
            raise StopIteration
        label_batch = []
        sequence_batch = []
        for label, sequence in self._buffer_iter:
            self._count -= 1
            label_batch.append(label)
            sequence_batch.append(sequence)
            if len(label_batch) == self._batch_size:
                break
        return {"sequences": torch.Tensor(sequence_batch), "labels": torch.LongTensor(label_batch)}

    def _reset(self):
        """Reset buffer and shard cursor to the first file."""
        self._buffer = None
        self._count = 0
        self._file_num = 1
        self._buffer = []
        self._buffer_iter = None

    def save_testdata(self):
        """Vectorise the held-out test files and save them as .npy arrays.

        Label 0 samples come from data/0-test.txt, label 1 from
        data/1-test.txt.
        """
        if not self._wv1:
            self._wv1 = self.read_wv1()
        if not self._wv2:
            self._wv2 = self.read_wv2()
        labels = []
        sequences = []
        for line in open("data/0-test.txt"):
            labels.append(0)
            sequences.append([self.wv1(line), self.wv2(line)])
        for line in open("data/1-test.txt"):
            labels.append(1)
            sequences.append([self.wv1(line), self.wv2(line)])
        np.save("/media/alex/data/train_data/X_test.npy", np.array(sequences))
        np.save("/media/alex/data/train_data/Y_test.npy", np.array(labels))

    def get_testdata(self):
        """Return the saved test set as torch tensors."""
        return {"sequences": torch.Tensor(np.load("/media/alex/data/train_data/X_test.npy")),
                "labels": torch.LongTensor(np.load("/media/alex/data/train_data/Y_test.npy"))}
class Dataset2:
    """Variant of Dataset that vectorises raw text shards on the fly
    (train_data/train_00 .. train_18) instead of reading pre-saved .npy
    files. Embedding models are loaded eagerly in __init__.
    """

    def __init__(self, filepath, batch_size):
        self._file = open(filepath)
        self._wv1 = self.read_wv1()
        self._wv2 = self.read_wv2()
        self._batch_size = batch_size
        self._file.seek(0)
        self._buffer = []
        self._buffer_iter = None
        self._buff_count = 0   # samples available in the current buffer
        self._file_num = 0     # next text shard index to load
        self._reset()

    def wv1(self, line):
        """Embed *line* as a 40x400 matrix using wv1 (same scheme as
        Dataset.wv1)."""
        v = np.zeros(40 * 400).reshape(40, 400)
        words = line.strip().split(" ")
        _index = 0
        for w in words:
            if _index >= 40:
                break
            if w in self._wv1.wv:
                v[_index] = self._wv1.wv[w]
                # NOTE(review): index advance placement reconstructed —
                # confirm against the original repo.
                _index += 1
        return v

    def wv2(self, line):
        """Embed *line* as a 40x400 matrix using wv2."""
        v = np.zeros(40 * 400).reshape(40, 400)
        words = line.strip().split(" ")
        _index = 0
        for w in words:
            if _index >= 40:
                break
            if w in self._wv2:
                v[_index] = self._wv2[w]
                _index += 1
        return v

    def read_wv1(self):
        """Load the locally-trained gensim word2vec model."""
        print("Loading wv1 ...")
        return Word2Vec.load("model/word2vec.mod")

    def read_wv2(self):
        """Load the pre-trained twitter word2vec model (binary format)."""
        print("Loading wv2 ...")
        return word2vecReader.Word2Vec.load_word2vec_format(
            "/media/alex/data/word2vec_twitter_model/word2vec_twitter_model.bin", binary=True)

    # Iteration protocol: __iter__ resets state; __next__ returns a batch;
    # when the buffer is empty _fill_buffer replenishes it from the next
    # text shard; once the shards are exhausted, iteration stops.
    def __iter__(self):
        self._reset()
        return self

    def _fill_buffer(self):
        """Vectorise the next text shard into the buffer.

        Returns 1 while more data is (or may be) available, 0 once all
        shards (train_00..train_18) have been consumed.
        """
        if self._buff_count > 0:
            return 1
        train_filename = "train_data/train_{:0>2d}".format(self._file_num)
        # stream the shard line by line
        with open(train_filename) as f:
            for line in f:
                try:
                    label, sentence = line.strip().split("\t")
                except ValueError:
                    continue
                label = int(label.strip())
                sequence1 = self.wv1(sentence)
                sequence2 = self.wv2(sentence)
                self._buff_count += 1
                self._buffer.append((label, [sequence1, sequence2]))
        print("file:", train_filename)
        self._file_num += 1
        self._buffer_iter = iter(self._buffer)
        self._buffer = []
        if self._file_num > 18:
            return 0
        else:
            return 1

    def __next__(self):
        if self._fill_buffer() == 0:
            raise StopIteration
        label_batch = []
        sequence_batch = []
        for label, sequence in self._buffer_iter:
            self._buff_count -= 1
            label_batch.append(label)
            sequence_batch.append(sequence)
            if len(label_batch) == self._batch_size:
                break
        return {"sequences": torch.Tensor(sequence_batch), "labels": torch.LongTensor(label_batch)}

    def _reset(self):
        """Rewind the source file and clear buffer state."""
        self._file.seek(0)
        self._buffer = []
        self._buffer_iter = None
        self._buff_count = 0
        self._file_num = 0

    def get_testdata(self):
        """Vectorise the held-out test files and return (labels, sequences)
        as torch tensors. Label 0: train_data/0-test.txt, label 1:
        train_data/1-test.txt.
        """
        labels = []
        sequences = []
        for line in open("train_data/0-test.txt"):
            labels.append(0)
            sequences.append([self.wv1(line), self.wv2(line)])
        for line in open("train_data/1-test.txt"):
            labels.append(1)
            sequences.append([self.wv1(line), self.wv2(line)])
        return torch.LongTensor(labels), torch.Tensor(sequences)
class CNNClassifier(nn.Module):
    """Two-channel text CNN: one grouped convolution over the two
    word-embedding channels, followed by four fully connected layers.
    """
    def __init__(self):
        super(CNNClassifier, self).__init__()
        # 2 in-channels, 64 out-channels (comment below said 32 — the code
        # uses 64), 3 * 400 window size, one group per embedding channel.
        # 2 in- channels, 32 out- channels, 3 * 400 windows size
        self.conv = torch.nn.Conv2d(2, 64, kernel_size=(3, 400), groups=2)
        self.f1 = nn.Linear(1216, 128)  # 1216 == 64 * 19 flattened features
        self.f2 = nn.Linear(128, 64)
        self.f3 = nn.Linear(64, 32)
        self.f4 = nn.Linear(32, 2)
    def forward(self, x):
        """Return (class probabilities, predicted class indices) for *x*.

        NOTE(review): assumes x is (batch, 2, 40, 400) — confirm with the
        Dataset producers above.
        """
        out = self.conv(x)
        out = F.relu(out)
        out = torch.squeeze(out)
        out = F.max_pool1d(out, 2)
        out = out.view(-1, 2 * 32 * 19)  # 19 is the length after pooling
        out = F.relu(self.f1(out))
        out = F.relu(self.f2(out))
        out = F.relu(self.f3(out))
        # NOTE(review): ReLU on the final layer before softmax clips
        # negative logits to zero — verify this is intentional.
        out = F.relu(self.f4(out))
        # print(out.size())
        probs = F.softmax(out, dim=1)
        # print(probs)
        classes = torch.max(probs, 1)[1]
        return probs, classes
def train(model, train_set, test_set):
    """Training loop skeleton: iterates epochs and batches.

    NOTE(review): the entire optimization/evaluation body is commented
    out, so this currently only constructs the loss/optimizer/writer and
    drains the dataset once per epoch without updating the model.
    """
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    writer = SummaryWriter(log_dir="log")
    epoch = 0
    step = 0
    for epoch in range(1, config.num_epochs + 1):
        logging.info("==================== Epoch: {} ====================".format(epoch))
        running_losses = []
        for batch in train_set:
            sequences = batch["sequences"]
            labels = batch["labels"]
        #     # Predict
        #     try:
        #         probs, classes = model(sequences)
        #     except:
        #         print(sequences.size(), labels.size())
        #         print("发生致命错误!")
        #     # Backpropagation
        #     optimizer.zero_grad()
        #     losses = loss_function(probs, labels)
        #     losses.backward()
        #     optimizer.step()
        #     # Log summary
        #     running_losses.append(losses.data.item())
        #     if step % config.summary_interval == 0:
        #         loss = sum(running_losses) / len(running_losses)
        #         writer.add_scalar("train/loss", loss, step)
        #         logging.info("step = {}, loss = {}".format(step, loss))
        #         running_losses = []
        #     step += 1
        # # Classification report
        # test_X = test_set["sequences"]
        # test_labels = test_set["labels"]
        # probs, y_pred = model(test_X)
        # target_names = ['pro-hillary', 'pro-trump']
        # logging.info("{}".format(classification_report(test_labels, y_pred, target_names=target_names)))
        # # Save
        # torch.save(model, "model/11292018-model-epoch-{}.pkl".format(epoch))
        # NOTE(review): this increment is overwritten by the for-loop on
        # the next iteration — it has no effect.
        epoch += 1
# Module-level configuration instance (Config is defined earlier in this file).
config = Config()
if __name__ == "__main__":
    # Build the streaming dataset, load cached test data, and train.
    train_set = Dataset2(config.train_file, config.train_batch_size)
    test_set = train_set.get_testdata()
    model = CNNClassifier()
    train(model, train_set, test_set)
|
[
"zkzhou_91@163.com"
] |
zkzhou_91@163.com
|
7466229e21a1f6ba95a9a8ae72f30c4c238f16fe
|
9ecf6cfdc15b704b44688c533c5c6e9eccc5c0ab
|
/randomise-selected-objects-color.py
|
181f6e92a57894fc3a910c380826c7c07f9afaf0
|
[] |
no_license
|
Bordilovskii/cinema4d-scripts
|
96b1eab6aa442ef6ead105d22e0bab352d8563c9
|
811be702a64c8b0c97dedbbf95723ce0af06a7fa
|
refs/heads/master
| 2020-03-27T06:37:25.692966
| 2018-07-04T09:30:18
| 2018-07-04T09:30:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
import c4d
import random as rand
def main():
    """Give every selected object a random display color (undo-aware)."""
    doc.StartUndo()
    selection = doc.GetActiveObjects(0)
    if not selection:
        return
    for op in selection:
        doc.AddUndo(c4d.UNDOTYPE_CHANGE, op)
        op[c4d.ID_BASEOBJECT_USECOLOR] = 2  # enable the custom display color
        doc.AddUndo(c4d.UNDOTYPE_CHANGE, op)
        op[c4d.ID_BASEOBJECT_COLOR] = c4d.Vector(
            rand.random(), rand.random(), rand.random())
    c4d.EventAdd()
    doc.EndUndo()
# Cinema 4D runs scripts as __main__; kick off the recolor action.
if __name__=='__main__':
    main()
|
[
"rautio.arttu@gmail.com"
] |
rautio.arttu@gmail.com
|
ba899437a9b90245a23302d2657f8bbc45f04620
|
753e37262b066ce25d1459abc9726f23143d7ec1
|
/NashEquilibria/cyclic-4-3/inputFile.py
|
5728662be385b86e6c69b1214184a2ef85bedb89
|
[] |
no_license
|
JoseMath/multiregeneration
|
bc7f93499e1bf6d1d65e025fa97fc7936a300fcb
|
57ace40595941678491d82f96d2d71a784284a3e
|
refs/heads/master
| 2021-07-12T00:24:50.815887
| 2020-08-11T18:00:51
| 2020-08-11T18:00:51
| 191,240,334
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# Multidegrees of the 12 polynomials in the cyclic-(4,3) Nash equilibria
# system, one row per equation, one column per variable group.
degrees = [[2, 1, 0, 1], [2, 1, 0, 1], [2, 1, 0, 1], [1, 2, 1, 0], [1, 2, 1, 0], [1, 2, 1, 0], [0, 1, 2, 1], [0, 1, 2, 1], [0, 1, 2, 1], [1, 0, 1, 2], [1, 0, 1, 2], [1, 0, 1, 2]]
# log10 tolerance used by the solver when deciding numerical zero.
logTolerance = -10
explorationOrder="depthFirst"
# Variable groups that live on the algebraic torus (nonzero coordinates).
algebraicTorusVariableGroups = [0,1,2,3]
|
[
"jose.israel.rodriguez.math@gmail.com"
] |
jose.israel.rodriguez.math@gmail.com
|
7cf927c28e9287422cc2e956a310a8886499d318
|
7296426621849f7f0fec593cd6977bca140a0c9b
|
/manage.py
|
2de5da7787dc5f9fc4208210ed86256af63acd84
|
[
"MIT"
] |
permissive
|
aaira-a/batcher
|
cf31ae9fa289b4114692dadc288b157137e81161
|
3e22f017b23bc22d0bec6eced4c94ee56e3b36f9
|
refs/heads/master
| 2022-12-22T20:08:30.915731
| 2015-06-09T07:27:04
| 2015-06-09T07:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
# Django's standard command-line entry point for the "batcher" project.
import os
import sys
if __name__ == "__main__":
    # Fall back to the project settings unless the environment overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "batcher.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"azam.alias@iproperty.com"
] |
azam.alias@iproperty.com
|
7e3d468ebf175be3859c7098b04ede6d18e42156
|
14edb8c2733f0e356fe77f7721b99a9a9c06d4ac
|
/Домашняя работа/Урок 2/Задание 2.2.py
|
2f32d1890bf929adc765c51ec3b5032e85345c5c
|
[] |
no_license
|
SavaGeekbrains/Python-1
|
40ef76bf16f0f46bbe597ebd023f69113bd627e9
|
850373bd9252e76bc54c6179da1eb10faf5ffec3
|
refs/heads/master
| 2022-04-25T08:55:19.850289
| 2020-04-30T11:00:19
| 2020-04-30T11:00:19
| 255,343,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Read N values from the user, then swap each adjacent pair of elements
# (1st<->2nd, 3rd<->4th, ...) and print the result.  With an odd number
# of elements the trailing one stays in place.
el_count = int(input("Введите количество элементов списка "))
my_list = []
for _ in range(el_count):  # idiomatic counted loop instead of manual while/i
    my_list.append(input("Введите следующее значение списка "))
# Step over the start index of every complete pair; len // 2 * 2 drops a
# trailing unpaired element (floor division replaces int(len/2)).
for el in range(0, len(my_list) // 2 * 2, 2):
    my_list[el], my_list[el + 1] = my_list[el + 1], my_list[el]
print(my_list)
|
[
"Savchenko@live.ru"
] |
Savchenko@live.ru
|
fc3d08b64398f20a86a22df3a3d1580e139e6d72
|
00f449b83b0501cd8c7fd49c80ab42e527a6429a
|
/day1.py
|
ede0449c75dc97560c35d7e8c368e1db0b22af76
|
[] |
no_license
|
aidaploco/AdventOfCode2019
|
620a576366b6f7e61aae4957416094cfd6bd2080
|
cc20fad41380cd3e50f7d7cbee33185313b638ba
|
refs/heads/master
| 2022-10-24T23:28:47.302321
| 2020-03-06T10:22:36
| 2020-03-06T10:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
def compute_sum(array):
    """Return the total fuel required for all module masses in *array*.

    Fuel for one mass is computed iteratively (Advent of Code 2019,
    day 1 part 2): fuel = mass // 3 - 2, then fuel for that fuel, and so
    on while the remainder still requires fuel (i.e. is greater than 8,
    since 8 // 3 - 2 == 0).  Returns 0 for empty input.
    """
    total = 0
    for mass in array:  # enumerate() index was unused; iterate values directly
        fuel = mass
        while fuel > 8:
            # Floor division stays exact for arbitrarily large ints,
            # unlike int(fuel / 3) which routes through floating point.
            fuel = fuel // 3 - 2
            total += fuel
    return total
# Puzzle input: one module mass per entry.
array = [
    114106, 87170, 133060, 70662, 134140, 125874, 50081, 133117, 100409, 95098,
    70251, 134043, 87501, 85034, 110678, 80615, 64647, 88555, 106387, 143755,
    101246, 142348, 92684, 62051, 94894, 65873, 78473, 64042, 147982, 145898,
    85591, 121413, 132163, 94351, 80080, 73554, 106598, 135174, 147951, 132517,
    50925, 115752, 114022, 73448, 50451, 56205, 81474, 90028, 124879, 137452,
    91036, 87221, 126590, 130592, 91503, 148689, 86526, 105924, 52411, 146708,
    149280, 52100, 80024, 115412, 91204, 132726, 59837, 129863, 140980, 109574,
    103013, 84105, 138883, 144861, 126708, 140290, 54417, 138154, 125187, 91537,
    90338, 61150, 61702, 95888, 100484, 82115, 122141, 63986, 138234, 54150,
    57651, 124570, 88460, 112144, 112334, 119114, 58220, 143221, 86568, 148706,
]
print(compute_sum(array))
|
[
"aidaploco.ap@gmail.com"
] |
aidaploco.ap@gmail.com
|
21c065daa29195c7e52d627c2a60d46bb9f76c6f
|
fbcecdf2b3c6f995541c142e208ccb6c74e0fee9
|
/tests/test_clusters.py
|
c70eb6aa7993a63ea3efd939f4c03637b95aa747
|
[
"MIT"
] |
permissive
|
ISO-B/zigate-1
|
da715671db2714492cb1078bb5583cf6fddc0bde
|
34032007d2d1971b744ba47abaec368ebeb59dab
|
refs/heads/master
| 2020-04-08T20:02:47.409415
| 2018-12-31T04:30:21
| 2018-12-31T04:30:21
| 159,682,004
| 0
| 0
|
MIT
| 2018-11-29T14:50:30
| 2018-11-29T14:50:29
| null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
'''
ZiGate clusters Tests
-------------------------
'''
import unittest
from zigate import clusters
class TestResponses(unittest.TestCase):
    """Tests for cluster 0x0012 attribute decoding."""
    def test_cluster_0012(self):
        """Cluster 18 decodes the same raw attribute differently per device.

        NOTE(review): device 24321 (xiaomi cube) yields a movement string,
        while device 259 (lumi.remote.b1acn01) yields a multiclick count —
        the expected dicts below differ on purpose.
        """
        # xiaomi cube status
        endpoint = {'device': 24321}
        data = {"attributes": [{"attribute": 85,
                                "data": 4,
                                "expire": 2,
                                "expire_value": "",
                                "name": "movement",
                                "value": ""}],
                "cluster": 18
                }
        c = clusters.C0012.from_json(data, endpoint)
        self.assertEqual(c.attributes,
                         {85: {'attribute': 85, 'data': 4,
                               'expire': 2, 'expire_value': '',
                               'name': 'movement', 'value': 'flip90_84'}}
                         )
        # xiaomi lumi.remote.b1acn01
        endpoint = {'device': 259}
        data = {"attributes": [{"attribute": 85,
                                "data": 4,
                                "expire": 2,
                                "expire_value": "",
                                "name": "movement",
                                "value": ""}],
                "cluster": 18
                }
        c = clusters.C0012.from_json(data, endpoint)
        self.assertEqual(c.attributes,
                         {85: {'attribute': 85, 'data': 4,
                               'expire': 2,
                               'name': 'multiclick', 'value': 4}}
                         )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"sebastien.ramage@gmail.com"
] |
sebastien.ramage@gmail.com
|
c2160201ce7a18aba463609e124b2e6cc946adbc
|
0cc544eb7ac81c9f78081ab3da6f5ce0ad9eabaa
|
/lab/11. sliding_window.py
|
ea8c2f14cfbc099ac2b6b9d6a9e1a0305bf6556c
|
[] |
no_license
|
BGPark/Vehicle_Detection
|
ff2ad89e43b2979cdee978b148f6f9b7ee1038b3
|
fe2dc4d8ef21190d48b49fa1fd122a869001e584
|
refs/heads/master
| 2021-01-19T15:23:38.887678
| 2017-08-22T12:51:32
| 2017-08-22T12:51:32
| 100,857,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,907
|
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from utils import *
# Build three layers of search windows (large/medium/small) over a video
# frame, draw them in different colors, and pickle the combined window
# list for later vehicle detection.
# image = mpimg.imread('cutouts/bbox-example-image.jpg')
image = mpimg.imread('project_video[00 00 21].bmp')
params = {}
window_list = None
# Layer 1: large 192px windows over the lower road area (drawn blue).
x_start_stop = [16, None]
y_start_stop = [370, 700]
xy_window = (192, 192)
xy_overlap = (0.5, 0.5)
draw_color = (0, 0, 255)
windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,
                       xy_window=xy_window, xy_overlap=xy_overlap)
window_list = np.array(windows)
print('window count = %d' % len(windows))
window_img = draw_boxes(image, windows, color=draw_color, thick=6)
# Layer 2: medium 128px windows over the mid-distance band (drawn green).
x_start_stop = [None, None]
y_start_stop = [390, 600]
xy_window = (128, 128)
xy_overlap = (0.5, 0.5)
draw_color = (0, 255, 0)
windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,
                       xy_window=xy_window, xy_overlap=xy_overlap)
window_list = np.vstack((window_list, windows))
print('window count = %d' % len(windows))
window_img = draw_boxes(window_img, windows, color=draw_color, thick=6)
# Layer 3: small 64px windows near the horizon (drawn red).
x_start_stop = [None, None]
y_start_stop = [410, 510]
xy_window = (64, 64)
xy_overlap = (0.5, 0.5)
draw_color = (255, 0, 0)
windows = slide_window(image, x_start_stop=x_start_stop, y_start_stop=y_start_stop,
                       xy_window=xy_window, xy_overlap=xy_overlap)
window_list = np.vstack((window_list, windows))
print('window count = %d' % len(windows))
window_img = draw_boxes(window_img, windows, color=draw_color, thick=6)
print('total window count = %d' % len(window_list))
# Persist the windows and sanity-check the round trip through pickle.
test = {}
test["windows"] = window_list
pickle.dump(test, open('windows.p', 'wb'))
test2 = pickle.load(open('windows.p', 'rb'))
print(test2['windows'].shape)
plt.imshow(window_img)
plt.show()
|
[
"emsshi@gmail.com"
] |
emsshi@gmail.com
|
ed0a2f5de08d232d41756e5f40d97c76a6470ee6
|
5e72dc0cb60585e39b955bed5fe72066797f85b3
|
/fastreid/layers/context_block.py
|
7b1098a8663e48c4affead9bef504acf3e50e2ef
|
[
"Apache-2.0"
] |
permissive
|
lhf12278/ISM-ReID
|
0ba10205664367890c40d55ad1d48380f44655b7
|
e87ae2dedee4f19199400281960fd71453f6d9dc
|
refs/heads/main
| 2023-03-26T19:29:11.843753
| 2021-03-26T02:39:32
| 2021-03-26T02:39:32
| 360,726,193
| 2
| 0
|
Apache-2.0
| 2021-04-23T01:17:41
| 2021-04-23T01:17:40
| null |
UTF-8
|
Python
| false
| false
| 4,244
|
py
|
# copy from https://github.com/xvjiarui/GCNet/blob/master/mmdet/ops/gcb/context_block.py
import torch
from torch import nn
__all__ = ['ContextBlock']
def last_zero_init(m):
    """Zero-initialize the weight (and bias, when present) of *m*.

    When *m* is an ``nn.Sequential`` the initialization is applied to
    its final layer only; otherwise to *m* itself.
    """
    target = m[-1] if isinstance(m, nn.Sequential) else m
    nn.init.constant_(target.weight, val=0)
    if hasattr(target, 'bias') and target.bias is not None:
        nn.init.constant_(target.bias, 0)
class ContextBlock(nn.Module):
    """GCNet global-context block (see github.com/xvjiarui/GCNet).

    Pools a global context vector from the feature map (attention or
    average pooling) and fuses it back via channel-wise addition and/or
    multiplication.
    """
    def __init__(self,
                 inplanes,
                 ratio,
                 pooling_type='att',
                 fusion_types=('channel_add',)):
        super(ContextBlock, self).__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all([f in valid_fusion_types for f in fusion_types])
        assert len(fusion_types) > 0, 'at least one fusion should be used'
        self.inplanes = inplanes
        self.ratio = ratio
        # Bottleneck width of the fusion convolutions.
        self.planes = int(inplanes * ratio)
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        if pooling_type == 'att':
            # 1x1 conv producing a single spatial attention map.
            self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if 'channel_add' in fusion_types:
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_add_conv = None
        if 'channel_mul' in fusion_types:
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),  # yapf: disable
                nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
        else:
            self.channel_mul_conv = None
        self.reset_parameters()
    def reset_parameters(self):
        """Kaiming-init the attention mask; zero-init the last fusion convs
        so the block starts as an identity mapping."""
        if self.pooling_type == 'att':
            nn.init.kaiming_normal_(self.conv_mask.weight, a=0, mode='fan_in', nonlinearity='relu')
            if hasattr(self.conv_mask, 'bias') and self.conv_mask.bias is not None:
                nn.init.constant_(self.conv_mask.bias, 0)
            self.conv_mask.inited = True
        if self.channel_add_conv is not None:
            last_zero_init(self.channel_add_conv)
        if self.channel_mul_conv is not None:
            last_zero_init(self.channel_mul_conv)
    def spatial_pool(self, x):
        """Pool x (N, C, H, W) into a global context of shape (N, C, 1, 1)."""
        batch, channel, height, width = x.size()
        if self.pooling_type == 'att':
            input_x = x
            # [N, C, H * W]
            input_x = input_x.view(batch, channel, height * width)
            # [N, 1, C, H * W]
            input_x = input_x.unsqueeze(1)
            # [N, 1, H, W]
            context_mask = self.conv_mask(x)
            # [N, 1, H * W]
            context_mask = context_mask.view(batch, 1, height * width)
            # [N, 1, H * W]
            context_mask = self.softmax(context_mask)
            # [N, 1, H * W, 1]
            context_mask = context_mask.unsqueeze(-1)
            # [N, 1, C, 1]
            context = torch.matmul(input_x, context_mask)
            # [N, C, 1, 1]
            context = context.view(batch, channel, 1, 1)
        else:
            # [N, C, 1, 1]
            context = self.avg_pool(x)
        return context
    def forward(self, x):
        """Fuse the pooled global context back into x; shape is preserved."""
        # [N, C, 1, 1]
        context = self.spatial_pool(x)
        out = x
        if self.channel_mul_conv is not None:
            # [N, C, 1, 1]
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            out = out * channel_mul_term
        if self.channel_add_conv is not None:
            # [N, C, 1, 1]
            channel_add_term = self.channel_add_conv(context)
            out = out + channel_add_term
        return out
|
[
"784509877@qq.com"
] |
784509877@qq.com
|
24cdb1982f2fe439f8411d943177ebf9d46ba73e
|
8d6ec0275afe856834bf10643e3b4b2cbcb318f4
|
/03-online-shop/myshop/shop/views.py
|
93982ce741c0abec344a2ff2ddd5db46f5ee1ff2
|
[] |
no_license
|
markronquillo/django-by-example
|
be35fbbc483440a11c440733931c146d56816c97
|
fa749e5077f64ac68f11c7b529e13ac097cb5bd0
|
refs/heads/master
| 2021-01-11T14:38:40.854636
| 2017-02-24T03:09:58
| 2017-02-24T03:09:58
| 80,184,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Category, Product
from cart.forms import CartAddProductForm
def product_list(request, category_slug=None):
    """Render the product catalogue, optionally narrowed to one category."""
    category = None
    products = Product.objects.filter(available=True)
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=category)
    context = {
        'category': category,
        'categories': Category.objects.all(),
        'products': products,
    }
    return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
    """Render one available product's detail page with an add-to-cart form."""
    product = get_object_or_404(
        Product, id=id, slug=slug, available=True)
    context = {
        'product': product,
        'cart_product_form': CartAddProductForm(),
    }
    return render(request, 'shop/product/detail.html', context)
|
[
"markronquillo23@gmail.com"
] |
markronquillo23@gmail.com
|
ee8ba4bcc3a9abb8c2eb5568f10db0cb9fc8d7fa
|
2892731203f7b59faa8f5182b756c0b3575e796f
|
/cma/__init__.py
|
158fa33dcec09b9cd3383b61ea91ae290e7e8990
|
[] |
no_license
|
assimilation/assimilation-official
|
1024b92badcbaf6b7c42f01f52e71c926a4b65f8
|
9ac993317c6501cb1e1cf09025f43dbe1d015035
|
refs/heads/rel_2_dev
| 2023-05-10T20:12:33.935123
| 2022-12-08T16:21:22
| 2022-12-08T16:21:22
| 42,373,046
| 52
| 17
| null | 2023-08-16T12:43:49
| 2015-09-12T21:04:36
|
Python
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
"""Module docstring ;-) """
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
print("PATH=%s" % __path__)
|
[
"alanr@unix.sh"
] |
alanr@unix.sh
|
f73e10702eac780562bb92aa6e041b70ea6d6e0c
|
7f0c5a5632eae66a8774b85cecd7d8b109b2d574
|
/prometeo/mem/ast_analyzer.py
|
7afa08b4d8066ab73b06c47800dd27bfbee7eb6f
|
[] |
no_license
|
tmmsartor/prometeo
|
5580f57776e04a3c41b842c921550dad320c503a
|
0274956bf42c2494c2efaa917fd91ccce9c19917
|
refs/heads/master
| 2022-06-29T13:18:03.439266
| 2020-05-04T12:22:36
| 2020-05-04T12:22:36
| 261,490,240
| 0
| 0
| null | 2020-05-05T14:10:11
| 2020-05-05T14:10:10
| null |
UTF-8
|
Python
| false
| false
| 10,315
|
py
|
import ast
from collections import defaultdict
from ..cgen.node_util import ExplicitNodeVisitor
import astpretty as ap
from ..cgen.op_util import get_op_symbol, get_op_precedence, Precedence
import json
from collections import Iterable
from copy import deepcopy
pmt_functions = {\
'global@pmat': [], \
'global@pvec': [], \
'global@plist': [], \
'global@pmat_copy': [], \
'global@pmat_print': [], \
'global@pvec_print': [], \
'global@pmat_fill': [], \
'global@pmat_tran': [], \
'global@pmat_hcat': [], \
'global@pmat_vcat': [], \
'global@pmt_gemm_nn': [], \
'global@pmt_gemm_tn': [], \
'global@pmt_gemm_nt': [], \
'global@pmt_gead': [], \
'global@pmt_potrf': [], \
'global@pmt_potrsm': [], \
'global@pmt_getrf': [], \
'global@pmt_getrsm': [], \
'global@print': [], \
'global@pparse': [], \
}
def precedence_setter(AST=ast.AST, get_op_precedence=get_op_precedence,
                      isinstance=isinstance, list=list):
    """ This only uses a closure for performance reasons,
    to reduce the number of attribute lookups. (set_precedence
    is called a lot of times.)
    """
    def set_precedence(value, *nodes):
        """Set the precedence (of the parent) into the children.
        """
        if isinstance(value, AST):
            value = get_op_precedence(value)
        for node in nodes:
            if isinstance(node, AST):
                node._pp = value
            elif isinstance(node, list):
                # Recurse into lists of child nodes.
                set_precedence(value, *node)
            else:
                assert node is None, node
    return set_precedence
# Module-level singleton used by the visitor below.
set_precedence = precedence_setter()
def descope(current_scope, pop):
    """Strip the trailing scope segment *pop* off *current_scope*.

    Raises when *pop* is not actually the innermost segment.
    """
    if not current_scope.endswith(pop):
        raise Exception('Attempt to descope {}, which is not the current scope'.format(pop))
    return current_scope[:-len(pop)]
def flatten(coll):
    """Yield the items of *coll* depth-first, flattening nested iterables.

    Strings are treated as atoms (yielded whole, not split into chars).
    """
    # `Iterable` must come from `collections.abc`: the `collections`
    # alias used at module top is removed in Python 3.10+.
    from collections.abc import Iterable
    for item in coll:
        if isinstance(item, Iterable) and not isinstance(item, str):
            yield from flatten(item)
        else:
            yield item
class ast_visitor(ExplicitNodeVisitor):
    """AST walker that records, per scope, the set of calls it makes.

    ``self.callees`` maps '@'-joined scope names (e.g. 'global@Class@m')
    to the set of callee scope names seen inside that scope.
    """
    def __init__(self):
        # Seed with the built-in primitives so they resolve as known callees.
        self.callees = pmt_functions
        self.caller_scope = 'global'
        self.callee_scope = 'global'
        self.in_call = False
        # load local typed_record
        with open('__pmt_cache__/typed_record.json', 'r') as f:
            self.typed_record = json.load(f)
        visit = self.visit
        def visit_ast(*params):
            # Visit AST nodes directly; call any callables passed in.
            for item in params:
                # ap.pprint(item)
                if isinstance(item, ast.AST):
                    visit(item)
                elif callable(item):
                    item()
        self.visit_ast = visit_ast
    def aux_visit_ast(self, node, *params):
        self.visit_ast(*params)
    def __getattr__(self, name, defaults=dict(keywords=(),
                    _pp=Precedence.highest).get):
        """ Get an attribute of the node.
        like dict.get (returns None if doesn't exist)
        """
        if not name.startswith('get_'):
            raise AttributeError
        geta = getattr
        shortname = name[4:]
        default = defaults(shortname)
        # Cache the generated getter on the instance for future lookups.
        def getter(node):
            return geta(node, shortname, default)
        setattr(self, name, getter)
        return getter
    def body(self, args):
        self.visit_ast(*args)
    def visit_Module(self, node):
        self.visit_ast(*node.body)
        return
    def visit_FunctionDef(self, node):
        # __init__ bodies are skipped entirely; other defs push a scope.
        if node.name != '__init__':
            self.caller_scope = self.caller_scope + '@' + node.name
            self.callees[self.caller_scope] = set([])
            # self.visit_ast(node)
            self.body(node.body)
            self.caller_scope = descope(self.caller_scope, '@' + node.name)
    def visit_ClassDef(self, node):
        self.caller_scope = self.caller_scope + '@' + node.name
        self.callees[self.caller_scope] = set([])
        self.body(node.body)
        self.caller_scope = descope(self.caller_scope, '@' + node.name)
    def visit_Expr(self, node):
        set_precedence(node, node.value)
        self.aux_visit_ast(node)
        self.generic_visit(node)
    def visit_Expression(self, node):
        self.visit(node.body)
    def visit_Call(self, node, len=len):
        # ap.pprint(node)
        # Plain name calls are recorded directly; attribute calls are
        # resolved by visiting the attribute chain with in_call set.
        if isinstance(node.func, ast.Name):
            self.callees[self.caller_scope].add(self.callee_scope + '@' + node.func.id)
        elif isinstance(node.func, ast.Attribute):
            self.in_call = True
            self.visit(node.func)
            self.callees[self.caller_scope].add(self.callee_scope)
            self.in_call = False
    def visit_Name(self, node):
        return
        # self.generic_visit(node)
    def visit_Tuple(self, node):
        self.generic_visit(node)
    def visit_Set(self, node):
        self.generic_visit(node)
    def visit_Return(self, node):
        self.aux_visit_ast(node)
    def visit_Assign(self, node):
        set_precedence(node, node.value)
        self.visit(node.value)
    def visit_Num(self, node):
        self.generic_visit(node)
    def visit_Attribute(self, node):
        self.visit_ast(node.value)
        # Inside a call, accumulate the attribute chain into callee_scope.
        # NOTE(review): callee_scope is never reset after a call — verify
        # this is handled by the resolver in compute_reach_graph.
        if self.in_call:
            if isinstance(node.value, ast.Name):
                self.callee_scope = self.callee_scope + '@' + node.value.id + '@' + node.attr
            else:
                self.callee_scope = self.callee_scope + '@' + node.attr
        return
        # self.visit_ast(node.attr)
    def visit_JoinedStr(self, node):
        return
    def visit_Str(self, node, is_joined=False):
        return
    def else_body(self, elsewhat):
        if elsewhat:
            self.body(elsewhat)
    def body_or_else(self, node):
        self.body(node.body)
        self.else_body(node.orelse)
    def visit_For(self, node, is_async=False):
        self.body_or_else(node)
    def visit_While(self, node, is_async=False):
        self.body_or_else(node)
    def visit_ImportFrom(self, node):
        return
    def visit_Import(self, node):
        return
    def visit_AnnAssign(self, node):
        self.visit(node.value)
        return
    def visit_Subscript(self, node):
        return
    def visit_List(self, node):
        return
    def visit_BinOp(self, node):
        return
    def visit_UnaryOp(self, node):
        return
def compute_reach_graph(call_graph, typed_record):
    """Resolve unresolved calls in *call_graph* via *typed_record*, then
    return a map from each method to the list of methods reachable from it.

    Raises Exception if any call remains unresolved after resolution.
    """
    # get unresolved calls
    all_methods = list(call_graph.keys())
    # calls = list(call_graph.values())
    unresolved_calls = set([])
    unresolved_callers = dict()
    graph_copy = deepcopy(call_graph)
    for method in call_graph:
        unresolved_callers[method] = set([])
        for call in call_graph[method]:
            if call not in all_methods and call != set([]):
                # skip CasADi-related calls
                if '@ca@' not in call:
                    # add call to dictionary of unresolved calls
                    unresolved_callers[method].add(call)
                    # remove call from call graph
                    graph_copy[method].remove(call)
    call_graph = deepcopy(graph_copy)
    # strip empty calls
    r_unresolved_callers = dict()
    for caller in unresolved_callers:
        if unresolved_callers[caller] != set([]):
            r_unresolved_callers[caller] = unresolved_callers[caller]
    # resolve calls
    for caller in r_unresolved_callers:
        # NOTE(review): the set being iterated is mutated (remove/add)
        # below; safe only because of the immediate `break` — fragile.
        for call in r_unresolved_callers[caller]:
            scopes = call.split('@')
            curr_scope = scopes[0]
            for j in range(len(scopes)-1):
                if curr_scope + '@' + scopes[j+1] in typed_record:
                    curr_scope = curr_scope + '@' + scopes[j+1]
                else:
                    # try to resolve class name
                    if scopes[j] in typed_record[caller]:
                        scopes[j] = typed_record[caller][scopes[j]]
                t_call = '@'.join(scopes)
                if t_call in all_methods:
                    r_unresolved_callers[caller].remove(call)
                    r_unresolved_callers[caller].add(t_call)
                    break
    # update call_graph with unresolved calls
    call_graph.update(r_unresolved_callers)
    # check that there are no unresolved calls
    # TODO(andrea): this is a bit ugly
    unresolved_calls = set([])
    unresolved_callers = dict()
    for method in call_graph:
        unresolved_callers[method] = set([])
        # NOTE(review): graph_copy is re-initialized on every iteration
        # here (unlike the first pass above) — earlier removals are lost;
        # verify this is intended.
        graph_copy = deepcopy(call_graph)
        for call in call_graph[method]:
            if call not in all_methods and call != set([]):
                # add call to dictionary of unresolved calls
                unresolved_callers[method].add(call)
                # remove call from call graph
                graph_copy[method].remove(call)
    call_graph = deepcopy(graph_copy)
    # strip empty calls
    r_unresolved_callers = dict()
    for caller in unresolved_callers:
        if unresolved_callers[caller] != set([]):
            r_unresolved_callers[caller] = unresolved_callers[caller]
    if r_unresolved_callers != dict():
        raise Exception('call graph analyzer -- could not resolve the following calls {}'.format(r_unresolved_callers))
    reach_map = {}
    for curr_node in call_graph:
        reach_map[curr_node] = get_reach_nodes(call_graph, curr_node, curr_node, [], 1)
    return reach_map
def get_reach_nodes(call_graph, curr_call, root, reach_nodes_h, root_flag):
    """Accumulate into *reach_nodes_h* every node reachable from *root*.

    Depth-first walk over *call_graph* (dict: node -> iterable of callees).
    Nodes are recorded at most once; a cycle back to *root* itself is
    recorded as the marker '*'.  *root_flag* is truthy only on the initial
    call so the root node is not reported as its own reachable node.
    Returns *reach_nodes_h*, which is also mutated in place.
    """
    if not call_graph[curr_call] and not root_flag:
        # Leaf reached during the walk: record it and stop.
        if curr_call not in reach_nodes_h:
            reach_nodes_h += [curr_call]
        return reach_nodes_h
    if curr_call in reach_nodes_h:
        # Already visited: cut the recursion here.  (The original carried
        # an unreachable `not in` re-append guard inside this branch;
        # removed as dead code — behavior is unchanged.)
        return reach_nodes_h
    if root == curr_call and not root_flag:
        # Walked back around to the root: mark the cycle.
        reach_nodes_h += ['*']
        return reach_nodes_h
    if curr_call != root:
        if curr_call not in reach_nodes_h:
            reach_nodes_h += [curr_call]
    for call_iter in call_graph[curr_call]:
        reach_nodes_h = get_reach_nodes(call_graph, call_iter, root, reach_nodes_h, 0)
    return reach_nodes_h
|
[
"andrea.zanelli@imtek.uni-freiburg.de"
] |
andrea.zanelli@imtek.uni-freiburg.de
|
1f024929c0afb2db426b198c70664fa3b2feef25
|
614ef75a554c7d3ebec416f7f93b496738e5ed38
|
/ask_app/models.py
|
e4c9031cd19b45f3c8619f786672ba0af842b798
|
[] |
no_license
|
IVyazmin/ask_vyazmin
|
12807b574451bdcc96512293df89995edc5e571d
|
00e83d482d702359aec01f59406b36093d185970
|
refs/heads/master
| 2021-01-22T03:23:18.209069
| 2017-02-17T20:17:07
| 2017-02-17T20:17:07
| 81,121,472
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,755
|
py
|
# _*_ coding: utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User, UserManager
from django.shortcuts import get_list_or_404
from django.shortcuts import get_object_or_404
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class AnswerManager(models.Manager):
    """Query helpers for Answer objects."""
    def correct(self):
        # Answers marked as correct.
        return self.filter(is_correct=True)
    def question(self, question_id):
        # All answers belonging to the given question.
        return self.filter(question=question_id)
class QuestionManager(models.Manager):
    """Query helpers for Question objects.

    NOTE(review): `hot` sorts by recency and `new` by likes — the two
    orderings look swapped relative to their names; confirm with callers.
    """
    def hot(self):
        return self.order_by("-time_add")
    def new(self):
        return self.order_by("-likes")
    def tag(self, tag_name):
        # Questions carrying the given tag; 404 when none exist.
        return get_list_or_404(self, tags__name=tag_name)
    def number(self, question_id):
        # Single question by primary key; 404 when missing.
        return get_object_or_404(self, id=question_id)
class TagManager(models.Manager):
    """Query helpers for Tag objects."""
    def popular(self):
        # Ten tags with the highest question counts.
        return self.order_by("-questions")[:10]
class AuthorManager(models.Manager):
    """Query helpers for Author objects."""
    def best(self):
        # Seven authors with the most publications.
        return self.order_by("-publications")[:7]
class Answer(models.Model):
    """An answer posted to a question."""
    text = models.TextField(verbose_name=u'Текст')
    # NOTE(review): declared as TextField but queried with is_correct=True
    # in AnswerManager.correct — a BooleanField seems intended; changing it
    # requires a schema migration, so only flagging here.
    is_correct = models.TextField( verbose_name=u'Правильный', default='')
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    question = models.ForeignKey('Question', on_delete=models.CASCADE)
    likes = models.IntegerField(verbose_name=u'Лайк', default=0)
    objects = AnswerManager()
    def __unicode__(self):
        return self.text
    class Meta:
        verbose_name = u'Ответ'
        verbose_name_plural = u'Ответы'
class Question(models.Model):
    """A question with denormalized like/answer counters."""
    time_add = models.DateTimeField(verbose_name=u'Время добавления', auto_now_add=True)
    title = models.CharField(max_length=255, verbose_name=u'Заголовок')
    text = models.TextField(verbose_name=u'Текст')
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    tags = models.ManyToManyField('Tag')
    likes = models.IntegerField(verbose_name=u'Лайк', default=0)
    answers = models.IntegerField(verbose_name=u'Количество ответов', default=0)
    objects = QuestionManager()
    def count_likes(self):
        # Refresh the denormalized like counter from LikeQuestion rows.
        self.likes = self.likequestion_set.all().count()
        self.save()
    def count_answers(self):
        # Refresh the denormalized answer counter from Answer rows.
        self.answers = self.answer_set.all().count()
        self.save()
    class Meta:
        verbose_name = u'Вопрос'
        verbose_name_plural = u'Вопросы'
    def __unicode__(self):
        return self.title
class Tag(models.Model):
    """A tag with a denormalized count of questions carrying it."""
    name = models.CharField(max_length=10, verbose_name=u'Тэг')
    questions = models.IntegerField(verbose_name=u'Количество вопросов', default=0)
    objects = TagManager()
    class Meta:
        verbose_name = u'Тэг'
        verbose_name_plural = u'Тэги'
    def __unicode__(self):
        return self.name
class Author(User):
    """Site user extended with a publication counter and avatar.

    NOTE(review): subclassing User creates multi-table inheritance — a
    profile model or AUTH_USER_MODEL override is the usual alternative.
    """
    publications = models.IntegerField(verbose_name=u'Публикации', default=0)
    image = models.ImageField(verbose_name=u'Аватар', upload_to='')
    objects = AuthorManager()
    class Meta:
        verbose_name = u'Автор'
        verbose_name_plural =u'Авторы'
    def __unicode__(self):
        return self.username
class LikeQuestion(models.Model):
    """A single author's like/dislike state for one question."""
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    question = models.ForeignKey('Question', on_delete=models.CASCADE)
    status = models.IntegerField(verbose_name=u'Статус', default=0)
    class Meta:
        verbose_name = u'Лайк вопросу'
        verbose_name_plural = u'Лайки вопросу'
class LikeAnswer(models.Model):
    """A single author's like/dislike state for one answer."""
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
    status = models.IntegerField(verbose_name=u'Статус', default=0)
    class Meta:
        verbose_name = u'Лайк ответу'
        verbose_name_plural = u'Лайки ответу'
|
[
"ilja.vyazmin@mail.ru"
] |
ilja.vyazmin@mail.ru
|
c0d854e6bc7618d1b2b42bf913ae0b6e583c8c5f
|
2c555fafc092ab76fbad5a9d4dff9ed74c668d88
|
/Activities/lab2/4_launchvmapi.py
|
65e8151a132a1d7f284720e16e699fbb9bde212e
|
[] |
no_license
|
jcooklin/OpenStackWorkshop
|
3aa69ebdbe90a3c02bc308571e7688e492b87f78
|
d5d9b85194c1d68c1744bebd2c5643b13b8b0663
|
refs/heads/master
| 2021-01-10T19:02:38.106183
| 2012-09-12T22:30:01
| 2012-09-12T22:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
# coding: utf-8
# NOTE(review): Python 2 script (raw_input, print-statement syntax).
# ---------------ACTIVITY1- create a Nova client object
ip = 'http://' + raw_input("Enter your ip: " )
url = ip + ':5000/v2.0'
user = raw_input("Enter Username: ")
pwd = raw_input("Enter Password: ")
tenant = raw_input("Enter Tenant name: ")
from novaclient.v1_1 import client
nc = client.Client(user, pwd, tenant ,url, service_type="compute")
# ---------------ACTIVITY2 -Launch an instance
instancename = raw_input("Please enter instance name: ")
print"Launching instance in cloud:"
# Boots the instance using the first available image and flavor.
#nc.serververs.create(“InstanceName”, nc.images.list()[0], nc.flavors.list()[0])
nc.servers.create(instancename, nc.images.list()[0], nc.flavors.list()[0])
# ---------------ACTIVITY3 -List instances in the cloud
print"List of instances in the cloud"
print(nc.servers.list())
#----------------ACTIVITY4444 - Terminate an instance
#Terminate the instance API
#nc.servers.list()[0].delete
|
[
"root@ubuntux64.(none)"
] |
root@ubuntux64.(none)
|
f170ac1a2e6ba98c415f5e49e892325ff7ce31e1
|
2a365e091c9f9c84decbe50a9504b95f9b68a83a
|
/api_app/urls.py
|
3c6b3a7dd391e83c0150e1610d8531f7346d2644
|
[] |
no_license
|
Manikanta-u94/demo1
|
6505fb1c6ecfd7c867058db7cd9abf188f795ab5
|
cbbf390af8ab4a003bab5c8ed686328ecf55f93a
|
refs/heads/main
| 2023-03-21T06:34:27.844539
| 2021-03-15T01:53:30
| 2021-03-15T01:53:30
| 347,375,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from django.urls import path,include
from . import views
urlpatterns = [
    # List/create endpoint handled by the PostViews class-based view.
    path('api/',views.PostViews.as_view(),name='posts'),
    # Detail endpoint for a single post, looked up by primary key.
    path('post/<int:pk>/', views.postDetail.as_view()),
]
|
[
"manikanta.u94@gmail.com"
] |
manikanta.u94@gmail.com
|
af8ceb2af3f872a54f6d664f9932f2eabcbab643
|
9ac97d1f0a8b6ad5be29f8b0216f00fd85f1bc57
|
/debin_consul/wsgi.py
|
a88fcbdc8b9a7f94a07ea74499e4a361650fed7d
|
[] |
no_license
|
Mikemetal/consulrest
|
4416d74b79a5035774fdccffa635bce30f9e4908
|
f434fdbe3c826a35d73b98f36eaa7e70f5fe29d9
|
refs/heads/master
| 2021-01-13T02:14:15.595438
| 2014-05-03T05:23:25
| 2014-05-03T05:23:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
"""
WSGI config for debin_consul project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "debin_consul.settings")
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
[
"miguelbq88@gmail.com"
] |
miguelbq88@gmail.com
|
06bac58a8436394069cb7609e768cbadf4f20137
|
6111ac220d76bb40fd08fc34dfda81fd32685f65
|
/src/day1/test_solution_part2.py
|
8dd7e5da990ad3d6220a2c4615bb5cfd583a76b1
|
[] |
no_license
|
Markus-Ende/adventofcode2019
|
2aea3e62b65b3ac97a46bfdafd5dd7fe49aec9a8
|
e03c9d780d65908f456f3a00570f90006c39d8f8
|
refs/heads/master
| 2020-09-22T12:59:36.263544
| 2019-12-16T22:10:10
| 2019-12-16T22:11:39
| 225,206,766
| 0
| 0
| null | 2019-12-16T22:11:41
| 2019-12-01T18:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
import pytest
import day1.solution_part2
@pytest.mark.parametrize("input_mass,expected", [
    (12, 2), (14, 2), (1969, 966), (100756, 50346)
])
def test_calculate_fuel_recursively(input_mass, expected):
    """Fuel for a module must include the fuel needed for the fuel itself
    (Advent of Code 2019, day 1 part 2 reference cases)."""
    fuel = day1.solution_part2.calculate_fuel_recursively
    assert fuel(input_mass) == expected
def test_solution_part2():
    """End-to-end check of part 2 against the known puzzle answer.

    Bug fix: this test uses ``day1.solution_part1`` for reading/parsing the
    input, but the module only imports ``day1.solution_part2`` at the top —
    ``day1.solution_part1`` was never imported and the attribute access
    would fail unless solution_part2 happened to import it transitively.
    Import it explicitly here.
    """
    import day1.solution_part1
    input_raw = day1.solution_part1.read('src/day1/input.txt')
    masses = day1.solution_part1.parse_input(input_raw)
    solution = day1.solution_part2.sum_fuel(masses)
    assert solution == 5011553
|
[
"19502754+Markus-Ende@users.noreply.github.com"
] |
19502754+Markus-Ende@users.noreply.github.com
|
4dacaa30f927134d67f697ebba2cba98678ea517
|
efbcdc04e5d2d5917328e23f62f0e2b3b585d393
|
/neuron/analog2digital/soma_mt.py
|
00beb221c13630b51bd31d82783f2be5ac20ea72
|
[] |
no_license
|
satya-arjunan/spatiocyte-models
|
7e43457a170348638998a1382410c00e2d091cd6
|
b5c29b6be758e971ba016d0334670c2afafd2c31
|
refs/heads/master
| 2021-01-17T00:39:29.965797
| 2018-09-06T07:46:17
| 2018-09-06T07:46:17
| 11,064,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,501
|
py
|
import numpy as np
import math
# --- Model constants (SI units: metres, seconds) --------------------------
volumes = [5.8822e-18]
T = 540000  # total simulated time passed to run() at the bottom of the file
#nKinesin = 35*2.258e-17/volumes[0]
nKinesin = 100  # number of KIF motor molecules placed in the soma
pPlusEnd_Detach = 1  # reaction probability for detachment at the MT plus end
VoxelRadius = 0.8e-8
nNeurite = 5  # neurites attached to the soma
nNeuriteMT = 5  # microtubules per neurite
EdgeSpace = VoxelRadius*5
neuriteRadius = 0.2e-6
MTRadius = 12.5e-9
KinesinRadius = 0.4e-8
Filaments = 13  # protofilaments per microtubule
# Soma dimensions are derived so all neurites fit along one face.
neuriteSpace = neuriteRadius*2
somaLength = nNeurite*neuriteRadius*2+neuriteSpace*(nNeurite+1)
somaWidth = somaLength
somaHeight = neuriteRadius*4
inSomaLength = VoxelRadius*6  # how far each neurite extends into the soma
# Per-neurite lengths; defaults overridden below with a 25..5 um gradient.
neuriteLengths = np.empty((nNeurite))
neuriteLengths.fill(5e-6+inSomaLength)
neuriteLengths[0] = 25e-6
neuriteLengths[1] = 20e-6
neuriteLengths[2] = 15e-6
neuriteLengths[3] = 10e-6
neuriteLengths[4] = 5e-6
# Root (bounding) compartment sized to hold soma + longest neurite + margin.
rootSpace = VoxelRadius*20
rootLengths = np.empty((1,3))
rootLengths = (somaWidth+np.amax(neuriteLengths)-inSomaLength+rootSpace*2,
    somaLength+rootSpace*2, somaHeight+rootSpace*2)
neuriteOrigins = np.zeros((nNeurite, 3))
halfRootLengths = np.divide(rootLengths, 2.0)
# Origins are converted to Spatiocyte's normalized [-1, 1] coordinates
# relative to the root; divide-by-zero from a zero half-length is mapped
# to origin 0 (centered) via the errstate/nan_to_num dance below.
somaOrigin = np.zeros((nNeurite, 3))
somaOrigin = (rootSpace+somaWidth/2, rootSpace+somaLength/2,
    rootSpace+somaHeight/2)
with np.errstate(divide='ignore', invalid='ignore'):
  somaOrigin = np.divide(np.subtract(somaOrigin, halfRootLengths),
      halfRootLengths)
  somaOrigin[somaOrigin == np.inf] = 0
  somaOrigin = np.nan_to_num(somaOrigin)
for i in range(nNeurite):
  # Neurite i starts at the soma face and is stacked along Y with spacing.
  neuriteOrigins[i] = np.array([rootSpace+somaWidth+(neuriteLengths[i]-
    inSomaLength)/2,
    rootSpace+neuriteSpace+i*(neuriteRadius*2+neuriteSpace)+neuriteRadius,
    rootSpace+somaHeight/2])
  with np.errstate(divide='ignore', invalid='ignore'):
    neuriteOrigins[i] = np.divide(np.subtract(neuriteOrigins[i],
      halfRootLengths), halfRootLengths)
    neuriteOrigins[i][neuriteOrigins[i] == np.inf] = 0
    neuriteOrigins[i] = np.nan_to_num(neuriteOrigins[i])
def rotatePointAlongVector(P, C, N, angle):
  """Rotate point P by `angle` radians about the axis through C with
  direction N.

  Scalar expansion of Rodrigues' rotation formula for an arbitrary axis
  in 3-D space (N is assumed to be a unit vector).  Returns the rotated
  point as a list [x, y, z].
  """
  px, py, pz = P[0], P[1], P[2]
  ax, ay, az = C[0], C[1], C[2]
  ux, uy, uz = N[0], N[1], N[2]
  cosA = math.cos(angle)
  sinA = math.sin(angle)
  oneMinusCos = 1-cosA
  # Component of the point along the axis direction.
  dot = ux*px+uy*py+uz*pz
  rx = (ax*(uy*uy+uz*uz)-ux*(ay*uy+az*uz-dot))*oneMinusCos+px*cosA+(
      -az*uy+ay*uz-uz*py+uy*pz)*sinA
  ry = (ay*(ux*ux+uz*uz)-uy*(ax*ux+az*uz-dot))*oneMinusCos+py*cosA+(
      az*ux-ax*uz+uz*px-ux*pz)*sinA
  rz = (az*(ux*ux+uy*uy)-uz*(ax*ux+ay*uy-dot))*oneMinusCos+pz*cosA+(
      -ay*ux+ax*uy-uy*px+ux*py)*sinA
  return [rx, ry, rz]
# --- Microtubule lengths and lattice origins inside each neurite ----------
MTLengths = np.zeros(nNeurite)
for i in range(len(neuriteLengths)):
  MTLengths[i] = neuriteLengths[i]-2*EdgeSpace
# MT origins in each neurite's normalized local frame ([-1, 1] per axis).
MTsOriginX = np.zeros((nNeurite, nNeuriteMT))
MTsOriginY = np.zeros((nNeurite, nNeuriteMT))
MTsOriginZ = np.zeros((nNeurite, nNeuriteMT))
for i in range(nNeurite):
  if nNeuriteMT == 1:
    # Single MT centered on the neurite axis.
    MTsOriginX[i][0] = 0.0
    MTsOriginY[i][0] = 0.0
    MTsOriginZ[i][0] = 0.0
  elif nNeuriteMT == 2:
    # Two MTs evenly spaced along Y.
    # BUG FIX: the original referenced `neuriteRadii[i]`, a name that is
    # never defined anywhere in this script (NameError whenever
    # nNeuriteMT is 2, 3 or 4).  All neurites share the scalar radius
    # `neuriteRadius`, so use that instead.
    space = (neuriteRadius*2-MTRadius*2*2)/(2+2)
    MTsOriginY[i][0] = -1+(space+MTRadius)/neuriteRadius
    MTsOriginY[i][1] = 1-(space+MTRadius)/neuriteRadius
  elif nNeuriteMT == 3:
    # Three MTs at 120-degree intervals around the axis (same
    # neuriteRadii -> neuriteRadius fix as above).
    y = neuriteRadius*math.cos(math.pi/3)
    y2 = y*math.cos(math.pi/3)
    z = y*math.sin(math.pi/3)
    MTsOriginY[i][0] = y/neuriteRadius
    MTsOriginY[i][1] = -y2/neuriteRadius
    MTsOriginZ[i][1] = -z/neuriteRadius
    MTsOriginY[i][2] = -y2/neuriteRadius
    MTsOriginZ[i][2] = z/neuriteRadius
  elif nNeuriteMT == 4:
    # Two MTs along Y and two along Z (same neuriteRadii fix as above).
    space = (neuriteRadius*2-MTRadius*2*2)/(2+3)
    MTsOriginY[i][0] = -1+(space+MTRadius)/neuriteRadius
    MTsOriginY[i][1] = 1-(space+MTRadius)/neuriteRadius
    space = (neuriteRadius*2-MTRadius*2*2)/(2+3)
    MTsOriginZ[i][2] = -1+(space+MTRadius)/neuriteRadius
    MTsOriginZ[i][3] = 1-(space+MTRadius)/neuriteRadius
  else:
    # nNeuriteMT >= 5: place MT 0 at a fixed Y offset, then fill slots
    # 1..nNeuriteMT-2 by rotating that point about the neurite (X) axis
    # in equal steps; the last slot stays at the axis center.
    MTsOriginY[i][0] = 2*2.0/6
    P = [0.0, MTsOriginY[i][0], 0.0]
    C = [0.0, 0.0, 0.0]
    N = [1.0, 0.0, 0.0]
    angle = 2*math.pi/(nNeuriteMT-1)
    for j in range(nNeuriteMT-2):
      P = rotatePointAlongVector(P, C, N, angle)
      MTsOriginX[i][j+1] = P[0]
      MTsOriginY[i][j+1] = P[1]
      MTsOriginZ[i][j+1] = P[2]
# --- Spatiocyte stepper and root (bounding) compartment -------------------
sim = theSimulator
s = sim.createStepper('SpatiocyteStepper', 'SS')
s.VoxelRadius = VoxelRadius
s.SearchVacant = 1
s.RemoveSurfaceBias = 1
sim.rootSystem.StepperID = 'SS'
sim.createEntity('Variable', 'Variable:/:LENGTHX').Value = rootLengths[0]
sim.createEntity('Variable', 'Variable:/:LENGTHY').Value = rootLengths[1]
sim.createEntity('Variable', 'Variable:/:LENGTHZ').Value = rootLengths[2]
sim.createEntity('Variable', 'Variable:/:VACANT')
#sim.createEntity('System', 'System:/:Surface').StepperID = 'SS'
#sim.createEntity('Variable', 'Variable:/Surface:DIMENSION').Value = 2
#sim.createEntity('Variable', 'Variable:/Surface:VACANT')
# --- Soma compartment (GEOMETRY 0: cuboid) and its surface ----------------
sim.createEntity('System', 'System:/:Soma').StepperID = 'SS'
sim.createEntity('Variable', 'Variable:/Soma:GEOMETRY').Value = 0
sim.createEntity('Variable', 'Variable:/Soma:LENGTHX').Value = somaWidth
sim.createEntity('Variable', 'Variable:/Soma:LENGTHY').Value = somaLength
sim.createEntity('Variable', 'Variable:/Soma:LENGTHZ').Value = somaHeight
sim.createEntity('Variable', 'Variable:/Soma:ORIGINX').Value = somaOrigin[0]
sim.createEntity('Variable', 'Variable:/Soma:ORIGINY').Value = somaOrigin[1]
sim.createEntity('Variable', 'Variable:/Soma:ORIGINZ').Value = somaOrigin[2]
sim.createEntity('Variable', 'Variable:/Soma:VACANT').Value = -1
sim.createEntity('System', 'System:/Soma:Surface').StepperID = 'SS'
sim.createEntity('Variable', 'Variable:/Soma/Surface:DIMENSION').Value = 2
sim.createEntity('Variable', 'Variable:/Soma/Surface:VACANT')
# --- Neurite compartments (GEOMETRY 2: cylinders), surfaces and MTs -------
for i in range(nNeurite):
  sim.createEntity('System', 'System:/:Neurite%d' %i).StepperID = 'SS'
  sim.createEntity('Variable', 'Variable:/Neurite%d:GEOMETRY' %i).Value = 2
  x = sim.createEntity('Variable', 'Variable:/Neurite%d:LENGTHX' %i)
  x.Value = neuriteLengths[i]
  y = sim.createEntity('Variable', 'Variable:/Neurite%d:LENGTHY' %i)
  y.Value = neuriteRadius*2
  x = sim.createEntity('Variable', 'Variable:/Neurite%d:ORIGINX' %i)
  x.Value = neuriteOrigins[i][0]
  y = sim.createEntity('Variable', 'Variable:/Neurite%d:ORIGINY' %i)
  y.Value = neuriteOrigins[i][1]
  sim.createEntity('Variable', 'Variable:/Neurite%d:ORIGINZ' %i).Value = 0
  sim.createEntity('Variable', 'Variable:/Neurite%d:VACANT' %i)
  # DIFFUSIVE links the neurite interior to the soma so molecules can cross.
  d = sim.createEntity('Variable', 'Variable:/Neurite%d:DIFFUSIVE' %i)
  d.Name = '/:Soma'
  # Create the neurite membrane:
  sim.createEntity('System', 'System:/Neurite%d:Surface' %i).StepperID = 'SS'
  sim.createEntity('Variable',
      'Variable:/Neurite%d/Surface:DIMENSION' %i).Value = 2
  sim.createEntity('Variable', 'Variable:/Neurite%d/Surface:VACANT' %i)
  sim.createEntity('Variable',
      'Variable:/Neurite%d/Surface:DIFFUSIVE' %i).Name = '/Soma:Surface'
  # One MicrotubuleProcess per MT, at the origins computed above; the
  # trailing reference coefficients (-1/-2/-3) mark the vacant lattice,
  # minus-end and plus-end species respectively per Spatiocyte convention.
  for j in range(nNeuriteMT):
    m = sim.createEntity('MicrotubuleProcess',
        'Process:/Neurite%d:Microtubule%d' %(i, j))
    m.OriginX = MTsOriginX[i][j]
    m.OriginY = MTsOriginY[i][j]
    m.OriginZ = MTsOriginZ[i][j]
    m.RotateX = 0
    m.RotateY = 0
    m.RotateZ = 0
    m.Radius = MTRadius
    m.SubunitRadius = KinesinRadius
    m.Length = MTLengths[i]
    m.Filaments = Filaments
    m.Periodic = 0
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:aTUB']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB', '-1']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_M', '-2']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P', '-3']]
# --- Microtubules inside the soma itself ----------------------------------
# 16 Y positions x 3 Z layers (center, +0.5, -0.5) of MTs spanning 80% of
# the soma width.
nSomaMT = 16
mtSpaceY = somaLength/(nSomaMT)
for i in range(nSomaMT):
  for j in range(3):
    # Z layer: j==0 center, j==1 upper half, j==2 lower half.
    OriginZ = 0.0
    if(j != 0):
      if(j == 1):
        OriginZ = 0.5
      else:
        OriginZ = -0.5
    m = theSimulator.createEntity('MicrotubuleProcess',
        'Process:/Soma:Microtubule%d%d' %(i,j))
    m.OriginX = 0
    m.OriginY = (mtSpaceY/2+i*mtSpaceY)/(somaLength/2)-1
    m.OriginZ = OriginZ
    m.RotateX = 0
    m.RotateY = 0
    m.RotateZ = 0
    m.Radius = MTRadius
    m.SubunitRadius = KinesinRadius
    m.Length = somaWidth*0.8
    m.Filaments = Filaments
    m.Periodic = 0
    # Same species wiring as the neurite MTs above.
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP' ]]
    m.VariableReferenceList = [['_', 'Variable:/Soma:aTUB']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB', '-1']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_M', '-2']]
    m.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P', '-3']]
# --- Molecular species in the soma ----------------------------------------
# KIF: free kinesin motors; TUB*: tubulin lattice states (GDP/GTP, with or
# without bound KIF/ATP); aTUB: "activated" tubulin left behind a motor;
# TUB_M/TUB_P: minus/plus MT ends.  Lattice species start at 0 and are
# populated by the MicrotubuleProcess entities created above.
sim.createEntity('Variable', 'Variable:/Soma:KIF').Value = nKinesin
sim.createEntity('Variable', 'Variable:/Soma:TUB_GTP' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_KIF' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_KIF_ATP' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_GTP_KIF' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_GTP_KIF_ATP' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:aTUB' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_M' ).Value = 0
sim.createEntity('Variable', 'Variable:/Soma:TUB_P' ).Value = 0
# --- Visualization logging (snapshot every 10 s of simulated time) --------
v = sim.createEntity('VisualizationLogProcess', 'Process:/Soma:v')
#v.VariableReferenceList = [['_', 'Variable:/Soma:TUB']]
v.VariableReferenceList = [['_', 'Variable:/Soma:aTUB']]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_M']]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P']]
v.VariableReferenceList = [['_', 'Variable:/Soma:KIF']]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF' ]]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP' ]]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF' ]]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP' ]]
v.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP']]
#v.VariableReferenceList = [['_', 'Variable:/Soma/Surface:VACANT']]
#v.VariableReferenceList = [['_', 'Variable:/Soma/Membrane:PlusSensor']]
#v.VariableReferenceList = [['_', 'Variable:/Soma/Membrane:MinusSensor']]
v.LogInterval = 10
#Populate-----------------------------------------------------------------------
#p = sim.createEntity('MoleculePopulateProcess', 'Process:/Soma:pPlusSensor')
#p.VariableReferenceList = [['_', 'Variable:/Soma/Membrane:PlusSensor']]
#p.EdgeX = 1
#
#p = sim.createEntity('MoleculePopulateProcess', 'Process:/Soma:pMinusSensor')
#p.VariableReferenceList = [['_', 'Variable:/Soma/Membrane:MinusSensor']]
#p.EdgeX = -1
p = sim.createEntity('MoleculePopulateProcess', 'Process:/Soma:pTUB_KIF')
p.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF']]
#p = sim.createEntity('MoleculePopulateProcess', 'Process:/Soma:pTUB_GTP')
#p.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP']]
#p.LengthBinFractions = [1, 0.3, 0.8]
#p.Priority = 100 #set high priority for accurate fraction
p = sim.createEntity('MoleculePopulateProcess', 'Process:/Soma:pKIF')
p.VariableReferenceList = [['_', 'Variable:/Soma:KIF']]
#-------------------------------------------------------------------------------
# --- Reaction network -----------------------------------------------------
# DiffusionInfluencedReactionProcess fires on collision with probability p;
# SpatiocyteNextReactionProcess fires with first-order rate k (1/s).
# Reference coefficients: '-1' = consumed, '1' = produced, '0' = condition.
#Cytosolic KIF recruitment to microtubule---------------------------------------
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:b1')
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','1']]
r.p = 0.0001
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:b2')
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','1']]
r.p = 0
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:b3')
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','1']]
r.p = 0.9
#-------------------------------------------------------------------------------
#MT KIF detachment to cytosol---------------------------------------------------
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:detach')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.SearchVacant = 1
r.k = 15
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:detachGTP')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.SearchVacant = 1
r.k = 15
#-------------------------------------------------------------------------------
#Active tubulin inactivation----------------------------------------------------
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:i1')
r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','1']]
r.k = 0.055
#-------------------------------------------------------------------------------
#MT KIF detachment to cytosol at plus end---------------------------------------
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:p1')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.p = pPlusEnd_Detach
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:p2')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.p = pPlusEnd_Detach
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:p3')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.p = pPlusEnd_Detach
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:p4')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_P','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:KIF','1']]
r.p = pPlusEnd_Detach
#-------------------------------------------------------------------------------
#KIF ATP hydrolysis-------------------------------------------------------------
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:h1')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','1']]
r.SearchVacant = 1
r.k = 100
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:h2')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','1']]
r.SearchVacant = 1
r.k = 100
#-------------------------------------------------------------------------------
#KIF ADP phosphorylation--------------------------------------------------------
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:phos1')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','1']]
r.SearchVacant = 1
r.k = 145
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:phos2')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','1']]
r.SearchVacant = 1
r.k = 145
#-------------------------------------------------------------------------------
#KIF ratchet biased walk_-------------------------------------------------------
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:rat1')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','0']] #If BindingSite[1]==TUB
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','1']] #option 1
r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','0']] #Elif BindingSite[1]==TUB_GTP
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','1']] #option 2
r.BindingSite = 1
r.k = 55
#r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:rat1')
#r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','-1']]
#r.VariableReferenceList = [['_', 'Variable:/Soma:aTUB','1']]
#r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','0']] #If BindingSite[1]==TUB
#r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','1']] #option 1
#r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','0']] #Elif BindingSite[1]==TUB_GTP
#r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','1']] #option 2
#r.BindingSite = 1
#r.k = 55
r = sim.createEntity('SpatiocyteNextReactionProcess', 'Process:/Soma:rat2')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','-1']] #A
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']] #C
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','0']] #E
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF_ATP','1']] #D
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','0']] #H
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF_ATP','1']] #F
r.BindingSite = 1
r.k = 55
#-------------------------------------------------------------------------------
#KIF random walk between GTP and GDP tubulins-----------------------------------
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:w1')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','1']]
r.ForcedSequence = 1
r.p = 1
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:w2')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','1']]
r.ForcedSequence = 1
r.p = 1
r = sim.createEntity('DiffusionInfluencedReactionProcess', 'Process:/Soma:w3')
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP','-1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB','1']]
r.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF','1']]
r.ForcedSequence = 1
r.p = 1
#-------------------------------------------------------------------------------
#KIF normal diffusion-----------------------------------------------------------
d = sim.createEntity('DiffusionProcess', 'Process:/Soma:dKIF')
d.VariableReferenceList = [['_', 'Variable:/Soma:KIF']]
d.D = 0.5e-12
d = sim.createEntity('DiffusionProcess', 'Process:/Soma:dTUB_KIF')
d.VariableReferenceList = [['_', 'Variable:/Soma:TUB_KIF']]
d.VariableReferenceList = [['_', 'Variable:/Soma:aTUB', '1']]
d.D = 0.04e-12
d = sim.createEntity('DiffusionProcess', 'Process:/Soma:dTUB_GTP_KIF')
d.VariableReferenceList = [['_', 'Variable:/Soma:TUB_GTP_KIF']]
d.WalkReact = 1
d.D = 0.04e-12
#-------------------------------------------------------------------------------
# Run the simulation for T seconds of simulated time (Spatiocyte built-in).
run(T)
|
[
"satya.arjunan@gmail.com"
] |
satya.arjunan@gmail.com
|
14e219344dbabcdff789037c47a7c521856907a4
|
90b55510c7f1041a8ebc5e7cf1da131b488dc87f
|
/submit_bulk_prof_only.py
|
560a8c50a8687e6cb4013d3bb5ef37f2faedf5d8
|
[] |
no_license
|
alv53/UIUCExpertSearch
|
893fd68022ff536ebde5a0fd8614af2f6afa4b2f
|
db03238b1e9df96294a25fe56622ef9c66996412
|
refs/heads/master
| 2021-01-01T15:17:12.460123
| 2015-05-13T07:06:35
| 2015-05-13T07:06:35
| 35,476,028
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
from elasticsearch import Elasticsearch
import timeit
NUM_BULKS = 30
bulk_data = [[] for x in xrange(NUM_BULKS)]
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = "expertsearch_index_prof_only_uiuc"
curr = 0
print "Creating bulks..."
with open('expertsearch_bulk_prof_only_uiuc.txt', 'r') as f:
while True:
line1 = f.readline().rstrip()
line2 = f.readline().rstrip()
line1 = ''.join(i for i in line1 if ord(i)<128)
line2 = ''.join(i for i in line2 if ord(i)<128)
bulk_data[curr % NUM_BULKS].append(line1)
bulk_data[curr % NUM_BULKS].append(line2)
curr += 1
if not line2:
break
print str(len(bulk_data[0]))
es = Elasticsearch(hosts = [ES_HOST])
# since we are running locally, use one shard and no replicas
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
curr = 0
total = 0
for curr_bulk in bulk_data:
print("bulk indexing... " + str(curr) + " of size " + str(len(curr_bulk)))
start = timeit.default_timer()
res = es.bulk(index = INDEX_NAME, body = curr_bulk, refresh = True)
bulk = open('bulk.log', 'w')
bulk.write("response: " + str(res))
elapsed = timeit.default_timer() - start
total += elapsed
curr += 1
print "Using " + str(NUM_BULKS) + " bulks of size " + str(len(bulk_data[0] )) + " took " + str(total)
|
[
"alvjou@gmail.com"
] |
alvjou@gmail.com
|
378ecbbf34bd6d26ca6133be5c54c69159722ebe
|
46eafaa77280475263decdba64a587c9c6dbd48b
|
/drood/article/urls.py
|
5a1281ef320c784967d25355cdd395c259214f7a
|
[] |
no_license
|
Adyg/drood
|
3b19d39e06455a576f7c3a11001c26a22abe31df
|
d028295afcf6e5f8ddcf1f848882e04f3418febb
|
refs/heads/master
| 2021-01-19T17:59:41.709461
| 2015-12-30T12:27:33
| 2015-12-30T12:27:33
| 35,033,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.conf.urls import patterns, url
# NOTE(review): patterns() with string view names is the pre-1.8 Django
# style (removed in Django 1.10); fine for this project's vintage.
urlpatterns = patterns('article.views',
    # Home page: full article list.
    url(r'^$', 'list', name='home'),
    # Same list view restricted to a single feed.
    url(r'^feed/(?P<feed_id>\d+)/$', 'list', name='home-feed'),)
|
[
"ady.ghiuta@gmail.com"
] |
ady.ghiuta@gmail.com
|
0064709ec9845041deedfc0f5f905b66ebb5d412
|
f9d05bcab8304b36087c1e82f5acebe93a638921
|
/main.py
|
49810a82373abf65b5e056c73032db6cf340da98
|
[
"Apache-2.0"
] |
permissive
|
adrien-bellaiche/ia-cdf-rob-2015
|
0fd06cfd1d9fe2e1753058911fefec13c950d6f3
|
6e66b12cb1cc8a8de4e13a951528842311f8dd3a
|
refs/heads/master
| 2016-09-05T19:17:01.600660
| 2015-05-14T06:13:10
| 2015-05-14T06:13:10
| 27,771,013
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
__author__ = 'adrie_000'
# -*- coding: utf8 -*-
from General import Robot
from time import sleep

if __name__ == '__main__':
    # Translation of the French note below: when this script runs, the
    # software continuously checks whether the mainduino's starter tab is
    # pulled (state held in robot.started: True when pulled, False when in
    # place).  If the tab is re-inserted, the robot's loop terminates; once
    # it is pulled again the system loops anew.  This guards against a tab
    # accidentally removed too early.
    ''' Explication : en lançant ce script, le soft va en permanence checker si la mainduino a la languette tirée (info
    stockée dans robot.started, True si tirée, False si en place).
    Si la languette est repositionnée, la boucle du robot va se terminer.
    Une fois la languette retirée, le système reboucle.
    Ceci permettra de ne pas avoir de soucis avec une tirette accidentellement retirée trop tôt.
    '''
    robot = Robot()
    while True:
        # Poll at 10 Hz; robot.start() blocks until its own loop ends.
        if robot.started:
            robot.start()
        sleep(0.1)
|
[
"adrien.bellaiche@gmail.com"
] |
adrien.bellaiche@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.