from http import HTTPStatus
from pathlib import Path
from io import BytesIO
from flask import request
from ..articles.models import Article
from ..articles.tags.models import Tag
from .utils import login
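# Note (added, assumption): the login() helper imported from .utils is not shown in
# this file; from its use below it is assumed to submit the given email/password to
# the application's /login endpoint through the test client session.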
def test_create_draft_requires_authentication(client):
response = client.get('/en/article/draft/new/')
assert response.status_code == HTTPStatus.FOUND
assert '/login' in response.headers.get('Location')
def test_create_draft_should_display_form(client, editor):
login(client, editor.email, 'password')
response = client.get('/en/article/draft/new/')
assert response.status_code == 200
assert '<input id=title' in response
def test_create_draft_should_generate_article(client, user, editor):
login(client, editor.email, 'password')
response = client.post('/fr/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id
}, follow_redirects=True)
assert response.status_code == 200
assert '<span class=original-language>fr</span>' in response
assert '<h1>Test article</h1>' in response
assert '<p>Article body</p>' in response
def test_create_draft_with_tag(client, user, editor, tag):
login(client, editor.email, 'password')
response = client.post('/en/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
'tag-1': 'Wonderful'
}, follow_redirects=True)
assert response.status_code == 200
article = Article.objects.get(title='Test article')
assert article.tags == [tag]
def test_create_draft_with_tags(client, app, user, editor, tag):
login(client, editor.email, 'password')
language = app.config['LANGUAGES'][0][0]
tag2 = Tag.objects.create(name='Sensational', language=language)
response = client.post('/en/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
'tag-1': 'Wonderful',
'tag-2': 'Sensational'
}, follow_redirects=True)
assert response.status_code == 200
article = Article.objects.get(title='Test article')
assert article.tags == [tag, tag2]
def test_create_draft_with_unknown_tag(client, user, editor, tag):
login(client, editor.email, 'password')
response = client.post('/en/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
'tag-1': 'Wonderful',
'tag-2': 'Sensational'
}, follow_redirects=True)
assert response.status_code == 200
article = Article.objects.get(title='Test article')
tag2 = Tag.objects.get(name='Sensational')
assert article.tags == [tag, tag2]
def test_create_draft_with_preexisting_translation(client, user, editor,
article, translation):
login(client, editor.email, 'password')
response = client.post('/en/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
}, follow_redirects=True)
assert response.status_code == 200
assert '<h1>Test article</h1>' in response
assert '<p>Article body</p>' in response
def test_create_published_draft_should_display_article(client, user, editor):
login(client, editor.email, 'password')
response = client.post('/en/article/draft/new/', data={
'title': 'Test article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
'status': 'published',
}, follow_redirects=True)
assert response.status_code == 200
assert request.url_rule.endpoint == 'articles.detail'
assert '<span class=original-language>en</span>' in response
assert '<h1>Test article</h1>' in response
assert '<p>Article body</p>' in response
def test_draft_editing_should_update_content(client, user, editor):
login(client, editor.email, 'password')
data = {
'title': 'My article',
'summary': 'Summary',
'body': 'Article body',
'authors': [user.id],
}
draft = Article.objects.create(language='en', **data)
data['title'] = 'Updated title'
data['authors'] = user.id
draft_id = draft.id
response = client.post(f'/en/article/draft/{draft.id}/edit/',
data=data, follow_redirects=True)
assert response.status_code == 200
draft.reload()
assert draft.id == draft_id
assert draft.title == 'Updated title'
def test_draft_editing_with_many_authors(client, user, user2, editor):
login(client, editor.email, 'password')
data = {
'title': 'My article',
'summary': 'Summary',
'body': 'Article body',
'authors': [user.id],
}
draft = Article.objects.create(**data, language='en')
data['authors'] = [user.id, user2.id]
response = client.post(f'/en/article/draft/{draft.id}/edit/',
data=data, follow_redirects=True)
assert response.status_code == 200
draft.reload()
assert draft.authors == [user, user2]
def test_draft_image_should_save_and_render(app, client, user, editor):
login(client, editor.email, 'password')
with open(Path(__file__).parent / 'dummy-image.jpg', 'rb') as content:
image = BytesIO(content.read())
data = {
'title': 'My article',
'summary': 'Summary',
'body': 'Article body',
'authors': user.id,
'image': (image, 'image-name.jpg'),
}
response = client.post('/en/article/draft/new/', data=data,
content_type='multipart/form-data',
follow_redirects=True)
assert response.status_code == HTTPStatus.OK
article = Article.objects.first()
assert article.image_filename == '/articles/image-name.jpg'
assert Path(app.config.get('UPLOADS_FOLDER') /
'articles' / 'image-name.jpg').exists()
assert '<img src="/resized-images/' in response
def test_draft_should_not_offer_social_sharing(client, article):
response = client.get(f'/en/article/draft/{article.id}/')
assert response.status_code == 200
assert 'facebook.com/sharer' not in response
def test_visitor_cannot_edit_draft(client, article):
response = client.post(f'/en/article/draft/{article.id}/edit/', data={
'title': 'Updated draft'
})
assert '/login' in response.headers.get('Location')
def test_author_cannot_edit_draft(client, user, article):
login(client, user.email, 'password')
response = client.post(f'/en/article/draft/{article.id}/edit/', data={
'title': 'Updated draft'
})
assert response.status_code == HTTPStatus.FORBIDDEN
def test_access_published_article_should_return_404(client, published_article):
response = client.get(f'/en/article/draft/{published_article.id}/')
assert response.status_code == HTTPStatus.NOT_FOUND
def test_editor_access_drafts_list(client, editor, article):
login(client, editor.email, 'password')
response = client.get('/en/article/draft/')
assert response.status_code == HTTPStatus.OK
assert article.title in response
def test_editor_access_drafts_list_localized(client, editor, article):
login(client, editor.email, 'password')
response = client.get('/fr/article/draft/')
assert response.status_code == HTTPStatus.OK
assert article.title not in response
article.modify(language='fr')
response = client.get('/fr/article/draft/')
assert response.status_code == HTTPStatus.OK
assert article.title in response
def test_author_cannot_access_drafts_list(client, user):
login(client, user.email, 'password')
response = client.get('/en/article/draft/')
assert response.status_code == HTTPStatus.FORBIDDEN
def test_drafts_list_only_displays_drafts(client, editor, article,
published_article):
published_article.modify(title='published article')
login(client, editor.email, 'password')
response = client.get('/en/article/draft/')
assert response.status_code == HTTPStatus.OK
assert article.title in response
assert published_article.title not in response
def test_drafts_list_menu_link_localized_list(client, editor, article):
login(client, editor.email, 'password')
response = client.get('/en/article/draft/')
assert '<a href=/fr/article/draft/>FR</a>' in response
def test_draft_detail_contains_tags_without_links(client, app, tag, article):
language = app.config['LANGUAGES'][0][0]
tag2 = Tag.objects.create(name='Sensational', language=language)
article.modify(tags=[tag, tag2])
response = client.get(f'/en/article/draft/{article.id}/')
assert response.status_code == HTTPStatus.OK
assert 'Wonderful,' in response
assert 'Sensational.' in response
|
import httpretty
from tests import FulcrumTestCase
class ChildRecordTest(FulcrumTestCase):
@httpretty.activate
def test_records_from_form_via_url_params(self):
httpretty.register_uri(httpretty.GET, self.api_root + '/child_records?form_id=cf6f189e-7d50-404f-946a-835952da5083',
body='{"total_count": 1, "current_page": 1, "total_pages": 1, "records": [{"status": "occupied", "updated_by": "Jason Sanford", "latitude": null, "geometry": null, "created_at": "2014-11-30T23:44:55Z", "updated_at": "2014-11-30T23:44:55Z", "created_by": "Jason Sanford", "form_values": {"2541": "2014-11-13", "2c7f": "12:34", "e05a": "Good stuff", "348f": [{"caption": "", "photo_id": "03656c9b-24ad-fed5-abee-6e3b514bb927"}]}, "client_created_at": "2014-11-30T23:44:54Z", "assigned_to_id": null, "version": 1, "updated_by_id": "4f1efa091441405373000445", "longitude": null, "client_updated_at": "2014-11-30T23:44:54Z", "record_id": "beef678b-fb89-4b15-9ee7-1f8be3e2abe7", "created_by_id": "4f1efa091441405373000445", "project_id": null, "changeset_id": "00291a46-232b-417c-a14d-b0be3e7eca94", "id": "5911af53-8c48-502a-2d07-17010aef73f9", "form_id": "cf6f189e-7d50-404f-946a-835952da5083"}], "per_page": 20000}',
status=200)
records = self.fulcrum_api.child_records.search(url_params={'form_id': 'cf6f189e-7d50-404f-946a-835952da5083'})
self.assertIsInstance(records, dict)
self.assertEqual(len(records['records']), 1)
|
from stack import Stack
def reverse_string(input_str):
stack = Stack()
for i in range(len(input_str)):
stack.push(input_str[i])
rev_string=""
while not stack.is_empty():
rev_string+=stack.pop()
return rev_string
input_string = input("Enter your string: ")
print(reverse_string(input_string))
#print(input_string[::-1])
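# Hedged sketch (added, assumption): the Stack class imported from the local `stack`
# module is not shown here. From its use above it only needs push(), pop() and
# is_empty(); a minimal list-backed implementation would look like:
#
# class Stack:
#     def __init__(self):
#         self._items = []
#     def push(self, item):
#         self._items.append(item)
#     def pop(self):
#         return self._items.pop()
#     def is_empty(self):
#         return len(self._items) == 0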
|
from ex110 import resumo
preco = float(input('Digite o preço: '))
resumo(preco, 90, 35)
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
from osc_lib.command import command
from osc_lib import utils
from oslo_log import log as logging
from zaqarclient._i18n import _
from zaqarclient.queues.v1 import cli
def _get_client(obj, parsed_args):
obj.log.debug("take_action(%s)" % parsed_args)
return obj.app.client_manager.messaging
class CreateQueue(cli.CreateQueue):
"""Create a queue"""
pass
class OldCreateQueue(cli.OldCreateQueue):
"""Create a queue"""
pass
class DeleteQueue(cli.DeleteQueue):
"""Delete a queue"""
pass
class OldDeleteQueue(cli.OldDeleteQueue):
"""Delete a queue"""
pass
class ListQueues(command.Lister):
"""List available queues"""
_description = _("List available queues")
log = logging.getLogger(__name__ + ".ListQueues")
def get_parser(self, prog_name):
parser = super(ListQueues, self).get_parser(prog_name)
parser.add_argument(
"--marker",
metavar="<queue_id>",
help="Queue's paging marker")
parser.add_argument(
"--limit",
metavar="<limit>",
help="Page size limit")
parser.add_argument(
"--detailed",
action="store_true",
help="If show detailed information of queue")
parser.add_argument(
"--with_count",
action="store_true",
help="If show amount information of queue")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
kwargs = {}
columns = ["Name"]
if parsed_args.marker is not None:
kwargs["marker"] = parsed_args.marker
if parsed_args.limit is not None:
kwargs["limit"] = parsed_args.limit
if parsed_args.detailed is not None and parsed_args.detailed:
kwargs["detailed"] = parsed_args.detailed
columns.extend(["Metadata_Dict", "Href"])
if parsed_args.with_count is not None and parsed_args.with_count:
kwargs["with_count"] = parsed_args.with_count
data, count = client.queues(**kwargs)
if count:
print("Queues in total: %s" % count)
columns = tuple(columns)
return (columns, (utils.get_item_properties(s, columns) for s in data))
class OldListQueues(cli.OldListQueues):
"""List available queues"""
pass
class GetQueueStats(cli.GetQueueStats):
"""Get queue stats"""
pass
class OldGetQueueStats(cli.OldGetQueueStats):
"""Get queue stats"""
pass
class SetQueueMetadata(command.Command):
"""Set queue metadata"""
_description = _("Set queue metadata")
log = logging.getLogger(__name__ + ".SetQueueMetadata")
def get_parser(self, prog_name):
parser = super(SetQueueMetadata, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue")
parser.add_argument(
"queue_metadata",
metavar="<queue_metadata>",
help="Queue metadata, All the metadata of "
"the queue will be replaced by queue_metadata")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
queue_name = parsed_args.queue_name
queue_metadata = parsed_args.queue_metadata
if (client.api_version == 1 and
not client.queue(queue_name, auto_create=False).exists()):
raise RuntimeError("Queue(%s) does not exist." % queue_name)
try:
valid_metadata = json.loads(queue_metadata)
except ValueError:
raise RuntimeError("Queue metadata(%s) is not a valid json." %
queue_metadata)
client.queue(queue_name, auto_create=False).\
metadata(new_meta=valid_metadata)
class OldSetQueueMetadata(cli.OldSetQueueMetadata):
"""Set queue metadata"""
pass
class GetQueueMetadata(cli.GetQueueMetadata):
"""Get queue metadata"""
pass
class OldGetQueueMetadata(cli.OldGetQueueMetadata):
"""Get queue metadata"""
pass
class PostMessages(command.Command):
"""Post messages for a given queue"""
_description = _("Post messages for a given queue")
log = logging.getLogger(__name__ + ".PostMessages")
def get_parser(self, prog_name):
parser = super(PostMessages, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue")
parser.add_argument(
"messages",
type=json.loads,
metavar="<messages>",
help="Messages to be posted.")
parser.add_argument(
"--client-id",
metavar="<client_id>",
default=os.environ.get("OS_MESSAGE_CLIENT_ID"),
help="A UUID for each client instance.")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
if not parsed_args.client_id:
raise AttributeError("<--client-id> option is missing and "
"environment variable OS_MESSAGE_CLIENT_ID "
"is not set. Please at least either pass in "
"the client id or set the environment "
"variable")
else:
client.client_uuid = parsed_args.client_id
queue = client.queue(parsed_args.queue_name)
queue.post(parsed_args.messages)
class OldPostMessages(PostMessages):
"""Post messages for a given queue"""
_description = _("Post messages for a given queue")
# TODO(wanghao): Remove this class and ``message post`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging message post" '
'instead.'))
return super(OldPostMessages, self).take_action(parsed_args)
class ListMessages(command.Lister):
"""List all messages for a given queue"""
_description = _("List all messages for a given queue")
log = logging.getLogger(__name__ + ".ListMessages")
def get_parser(self, prog_name):
parser = super(ListMessages, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue")
parser.add_argument(
"--message-ids",
metavar="<message_ids>",
help="List of messages' ids to retrieve")
parser.add_argument(
"--limit",
metavar="<limit>",
type=int,
help="Maximum number of messages to get")
parser.add_argument(
"--echo",
action="store_true",
help="Whether to get this client's own messages")
parser.add_argument(
"--include-claimed",
action="store_true",
help="Whether to include claimed messages")
parser.add_argument(
"--include-delayed",
action="store_true",
help="Whether to include delayed messages")
parser.add_argument(
"--client-id",
metavar="<client_id>",
default=os.environ.get("OS_MESSAGE_CLIENT_ID"),
help="A UUID for each client instance.")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
if not parsed_args.client_id:
raise AttributeError("<--client-id> option is missing and "
"environment variable OS_MESSAGE_CLIENT_ID "
"is not set. Please at least either pass in "
"the client id or set the environment "
"variable")
else:
client.client_uuid = parsed_args.client_id
kwargs = {}
if parsed_args.limit is not None:
kwargs["limit"] = parsed_args.limit
if parsed_args.echo is not None:
kwargs["echo"] = parsed_args.echo
if parsed_args.include_claimed is not None:
kwargs["include_claimed"] = parsed_args.include_claimed
if parsed_args.include_delayed is not None:
kwargs["include_delayed"] = parsed_args.include_delayed
queue = client.queue(parsed_args.queue_name)
if parsed_args.message_ids:
messages = queue.messages(parsed_args.message_ids.split(','),
**kwargs)
else:
messages = queue.messages(**kwargs)
columns = ("ID", "Body", "TTL", "Age", "Claim ID", "Checksum")
return (columns,
(utils.get_item_properties(s, columns) for s in messages))
class OldListMessages(ListMessages):
"""List all messages for a given queue"""
_description = _("List all messages for a given queue")
# TODO(wanghao): Remove this class and ``message list`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging message list" '
'instead.'))
return super(OldListMessages, self).take_action(parsed_args)
class PurgeQueue(command.Command):
"""Purge a queue"""
_description = _("Purge a queue")
log = logging.getLogger(__name__ + ".PurgeQueue")
def get_parser(self, prog_name):
parser = super(PurgeQueue, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue")
parser.add_argument(
"--resource_types",
metavar="<resource_types>",
action='append',
choices=['messages', 'subscriptions'],
help="Resource types want to be purged.")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
queue_name = parsed_args.queue_name
client.queue(queue_name).purge(
resource_types=parsed_args.resource_types)
class OldPurgeQueue(PurgeQueue):
"""Purge a queue"""
_description = _("Purge a queue")
# TODO(wanghao): Remove this class and ``queue purge`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging queue purge" '
'instead.'))
return super(OldPurgeQueue, self).take_action(parsed_args)
class CreatePool(cli.CreatePool):
"""Create a pool"""
pass
class OldCreatePool(cli.OldCreatePool):
"""Create a pool"""
pass
class ShowPool(cli.ShowPool):
"""Display pool details"""
pass
class OldShowPool(cli.OldShowPool):
"""Display pool details"""
pass
class UpdatePool(cli.UpdatePool):
"""Update a pool attribute"""
pass
class OldUpdatePool(cli.OldUpdatePool):
"""Update a pool attribute"""
pass
class DeletePool(cli.DeletePool):
"""Delete a pool"""
pass
class OldDeletePool(cli.OldDeletePool):
"""Delete a pool"""
pass
class ListPools(cli.ListPools):
"""List available Pools"""
pass
class OldListPools(cli.OldListPools):
"""List available Pools"""
pass
class DeleteFlavor(cli.DeleteFlavor):
"""Delete a flavor"""
pass
class ShowFlavor(cli.ShowFlavor):
"""Display flavor details"""
pass
class UpdateFlavor(cli.UpdateFlavor):
"""Update a flavor's attributes"""
pass
class CreateFlavor(cli.CreateFlavor):
"""Create a pool flavor"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.messaging
kwargs = {}
if parsed_args.capabilities != {}:
raise AttributeError("<--capabilities> option is only\
available in client api version < 2")
pool_list = None
if parsed_args.pool_list:
pool_list = parsed_args.pool_list.split(',')
data = client.flavor(parsed_args.flavor_name,
pool_list=pool_list,
**kwargs)
columns = ('Name', 'Pool list', 'Capabilities')
return columns, utils.get_item_properties(data, columns)
class ListFlavors(cli.ListFlavors):
"""List available flavors"""
pass
class CreateSubscription(command.ShowOne):
"""Create a subscription for queue"""
_description = _("Create a subscription for queue")
log = logging.getLogger(__name__ + ".CreateSubscription")
def get_parser(self, prog_name):
parser = super(CreateSubscription, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue to subscribe to")
parser.add_argument(
"subscriber",
metavar="<subscriber>",
help="Subscriber which will be notified")
parser.add_argument(
"ttl",
metavar="<ttl>",
type=int,
help="Time to live of the subscription in seconds")
parser.add_argument(
"--options",
type=json.loads,
default={},
metavar="<options>",
help="Metadata of the subscription in JSON format")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
kwargs = {'options': parsed_args.options}
if parsed_args.subscriber:
kwargs['subscriber'] = parsed_args.subscriber
if parsed_args.ttl:
kwargs['ttl'] = parsed_args.ttl
data = client.subscription(parsed_args.queue_name, **kwargs)
if not data:
raise RuntimeError('Failed to create subscription for (%s).' %
parsed_args.subscriber)
columns = ('ID', 'Subscriber', 'TTL', 'Options')
return columns, utils.get_item_properties(data, columns)
class OldCreateSubscription(CreateSubscription):
"""Create a subscription for queue"""
_description = _("Create a subscription for queue")
# TODO(wanghao): Remove this class and ``subscription create`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging subscription create" '
'instead.'))
return super(OldCreateSubscription, self).take_action(parsed_args)
class UpdateSubscription(command.ShowOne):
"""Update a subscription"""
_description = _("Update a subscription")
log = logging.getLogger(__name__ + ".UpdateSubscription")
def get_parser(self, prog_name):
parser = super(UpdateSubscription, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue to subscribe to")
parser.add_argument(
"subscription_id",
metavar="<subscription_id>",
help="ID of the subscription"
)
parser.add_argument(
"--subscriber",
metavar="<subscriber>",
help="Subscriber which will be notified")
parser.add_argument(
"--ttl",
metavar="<ttl>",
type=int,
help="Time to live of the subscription in seconds")
parser.add_argument(
"--options",
type=json.loads,
default={},
metavar="<options>",
help="Metadata of the subscription in JSON format")
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
data = {'subscriber': parsed_args.subscriber,
'ttl': parsed_args.ttl,
'options': parsed_args.options}
kwargs = {'id': parsed_args.subscription_id}
subscription = client.subscription(parsed_args.queue_name,
auto_create=False, **kwargs)
subscription.update(data)
columns = ('ID', 'Subscriber', 'TTL', 'Options')
return columns, utils.get_item_properties(data, columns)
class OldUpdateSubscription(UpdateSubscription):
"""Update a subscription"""
_description = _("Update a subscription")
# TODO(wanghao): Remove this class and ``subscription update`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging subscription update" '
'instead.'))
return super(OldUpdateSubscription, self).take_action(parsed_args)
class DeleteSubscription(command.Command):
"""Delete a subscription"""
_description = _("Delete a subscription")
log = logging.getLogger(__name__ + ".DeleteSubscription")
def get_parser(self, prog_name):
parser = super(DeleteSubscription, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue for the subscription")
parser.add_argument(
"subscription_id",
metavar="<subscription_id>",
help="ID of the subscription"
)
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
client.subscription(parsed_args.queue_name,
id=parsed_args.subscription_id,
auto_create=False).delete()
class OldDeleteSubscription(DeleteSubscription):
"""Delete a subscription"""
_description = _("Delete a subscription")
# TODO(wanghao): Remove this class and ``subscription delete`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging subscription delete" '
'instead.'))
return super(OldDeleteSubscription, self).take_action(parsed_args)
class ShowSubscription(command.ShowOne):
"""Display subscription details"""
_description = _("Display subscription details")
log = logging.getLogger(__name__ + ".ShowSubscription")
def get_parser(self, prog_name):
parser = super(ShowSubscription, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue to subscribe to"
)
parser.add_argument(
"subscription_id",
metavar="<subscription_id>",
help="ID of the subscription"
)
return parser
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
kwargs = {'id': parsed_args.subscription_id}
pool_data = client.subscription(parsed_args.queue_name,
**kwargs)
columns = ('ID', 'Subscriber', 'TTL', 'Age', 'Confirmed', 'Options')
return columns, utils.get_dict_properties(pool_data.__dict__, columns)
class OldShowSubscription(ShowSubscription):
"""Display subscription details"""
_description = _("Display subscription details")
# TODO(wanghao): Remove this class and ``subscription show`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging subscription show" '
'instead.'))
return super(OldShowSubscription, self).take_action(parsed_args)
class ListSubscriptions(command.Lister):
"""List available subscriptions"""
_description = _("List available subscriptions")
log = logging.getLogger(__name__ + ".ListSubscriptions")
def get_parser(self, prog_name):
parser = super(ListSubscriptions, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue to subscribe to")
parser.add_argument(
"--marker",
metavar="<subscription_id>",
help="Subscription's paging marker, "
"the ID of the last subscription of the previous page")
parser.add_argument(
"--limit",
metavar="<limit>",
help="Page size limit, default value is 20")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
client = self.app.client_manager.messaging
kwargs = {'queue_name': parsed_args.queue_name}
if parsed_args.marker is not None:
kwargs["marker"] = parsed_args.marker
if parsed_args.limit is not None:
kwargs["limit"] = parsed_args.limit
data = client.subscriptions(**kwargs)
columns = ('ID', 'Subscriber', 'TTL', 'Age', 'Confirmed', 'Options')
return (columns,
(utils.get_item_properties(s, columns) for s in data))
class OldListSubscriptions(ListSubscriptions):
"""List available subscriptions"""
_description = _("List available subscriptions")
# TODO(wanghao): Remove this class and ``subscription list`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging subscription list" '
'instead.'))
return super(OldListSubscriptions, self).take_action(parsed_args)
class CreateClaim(cli.CreateClaim):
"""Create claim and return a list of claimed messages"""
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
kwargs = {}
if parsed_args.ttl is not None:
kwargs["ttl"] = parsed_args.ttl
if parsed_args.grace is not None:
kwargs["grace"] = parsed_args.grace
if parsed_args.limit is not None:
kwargs["limit"] = parsed_args.limit
queue = client.queue(parsed_args.queue_name, auto_create=False)
keys = ("claim_id", "id", "ttl", "age", 'body', "checksum")
columns = ("Claim_ID", "Message_ID", "TTL", "Age", "Messages",
"Checksum")
data = queue.claim(**kwargs)
return (columns,
(utils.get_item_properties(s, keys) for s in data))
class OldCreateClaim(cli.OldCreateClaim):
"""Create claim and return a list of claimed messages"""
pass
class QueryClaim(cli.QueryClaim):
"""Display claim details"""
pass
class OldQueryClaim(cli.OldQueryClaim):
"""Display claim details"""
pass
class RenewClaim(cli.RenewClaim):
"""Renew a claim"""
pass
class OldRenewClaim(cli.OldRenewClaim):
"""Renew a claim"""
pass
class ReleaseClaim(cli.ReleaseClaim):
"""Delete a claim"""
pass
class OldReleaseClaim(cli.OldReleaseClaim):
"""Delete a claim"""
pass
class CreateSignedUrl(command.ShowOne):
"""Create a pre-signed url"""
_description = _("Create a pre-signed url")
log = logging.getLogger(__name__ + ".CreateSignedUrl")
def get_parser(self, prog_name):
parser = super(CreateSignedUrl, self).get_parser(prog_name)
parser.add_argument(
"queue_name",
metavar="<queue_name>",
help="Name of the queue")
parser.add_argument(
"--paths",
metavar="<paths>",
default="messages",
help="Allowed paths in a comma-separated list. "
"Options: messages, subscriptions, claims")
parser.add_argument(
"--ttl-seconds",
metavar="<ttl_seconds>",
type=int,
help="Length of time (in seconds) until the signature expires")
parser.add_argument(
"--methods",
metavar="<methods>",
default="GET",
help="HTTP methods to allow as a comma-separated list. "
"Options: GET, HEAD, OPTIONS, POST, PUT, DELETE")
return parser
allowed_paths = ("messages", "subscriptions", "claims")
def take_action(self, parsed_args):
client = self.app.client_manager.messaging
queue = client.queue(parsed_args.queue_name, auto_create=False)
paths = parsed_args.paths.split(',')
if not all([p in self.allowed_paths for p in paths]):
print("Invalid path supplied! Received {}. "
"Valid paths are: messages, subscriptions, "
"claims".format(','.join(paths)))
kwargs = {
'methods': parsed_args.methods.split(','),
'paths': paths,
}
if parsed_args.ttl_seconds:
kwargs['ttl_seconds'] = parsed_args.ttl_seconds
data = queue.signed_url(**kwargs)
fields = ('Paths', 'Methods', 'Expires', 'Signature', 'Project ID')
return fields, (
','.join(data['paths']),
','.join(data['methods']),
data['expires'],
data['signature'],
data['project']
)
class OldCreateSignedUrl(CreateSignedUrl):
"""Create a pre-signed url"""
_description = _("Create a pre-signed url")
# TODO(wanghao): Remove this class and ``queue signed url`` command
# after Queen.
# This notifies cliff to not display the help for this command
deprecated = True
log = logging.getLogger('deprecated')
def take_action(self, parsed_args):
self.log.warning(_('This command has been deprecated. '
'Please use "messaging queue signed url" '
'instead.'))
return super(OldCreateSignedUrl, self).take_action(parsed_args)
class Ping(command.ShowOne):
"""Check if Zaqar server is alive or not"""
_description = _("Check if Zaqar server is alive or not")
log = logging.getLogger(__name__ + ".Ping")
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
columns = ('Pingable', )
return columns, utils.get_dict_properties({'pingable': client.ping()},
columns)
class Health(command.Command):
"""Display detailed health status of Zaqar server"""
_description = _("Display detailed health status of Zaqar server")
log = logging.getLogger(__name__ + ".Health")
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
health = client.health()
print(json.dumps(health, indent=4, sort_keys=True))
class HomeDoc(command.Command):
"""Display the resource doc of Zaqar server"""
_description = _("Display detailed resource doc of Zaqar server")
log = logging.getLogger(__name__ + ".HomeDoc")
def take_action(self, parsed_args):
client = _get_client(self, parsed_args)
homedoc = client.homedoc()
print(json.dumps(homedoc, indent=4, sort_keys=True))
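# Hedged usage notes (added, assumption): these classes are exposed as
# python-openstackclient plugin commands via entry points defined elsewhere; only the
# "messaging ..." command names quoted in the deprecation warnings above are confirmed
# by this module. Example invocations built from the parsers defined above:
#
#   openstack messaging queue purge my_queue --resource_types messages
#   openstack messaging message list my_queue --limit 5 --include-claimed \
#       --client-id 00000000-0000-0000-0000-000000000000
#   openstack messaging subscription create my_queue http://example.com/hook 3600
#
# Each command maps onto the corresponding take_action() implementation above.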
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# License: © 2021 Achille-Tâm GUILCHARD All Rights Reserved
# Author: Achille-Tâm GUILCHARD
# Usage: python3 build_docker_and_launch_inference.py --workdir <DIR> --imgdir <DIR>
import os
import subprocess
import argparse
import shutil
from termcolor import colored
def parse_arguments():
parser = argparse.ArgumentParser(description='Automatic Launching of Inference (Detection and Classification)')
parser.add_argument('--workdir', type=str, default="./", help='Where the entries of the program are stored.')
parser.add_argument('--imgdir', type=str, default="./", help='Where the input images are stored.')
return parser.parse_args()
print("")
print(colored('//////////////////////', 'blue'),colored('///////////////////////', 'white'),colored('//////////////////////', 'red'))
print(colored('//', 'blue'), colored('Automatic Launching of Inference (Detection and Classification)', 'white'), colored('//', 'red'))
print(colored('//////////////////////', 'blue'),colored('///////////////////////', 'white'),colored('//////////////////////', 'red'))
print(colored('© 2021 Achille-Tâm GUILCHARD All Rights Reserved', 'red'))
print("")
args = parse_arguments()
workDir = args.workdir
imgDir = args.imgdir
nb_cpu = 7
docker_run_cmd = 'docker run --rm -it --cpuset-cpus="0-' + str(nb_cpu) +'" -u $(id -u) -v ' + workDir + ':/tmp inference_detection_classification:latest '
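# What the docker run command above does (descriptive note):
#   --rm                 removes the container when it exits
#   -it                  runs with an interactive TTY
#   --cpuset-cpus=0-N    pins the container to CPUs 0..nb_cpu
#   -u $(id -u)          runs as the current host user id
#   -v workDir:/tmp      bind-mounts the work directory at /tmp inside the container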
print(colored('Entries summary', 'green'))
print(" > workDir: " + str(workDir))
print(" > imgDir: " + str(imgDir))
imgDir = os.path.basename(imgDir)
# Build the Docker image
print("")
print(colored('/////////', 'blue'),colored('////////', 'white'),colored('/////////', 'red'))
print(colored('//', 'blue'), colored('Building the Docker image', 'white'), colored('//', 'red'))
print(colored('/////////', 'blue'),colored('////////', 'white'),colored('/////////', 'red'))
print("")
cmd_build_docker = "docker build -t inference_detection_classification:latest ."
error_code = subprocess.call(cmd_build_docker, shell=True)
if error_code != 0:
print('Docker build failed!')
exit(error_code)
# Launch inference
print("")
print(colored('///////', 'blue'),colored('///////', 'white'),colored('////////', 'red'))
print(colored('//', 'blue'), colored('Launching inference!', 'white'), colored('//', 'red'))
print(colored('///////', 'blue'),colored('///////', 'white'),colored('////////', 'red'))
print("")
# Create output directory
shutil.rmtree(workDir + "/results", ignore_errors=True)
os.makedirs(workDir + "/results", exist_ok=True)
# Launch inference
cmd_launch_inference = docker_run_cmd + 'python3 /tmp/inference_detection_classification.py --input /tmp/' + imgDir + ' --output /tmp/results'
print(colored("launching " + cmd_launch_inference, 'red'))
print("")
if subprocess.call(cmd_launch_inference, shell=True) != 0:
print('Inference failed!')
exit(-1)
print("")
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShareBandwidthTypeShowResp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'bandwidth_type': 'str',
'public_border_group': 'str',
'created_at': 'str',
'updated_at': 'str',
'name_en': 'str',
'name_zh': 'str',
'description': 'str'
}
attribute_map = {
'id': 'id',
'bandwidth_type': 'bandwidth_type',
'public_border_group': 'public_border_group',
'created_at': 'created_at',
'updated_at': 'updated_at',
'name_en': 'name_en',
'name_zh': 'name_zh',
'description': 'description'
}
def __init__(self, id=None, bandwidth_type=None, public_border_group=None, created_at=None, updated_at=None, name_en=None, name_zh=None, description=None):
"""ShareBandwidthTypeShowResp - a model defined in huaweicloud sdk"""
self._id = None
self._bandwidth_type = None
self._public_border_group = None
self._created_at = None
self._updated_at = None
self._name_en = None
self._name_zh = None
self._description = None
self.discriminator = None
if id is not None:
self.id = id
if bandwidth_type is not None:
self.bandwidth_type = bandwidth_type
if public_border_group is not None:
self.public_border_group = public_border_group
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if name_en is not None:
self.name_en = name_en
if name_zh is not None:
self.name_zh = name_zh
if description is not None:
self.description = description
@property
def id(self):
"""Gets the id of this ShareBandwidthTypeShowResp.
ID of the supported bandwidth type
:return: The id of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShareBandwidthTypeShowResp.
ID of the supported bandwidth type
:param id: The id of this ShareBandwidthTypeShowResp.
:type: str
"""
self._id = id
@property
def bandwidth_type(self):
"""Gets the bandwidth_type of this ShareBandwidthTypeShowResp.
Bandwidth type
:return: The bandwidth_type of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._bandwidth_type
@bandwidth_type.setter
def bandwidth_type(self, bandwidth_type):
"""Sets the bandwidth_type of this ShareBandwidthTypeShowResp.
Bandwidth type
:param bandwidth_type: The bandwidth_type of this ShareBandwidthTypeShowResp.
:type: str
"""
self._bandwidth_type = bandwidth_type
@property
def public_border_group(self):
"""Gets the public_border_group of this ShareBandwidthTypeShowResp.
Central site or edge site; shown by default
:return: The public_border_group of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._public_border_group
@public_border_group.setter
def public_border_group(self, public_border_group):
"""Sets the public_border_group of this ShareBandwidthTypeShowResp.
Central site or edge site; shown by default
:param public_border_group: The public_border_group of this ShareBandwidthTypeShowResp.
:type: str
"""
self._public_border_group = public_border_group
@property
def created_at(self):
"""Gets the created_at of this ShareBandwidthTypeShowResp.
Creation time
:return: The created_at of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ShareBandwidthTypeShowResp.
Creation time
:param created_at: The created_at of this ShareBandwidthTypeShowResp.
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this ShareBandwidthTypeShowResp.
Update time
:return: The updated_at of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this ShareBandwidthTypeShowResp.
Update time
:param updated_at: The updated_at of this ShareBandwidthTypeShowResp.
:type: str
"""
self._updated_at = updated_at
@property
def name_en(self):
"""Gets the name_en of this ShareBandwidthTypeShowResp.
English name of the bandwidth type
:return: The name_en of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._name_en
@name_en.setter
def name_en(self, name_en):
"""Sets the name_en of this ShareBandwidthTypeShowResp.
English name of the bandwidth type
:param name_en: The name_en of this ShareBandwidthTypeShowResp.
:type: str
"""
self._name_en = name_en
@property
def name_zh(self):
"""Gets the name_zh of this ShareBandwidthTypeShowResp.
Chinese name of the bandwidth type
:return: The name_zh of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._name_zh
@name_zh.setter
def name_zh(self, name_zh):
"""Sets the name_zh of this ShareBandwidthTypeShowResp.
Chinese name of the bandwidth type
:param name_zh: The name_zh of this ShareBandwidthTypeShowResp.
:type: str
"""
self._name_zh = name_zh
@property
def description(self):
"""Gets the description of this ShareBandwidthTypeShowResp.
Description of the bandwidth type
:return: The description of this ShareBandwidthTypeShowResp.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ShareBandwidthTypeShowResp.
Description of the bandwidth type
:param description: The description of this ShareBandwidthTypeShowResp.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShareBandwidthTypeShowResp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
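# Hedged usage sketch (added; not part of the original SDK module), using only the
# attributes defined above. Fields left unset are returned as None by to_dict():
#
# resp = ShareBandwidthTypeShowResp(id='example-id', bandwidth_type='share',
#                                   public_border_group='center')
# print(resp.to_dict())
# print(resp == ShareBandwidthTypeShowResp(id='example-id'))  # False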
|
import functools
import logging
from datasets import config, load_dataset
from torch.utils.data import DataLoader
from transformers import default_data_collator
from bsmetadata.metadata_utils import add_metadata_and_chunk_examples
logger = logging.getLogger(__name__)
load_dataset = functools.partial(load_dataset, use_auth_token=True)
def get_dataloaders(tokenizer, args):
"""
Args:
tokenizer: a huggingface/transformers tokenizer
args: a DataConfig
Returns:
a training dataloader and one or more validation dataloaders
validation dataloaders should be in a dictionary
each dataloader should yield {str: torch.Tensor(cpu) }
dictionary keys may have 'metadata_mask'
other fields will be passed to model
note: metadata_mask should be padded
Example:
train_dataloader, val_dataloaders = get_dataloaders(...)
for batch in train_dataloader:
metadata_mask = batch.get('metadata_mask', None)
outputs = model(**batch)
metrics = loss_fn(batch, outputs, metadata_mask)
"""
# Mostly copy/paste from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm_no_trainer.py
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
if not data_files:
data_files = None
logger.info(f"Start to load dataset, the result will be cached at {config.HF_DATASETS_CACHE}")
if args.dataset_name is not None:
logger.info(
"Downloading with arguments: "
f"dataset_name={args.dataset_name}, "
f"dataset_config_name={args.dataset_config_name}, "
f"data_files={data_files}, "
f"cache_dir={args.cache_dir},"
)
# Downloading and loading a dataset from the hub.
datasets = load_dataset(
args.dataset_name,
args.dataset_config_name,
data_files=data_files,
cache_dir=args.cache_dir,
keep_in_memory=False,
)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
cache_dir=args.cache_dir,
)
datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
cache_dir=args.cache_dir,
)
else:
logger.info("Loading dataset from extension script")
extension = args.train_file.split(".")[-1] if not args.extension else args.extension
if extension == "txt":
raise ValueError(
"You have entered a text file for the train data, but this type of file cannot contain metadata "
"columns. Wouldn't you rather have a file in json/jsonl or pandas format?"
)
if extension == "jsonl":
extension = "json"
datasets = load_dataset(extension, data_files=data_files, cache_dir=args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
cache_dir=args.cache_dir,
)
datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
cache_dir=args.cache_dir,
)
logger.info(f"Dataset loaded: {datasets}")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
column_names = datasets["train"].column_names
logger.info("Start to add metadata and chunk examples")
# First we pre-process our text and metadata
datasets = datasets.map(
functools.partial(add_metadata_and_chunk_examples, tokenizer=tokenizer, cfg=args.metadata_config),
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc="Pre-process the text and metadata to create new samples",
remove_columns=column_names,
batch_size=args.map_batch_size,
)
logger.info("Add metadata and chunk examples finished")
def create_labels_column(examples):
examples["labels"] = examples["input_ids"].copy()
return examples
logger.info("Create labels column")
# Then we add the column containing the labels
datasets = datasets.map(
create_labels_column,
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc="Create labels column",
batch_size=args.map_batch_size,
)
logger.info("Creating labels column finished")
train_dataset = datasets["train"]
val_dataset = datasets["validation"]
logger.info(f" Num train examples = {len(train_dataset)}")
logger.info(f" Num validation examples = {len(val_dataset)}")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset,
shuffle=True,
collate_fn=default_data_collator,
batch_size=args.per_device_train_batch_size,
)
val_dataloader1 = DataLoader(
val_dataset,
collate_fn=default_data_collator,
batch_size=args.per_device_eval_batch_size,
)
return train_dataloader, {"val1": val_dataloader1}
|
def main(request, response):
"""
Returns a response with a Set-Cookie header based on the query params.
The body will be "1" if the cookie is present in the request and `drop` parameter is "0",
otherwise the body will be "0".
"""
same_site = request.GET.first("same-site")
cookie_name = request.GET.first("cookie-name")
drop = request.GET.first("drop")
cookie_in_request = "0"
cookie = "%s=1; Secure; SameSite=%s" % (cookie_name, same_site)
if drop == "1":
cookie += "; Max-Age=0"
if request.cookies.get(cookie_name):
cookie_in_request = request.cookies[cookie_name].value
headers = [('Content-Type', 'text/html'), ('Set-Cookie', cookie)]
return (200, headers, cookie_in_request)
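# Illustrative example (added, derived from the handler above): a request such as
#   GET <this-handler>?same-site=None&cookie-name=myCookie&drop=0
# responds with
#   Set-Cookie: myCookie=1; Secure; SameSite=None
# and a body of "1" only if the browser already sent myCookie on this request
# (otherwise "0"); with drop=1 the cookie is additionally expired via "; Max-Age=0".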
|
import streamlit as st
from core.utils.financial_plots import *
from core.utils.financial_data import *
st.title('FORECASTING')
ticker = 'AAPL'
company_name = 'APPLE'
close_prices = get_data(ticker)
st.plotly_chart(plot_historical_price(ticker,company_name,close_prices))
st.plotly_chart(plot_return_price(ticker,company_name,close_prices))
st.plotly_chart(plot_return_hist(ticker,company_name,close_prices))
|
# Name: Robert Rijksen
# Purpose: Neatly write all results to a CSV file, which can then be used
# to insert everything at once into MySQL, into the Resultaten_Blast table
import mysql.connector
import pickle
def main():
input_file = open('Volledige_blast', 'rb')
# The file is the pickle file in which the BLAST results
# are stored as a dictionary
dicti = pickle.load(input_file)
input_file.close()
filter_dict(dicti)
def filter_dict(dictionary):
"""
:param dictionary: The dictionary passed in here as a parameter
is the same dictionary that is pickled in the BLAST script
"""
verbinding = mysql.connector.connect(
host="hannl-hlo-bioinformatica-mysqlsrv.mysql.database.azure.com",
user="rohtv@hannl-hlo-bioinformatica-mysqlsrv",
db="rohtv", password='pwd123')
cursor = verbinding.cursor()  # the connection to the database
bestand = open('__alle_blast_resultaten__versie2.csv', 'a+')
for x in dictionary:
cursor.execute("""select Sequentie_ID from Onderzoeks_sequenties
where Header = '{}'""".format(x))
# The query above fetches the sequence ID, so that it can be
# linked to the header with the correct BLAST results
regel = cursor.fetchone()
if dictionary[x] != "":
for y in regel:
if dictionary[x][0] != '':
for i in range(len(dictionary[x][0])):
bestand.write(str(y) + ',')
# Using this many if/elifs may not be the most efficient
# approach, but it is purely to make sure that none are
# skipped, so every result can be included in the database
if len(dictionary[x][0][i]) == 0:
bestand.write('\'' + ' ' + '\'' + ',')
bestand.write('\'' + dictionary[x][1][i] + '\''
+ ',')
bestand.write('\'' + dictionary[x][2][i] + '\''
+ ',')
bestand.write(str(dictionary[x][3][i]) + ',')
bestand.write(str(dictionary[x][4][i]) + ',')
bestand.write(str(dictionary[x][5][i]) + '\n')
elif len(dictionary[x][1][i]) == 0:
bestand.write('\'' + dictionary[x][0][i] + '\''
+ ',')
bestand.write('\'' + ' ' + '\'' + ',')
bestand.write('\'' + dictionary[x][2][i] + '\''
+ ',')
bestand.write(str(dictionary[x][3][i]) + ',')
bestand.write(str(dictionary[x][4][i]) + ',')
bestand.write(str(dictionary[x][5][i]) + '\n')
elif len(dictionary[x][2][i]) == 0:
bestand.write('\'' + dictionary[x][0][i] + '\''
+ ',')
bestand.write('\'' + dictionary[x][1][i] + '\''
+ ',')
bestand.write('\'' + ' ' + '\'' + ',')
bestand.write(str(dictionary[x][3][i]) + ',')
bestand.write(str(dictionary[x][4][i]) + ',')
bestand.write(str(dictionary[x][5][i]) + '\n')
else:
bestand.write('\'' + dictionary[x][0][i] + '\''
+ ',')
bestand.write('\'' + dictionary[x][1][i] + '\''
+ ',')
bestand.write('\'' + dictionary[x][2][i] + '\''
+ ',')
bestand.write(str(dictionary[x][3][i]) + ',')
bestand.write(str(dictionary[x][4][i]) + ',')
bestand.write(str(dictionary[x][5][i]) + '\n')
bestand.close()
main()
|
'''
Each new term in the Fibonacci sequence is generated by
adding the previous two terms. By starting with 1 and 2,
the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose
values do not exceed four million, find the sum of the
even-valued terms.
'''
import timeit
def fibonacci(a):
fib = []
b, c = 0, 1
while c < a:
fib.append(c)
b, c = c, b + c
return fib
print(sum(x for x in fibonacci(4000000) if x % 2 == 0))
# %timeit fibonacci(100)
def main():
fibonacci(100)
# t = timeit.repeat(for x in range(100): fibonacci(100))
print(timeit.timeit(main, number=100))
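# Optional alternative (an added sketch, not part of the original solution): even
# Fibonacci numbers are every third term and satisfy E(n) = 4*E(n-1) + E(n-2),
# so the even-valued terms can be summed without generating the odd ones.
def even_fib_sum(limit):
    total, a, b = 0, 2, 8  # first two even-valued terms
    while a < limit:
        total += a
        a, b = b, 4 * b + a
    return total
print(even_fib_sum(4000000))  # same result as the sum printed above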
|
import os
import sys
import site
import platform
import setuptools
from setuptools import setup
from setuptools.command.install import install
def post_install():
try:
baseDir=site.getsitepackages()
except AttributeError:
baseDir=[os.path.join(site.PREFIXES[0],'lib','site-packages')]
assert baseDir and 'site-packages'==baseDir[-1].split(os.path.sep)[-1]
baseDir=baseDir[-1]
tdcosimapp=os.path.join(baseDir,'tdcosim','tdcosimapp.py')
pyExe=sys.executable.split('\\')[-1].replace('.exe','')
os.system('mkdir "{}"'.format(os.path.join(baseDir,'tdcosim','install_logs')))
directive='reg query "HKEY_CURRENT_USER\Software\Microsoft\Command Processor" /v AutoRun > {} 2>&1'.format(\
os.path.join(baseDir,'tdcosim','install_logs','previous_reg_query.txt'))
print('running directive,\n{}'.format(directive))
os.system(directive)
directive='reg add "HKEY_CURRENT_USER\Software\Microsoft\Command Processor" /v AutoRun /d "doskey tdcosim={} \\"{}\\" $*" /f'.format(pyExe,tdcosimapp)
print('running directive,\n{}'.format(directive))
os.system(directive)
class PostInstall(install):
def run(self):
install.run(self)
post_install()
# The text of the README file
f=open(os.path.join(os.path.dirname(os.path.abspath(__file__)),'README.md'))
README=f.read()
f.close()
if platform.architecture()[0]=='64bit':
setup(name='tdcosim',
version=open("tdcosim/_version.py").readlines()[-1].split()[-1].strip("\"'"),
packages=setuptools.find_packages(),
include_package_data=True,
description='Transmission and Distribution Network co-Simulation for Power System',
long_description=README,
long_description_content_type="text/markdown",
url ='https://github.com/tdcosim/TDcoSim',
author = 'TDcoSim Team',
author_email='yim@anl.gov',
license= 'LICENSE.txt',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
install_requires=['pywin32>=301','matplotlib>=2.0.2','numpy>=1.16.2','scipy>=1.2.1',
'xlsxwriter>=1.1.8','psutil>=5.7.0','pandas>=0.24.2','dash>=1.21.0',
'dash-bootstrap-components>=1.0.1','networkx','pvder'],
extras_require={'diffeqpy': ['diffeqpy>=1.1.0']},
package_data={'tdcosim':['data/**/**/*','logs/.*','config/*','examples/*']},
cmdclass={'install':PostInstall}
)
else:
setup(name='tdcosim',
version=open("tdcosim/_version.py").readlines()[-1].split()[-1].strip("\"'"),
packages=setuptools.find_packages(),
include_package_data=True,
description='Transmission and Distribution Network co-Simulation for Power System',
long_description=README,
long_description_content_type="text/markdown",
url ='https://github.com/tdcosim/TDcoSim',
author = 'TDcoSim Team',
author_email='yim@anl.gov',
license= 'LICENSE.txt',
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
install_requires=['pywin32==224','matplotlib>=2.0.2','numpy>=1.16.2','scipy>=1.2.1',
'xlsxwriter==1.1.8','psutil==5.7.0','pandas>=0.24.2','dash>=1.21.0',
'dash-bootstrap-components>=1.0.1','networkx','pvder'],
extras_require={'diffeqpy': ['diffeqpy>=1.1.0']},
package_data={'tdcosim':['data/**/**/*','logs/.*','config/*','examples/*']},
cmdclass={'install':PostInstall}
)
|
import numpy as np
from datetime import datetime
from unittest import TestCase
from algotrader.technical.pipeline.pairwise import Plus, Minus, Times, Divides, PairCorrelation
from algotrader.trading.context import ApplicationContext
class PairwiseTest(TestCase):
def setUp(self):
self.app_context = ApplicationContext()
def test_name(self):
bar0 = self.app_context.inst_data_mgr.get_series("bar0")
bar1 = self.app_context.inst_data_mgr.get_series("bar1")
bar0.start(self.app_context)
bar1.start(self.app_context)
bar0_plus_bar1 = Plus(inputs=[bar0, bar1], input_keys='close')
bar0_plus_bar1.start(self.app_context)
self.assertEquals("Plus(bar0[close],bar1[close],length=1)",
bar0_plus_bar1.name)
spread = Minus(inputs=[bar0, bar1], input_keys='close')
spread.start(self.app_context)
self.assertEquals("Minus(bar0[close],bar1[close],length=1)",
spread.name)
def test_empty_at_initialize(self):
bar0 = self.app_context.inst_data_mgr.get_series("bar0")
bar1 = self.app_context.inst_data_mgr.get_series("bar1")
bar0.start(self.app_context)
bar1.start(self.app_context)
bar0_plus_bar1 = Plus(inputs=[bar0, bar1], input_keys='close')
bar0_plus_bar1.start(self.app_context)
self.assertEqual(0, len(bar0_plus_bar1.get_data()))
def test_shape(self):
bar0 = self.app_context.inst_data_mgr.get_series("bar0")
bar1 = self.app_context.inst_data_mgr.get_series("bar1")
bar0.start(self.app_context)
bar1.start(self.app_context)
bar0_plus_bar1 = Plus(inputs=[bar0, bar1], input_keys='close')
bar0_plus_bar1.start(self.app_context)
try:
np.testing.assert_almost_equal(np.array([1, 1]), bar0_plus_bar1.shape(), 5)
except AssertionError as e:
self.fail(str(e))
# def test_nan_before_size(self):
def test_with_single_bar_multi_time(self):
bar0 = self.app_context.inst_data_mgr.get_series("bar0")
bar1 = self.app_context.inst_data_mgr.get_series("bar1")
bar0.start(self.app_context)
bar1.start(self.app_context)
plus = Plus(inputs=[bar0, bar1], input_keys='close')
minus = Minus(inputs=[bar0, bar1], input_keys='close')
times = Times(inputs=[bar0, bar1], input_keys='close')
divides = Divides(inputs=[bar0, bar1], input_keys='close')
pcorr = PairCorrelation(inputs=[bar0, bar1], input_keys='close', length=4)
plus.start(self.app_context)
minus.start(self.app_context)
times.start(self.app_context)
divides.start(self.app_context)
pcorr.start(self.app_context)
now = 1
x = np.array([80.0, 102.0, 101.0, 99.0])
y = np.array([95.0, 98.0, 105.2, 103.3])
ts = [now + 3 for i in range(4)]
x_p_y = x + y
x_m_y = x - y
x_t_y = x * y
x_d_y = x / y
bar0.add(data={"timestamp": ts[0], "close": x[0], "open": 0})
bar1.add(data={"timestamp": ts[0], "close": y[0], "open": 0})
self.assertEqual(plus.now('value'), 175.0)
self.assertEqual(minus.now('value'), -15.0)
self.assertEqual(times.now('value'), 7600.0)
self.assertEqual(divides.now('value'), 80.0 / 95.0)
bar0.add(data={"timestamp": ts[1], "close": x[1], "open": 0})
bar1.add(data={"timestamp": ts[1], "close": y[1], "open": 0})
self.assertEqual(plus.now('value'), 200.0)
self.assertEqual(minus.now('value'), 4.0)
self.assertEqual(times.now('value'), 102.0 * 98.0)
self.assertEqual(divides.now('value'), 102.0 / 98.0)
bar0.add(data={"timestamp": ts[2], "close": x[2], "open": 0})
bar1.add(data={"timestamp": ts[2], "close": y[2], "open": 0})
bar0.add(data={"timestamp": ts[3], "close": x[3], "open": 0})
bar1.add(data={"timestamp": ts[3], "close": y[3], "open": 0})
self.assertEqual(pcorr.now('value'), np.corrcoef(x, y)[0, 1])
|
import pandas as pd
# Allows importing data from Google Drive
from google.colab import drive
drive.mount('/content/drive')
# Path to the csv data file
csv = '/content/drive/My Drive/Colab Notebooks/Alura/aluguel.csv'
dados = pd.read_csv(csv, sep = ";")
dados.head(10)
# Replace NaN values with 0
dados = dados.fillna(0)
# Using the query method
# Houses with rent below 5000
dados.query("Valor < 5000 & Tipo == 'Casa'")
# Mean rent for houses
dados.query("Tipo == 'Casa'").Valor.mean()
# Housing types
Tipos = sorted(dados.Tipo.unique())
# Build a DataFrame with the data type of each column
tipos_de_dados = pd.DataFrame(dados.dtypes,
columns = ['Tipos de Dados'])
# Label the index of variables
tipos_de_dados.columns.name = 'Variáveis'
# Display the data types
tipos_de_dados
# Importing HTML data
df_html = pd.read_html('https://news.google.com/covid19/map?hl=pt-BR&mid=%2Fm%2F01l_jz&gl=BR&ceid=BR%3Apt-419', decimal=",")
# Select the first table from the page
df_html[0]
# Drop column
df_html = pd.DataFrame(df_html[0]).drop(columns=['Novos casos (últimos 60 dias)'])
# Rename column
df_html = df_html.rename(columns={"Casos a cada um milhão de pessoas": "Casos a cada 1M de pessoas"})
# Display the DataFrame
df_html
|
import sys
from typing import Optional
import click
import requests
import valohai_cli
from valohai_cli.messages import warn
@click.command()
def update_check() -> None:
data = get_pypi_info()
current_version = valohai_cli.__version__
latest_version = data['info']['version']
click.echo('Your version of Valohai-CLI is ' + click.style(current_version, bold=True))
click.echo(' The latest release on PyPI is ' + click.style(latest_version, bold=True))
upgrade_status = determine_upgrade_status(current_version, latest_version)
if upgrade_status == 'upgrade':
click.secho(
'\nGood news! An upgrade is available!\n'
'Run (e.g.) `pip install -U valohai-cli` to install the new version.',
bold=True,
fg='green',
)
click.echo('Upgrade instructions may differ based on the method you\'ve installed the application with.')
sys.exit(1)
elif upgrade_status == 'delorean':
click.secho(
'\nWhen this thing gets up to 88 mph... You seem to be running a version from the future!\n',
bold=True,
fg='cyan',
)
elif upgrade_status == 'current':
click.echo('\nYou seem to be running the latest and greatest. Good on you!')
def determine_upgrade_status(current_version: str, latest_version: str) -> Optional[str]:
try:
from distutils.version import LooseVersion
parsed_current_version = LooseVersion(current_version)
parsed_latest_version = LooseVersion(latest_version)
if parsed_latest_version > parsed_current_version:
return 'upgrade'
elif parsed_latest_version < parsed_current_version:
return 'delorean'
elif parsed_latest_version == parsed_current_version:
return 'current'
except Exception as exc:
warn(f'Unable to determine whether the version is older or newer ({exc})')
return None
def get_pypi_info() -> dict:
resp = requests.get('https://pypi.org/pypi/valohai-cli/json')
resp.raise_for_status()
return dict(resp.json())
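# Illustrative behaviour sketch (not part of the original module): with the
# LooseVersion comparison above, determine_upgrade_status classifies versions as
#   determine_upgrade_status('0.18.0', '0.19.0')  ->  'upgrade'
#   determine_upgrade_status('0.20.0', '0.19.0')  ->  'delorean'
#   determine_upgrade_status('0.19.0', '0.19.0')  ->  'current'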
|
from __future__ import division
import numpy as np
# define target list
mmol=338.66 # g/mol
Th_length=687. # [Aa]
# target masses in GeV and number of nucleons
#Hydrogen
m_H=0.9389
A_H=1.
Z_H=1.
n_H=6.+44.
# Carbon
m_C=11.188
A_C=(12.*98.9+13.*1.1)/100.
Z_C=6.
n_C=2.+22.
# lists
mT_list=[m_H,m_C]
AT_list=[A_H,A_C]
ZT_list=[Z_H,Z_C]
nameT_list=['H','C']
massFrac_list=np.array([m_H*n_H,m_C*n_C])/(m_H*n_H+m_C*n_C)
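# Illustrative sanity check (not part of the original script): the mass fractions
# defined above should sum to 1 within floating-point tolerance.
assert np.isclose(massFrac_list.sum(), 1.0)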
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 16:06:11 2015
@author: jnewman
"""
#Functions for initial processing of WindCube data before TI adjustment is applied.
from functools import reduce
def import_WC_file(filename):
encoding_from='iso-8859-1'
encoding_to='UTF-8'
#Reads in WINDCUBE .rtd file and outputs raw u, v, and w components, measurement heights, and timestamp.
#Inputs
#filename: WINDCUBE v2 .rtd file to read
#Outputs
#u_sorted, v_sorted, w_sorted: Raw u, v, and w values from all measurement heights
#heights: Measurement heights from file
#time_datenum_sorted: All timestamps in datetime format
import numpy as np
from datetime import datetime
#Read in row containing heights (either row 38 or 39) and convert heights to a set of integers.
inp = open(filename,encoding=encoding_from).readlines()
height_array = str.split(inp[38])
heights_temp = height_array[2:]
if len(heights_temp) == 0:
height_array = str.split(inp[39])
heights_temp = height_array[2:]
heights = [int(i) for i in heights_temp]
#Read in timestamps. There will be either 41 or 42 headerlines.
num_rows = 41
timestamp = np.loadtxt(filename,encoding=encoding_from, delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
try:
datetime.strptime(timestamp[0],"%Y/%m/%d %H:%M:%S.%f")
except:
num_rows = 42
timestamp = np.loadtxt(filename, encoding=encoding_from,delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
#Convert timestamps to Python datetime format. Some timestamps may be blank and will raise an error during the
#datetime conversion. The rows corresponding to these bad timestamps are recorded.
time_datenum_temp = []
bad_rows = []
for i in range(0,len(timestamp)):
try:
time_datenum_temp.append(datetime.strptime(timestamp[i],"%Y/%m/%d %H:%M:%S.%f"))
except:
bad_rows.append(i)
#If bad timestamps are detected, an error message is output to the screen and all timestamps including and following
#the bad timestamp are deleted. The rows corresponding to these timestamps are categorized as footer lines and are not
#used when reading in the velocity data.
if(bad_rows):
print(filename,': Issue reading timestamp')
footer_lines = len(time_datenum_temp) - bad_rows[0] + 1
timestamp = np.delete(timestamp,range(bad_rows[0],len(timestamp)),axis=0)
time_datenum_temp = np.delete(time_datenum_temp,range(bad_rows[0],len(time_datenum_temp)),axis=0)
else:
footer_lines = 0
#Create column of NaNs for measurement heights that raise an error.
v_nan = np.empty(len(time_datenum_temp))
v_nan[:] = np.nan
u = []
v = []
w = []
#Read in values of u, v, and w one measurement height at a time. Definitions of the wind components are as follows:
#u is east-west wind (u > 0 means wind is coming from the west)
#v is north-south wind (v > 0 means wind is coming from the south)
#w is vertical wind (w > 0 means wind is upward)
for i in range(1,len(heights)+1):
try:
u.append(-np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t',usecols=(i*9 + 1),dtype=None,skip_header=num_rows,skip_footer=footer_lines))
v.append(-np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(i*9),dtype=None,skip_header=num_rows,skip_footer=footer_lines))
w.append(-np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(i*9 + 2),dtype=None,skip_header=num_rows,skip_footer=footer_lines))
except:
u.append(v_nan)
v.append(v_nan)
w.append(v_nan)
u = np.array(u).transpose()
v = np.array(v).transpose()
w = np.array(w).transpose()
#Check to make sure all timestamps follow the initial timestamp. If a particular timestamp is earlier than the first
#timestamp in the data file, this row is marked as a bad row and removed from the data.
bad_rows = []
for i in range(1,len(time_datenum_temp)):
if time_datenum_temp[i] < time_datenum_temp[0]:
bad_rows.append(i)
if(bad_rows):
print(filename,': Issue with timestamp order')
u = np.delete(u,bad_rows,axis=0)
v = np.delete(v,bad_rows,axis=0)
w = np.delete(w,bad_rows,axis=0)
time_datenum_temp = np.delete(time_datenum_temp,bad_rows,axis=0)
timestamp = np.delete(timestamp,bad_rows,axis=0)
#Sort data by timestamp to ensure that variables are in correct temporal order.
time_datenum_sorted = np.array([time_datenum_temp[i] for i in np.argsort(time_datenum_temp)])
u_sorted = np.array([u[i,:] for i in np.argsort(time_datenum_temp)])
v_sorted = np.array([v[i,:] for i in np.argsort(time_datenum_temp)])
w_sorted = np.array([w[i,:] for i in np.argsort(time_datenum_temp)])
return u_sorted,v_sorted,w_sorted,heights,time_datenum_sorted
def import_WC_file_VAD(filename,height_needed):
#Reads in WINDCUBE .rtd file and performs VAD technique at desired height. Outputs u, v, and w values from VAD technique,
#w values from vertical beam, measurement heights, and timestamp.
#Inputs
#filename: WINDCUBE v2 .rtd file to read
#height_needed: Height where VAD analysis should be performed
#Outputs
#u_VAD, v_VAD, w_VAD: u, v, and w values from VAD fit at height_needed
#vert_beam: Radial velocity from vertical beam at height_needed
#time_datenum: Timestamps corresponding to the start of each scan in datetime format
#time_datenum_vert_beam: Timestamps corresponding to vertical beam position in datetime format
import numpy as np
from scipy.optimize import curve_fit
from lidar_preprocessing_functions import VAD_func
from datetime import datetime
from lidar_preprocessing_functions import min_diff
inp = open(filename, encoding='iso-8859-1').readlines()
height_array = str.split(inp[38])
heights_temp = height_array[2:]
if len(heights_temp) == 0:
height_array = str.split(inp[39])
heights_temp = height_array[2:]
heights = [int(i) for i in heights_temp]
height_needed_index = min_diff(heights,height_needed,6.1)
num_rows = 41
timestamp = np.loadtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
try:
datetime.strptime(timestamp[0],"%Y/%m/%d %H:%M:%S.%f")
except:
num_rows = 42
timestamp = np.loadtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
time_datenum_temp = []
bad_rows = []
#Create list of rows where timestamp cannot be converted to datetime
for i in range(0,len(timestamp)):
try:
time_datenum_temp.append(datetime.strptime(timestamp[i],"%Y/%m/%d %H:%M:%S.%f"))
except:
bad_rows.append(i)
#Delete all timestamp and datetime values from first bad row to end of dataset
if(bad_rows):
footer_lines = len(time_datenum_temp) - bad_rows[0] + 1
timestamp = np.delete(timestamp,range(bad_rows[0],len(timestamp)),axis=0)
time_datenum_temp = np.delete(time_datenum_temp,range(bad_rows[0],len(time_datenum_temp)),axis=0)
else:
footer_lines = 0
#Skip lines that correspond to bad data
az_angle = np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(1,),dtype=str, unpack=True,skip_header=num_rows,skip_footer=footer_lines)
vr_nan = np.empty(len(time_datenum_temp))
vr_nan[:] = np.nan
vr = []
for i in range(1,len(heights)+1):
try:
vr.append(-np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(i*9 -4),dtype=None,skip_header=num_rows,skip_footer=footer_lines))
except:
vr.append(vr_nan)
vr = np.array(vr)
vr = vr.transpose()
bad_rows = []
#Find rows where time decreases instead of increasing
for i in range(1,len(time_datenum_temp)):
if time_datenum_temp[i] < time_datenum_temp[0]:
bad_rows.append(i)
#Delete rows where time decreases instead of increasing
if(bad_rows):
vr = np.delete(vr,bad_rows,axis=0)
time_datenum_temp = np.delete(time_datenum_temp,bad_rows,axis=0)
timestamp = np.delete(timestamp,bad_rows,axis=0)
az_angle = np.delete(az_angle,bad_rows,axis=0)
#Sort timestamp, vr, and az angle in order of ascending datetime value
timestamp_sorted = [timestamp[i] for i in np.argsort(time_datenum_temp)]
vr_sorted = np.array([vr[i,:] for i in np.argsort(time_datenum_temp)])
az_angle_sorted = np.array([az_angle[i] for i in np.argsort(time_datenum_temp)])
vert_beam = []
vr_temp = []
az_temp = []
timestamp_az = []
timestamp_vert_beam = []
#Separate vertical beam values (where az angle = "V") from off-vertical beam values
for i in range(0,len(az_angle_sorted)):
if "V" in az_angle_sorted[i]:
vert_beam.append(vr_sorted[i,height_needed_index])
timestamp_vert_beam.append(timestamp_sorted[i])
else:
vr_temp.append(vr_sorted[i,height_needed_index])
az_temp.append(float(az_angle_sorted[i]))
timestamp_az.append(timestamp_sorted[i])
vr_temp = np.array(vr_temp)
elevation = 62
u_VAD = []
v_VAD = []
w_VAD = []
timestamp_VAD = []
#Perform a VAD fit on each full scan
print(len(az_temp)/4)
for i in range(0,int(len(az_temp)/4)):
x_vals = np.array(az_temp[i*4 + 1:i*4 + 5])
y_vals= np.array(vr_temp[i*4 + 1:i*4 + 5])
if len(y_vals[np.isnan(y_vals)]) == 0:
#Initial guesses for the VAD fit parameters
p0 = np.array([(np.max(y_vals)-np.min(y_vals))/2,2*np.pi,np.nanmean(y_vals)])
popt, pcov = curve_fit(VAD_func, x_vals.ravel(), y_vals.ravel(),p0.ravel())
ws_temp = popt[0]/np.cos(np.radians(elevation))
wd_temp = np.degrees(popt[1]-np.pi)
if wd_temp > 360:
wd_temp-= 360
u_VAD.append(np.sin(np.radians(wd_temp) - np.pi)*ws_temp)
v_VAD.append(np.cos(np.radians(wd_temp) - np.pi)*ws_temp)
w_VAD.append(popt[2]/np.sin(np.radians(elevation)))
else:
u_VAD.append(np.nan)
v_VAD.append(np.nan)
w_VAD.append(np.nan)
timestamp_VAD.append(timestamp_az[i*4 +1])
#Convert VAD and vertical beam timestamps to datetime format
time_datenum = []
for i in range(0,len(timestamp_VAD)):
time_datenum.append(datetime.strptime(timestamp_VAD[i],"%Y/%m/%d %H:%M:%S.%f"))
time_datenum_vert_beam = []
for i in range(0,len(timestamp_vert_beam)):
time_datenum_vert_beam.append(datetime.strptime(timestamp_vert_beam[i],"%Y/%m/%d %H:%M:%S.%f"))
return np.array(u_VAD),np.array(v_VAD),np.array(w_VAD),np.array(vert_beam)[:,0],\
np.array(time_datenum),np.array(time_datenum_vert_beam)
def VAD_func(az, x1, x2, x3):
import numpy as np
return np.array(x3+x1*np.cos(np.radians(az)-x2))
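#Illustrative sketch (not part of the original module; the numbers are made up):
#VAD_func is the model passed to scipy's curve_fit in import_WC_file_VAD, e.g.
# az = np.array([0., 90., 180., 270.])
# vr = np.array([2.1, 0.4, -2.0, -0.5])
# p0 = np.array([(np.max(vr) - np.min(vr))/2, 2*np.pi, np.nanmean(vr)])
# popt, pcov = curve_fit(VAD_func, az, vr, p0)
#popt then holds the fitted amplitude, phase, and offset of the cosine.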
def import_WC_file_vr(filename,height_needed):
#Reads in WINDCUBE .rtd file and extracts off-vertical radial wind speed components at desired height.
#Inputs
#filename: WINDCUBE v2 .rtd file to read
#height_needed: Height where off-vertical measurements should be extracted
#Outputs
#vr_n,vr_e,vr_s,vr_w: Time series from north-, east-, south-, and west-pointing beams,
#respectively, at height_needed
#time_datenum_n,time_datenum_e,time_datenum_s,time_datenum_w: Timestamps corresponding to
#north-, east-, south-, and west-pointing beams, respectively, in datetime format
import numpy as np
from datetime import datetime
from lidar_preprocessing_functions import min_diff
inp = open(filename,encoding='iso-8859-1').readlines()
height_array = str.split(inp[38])
heights_temp = height_array[2:]
if len(heights_temp) == 0:
height_array = str.split(inp[39])
heights_temp = height_array[2:]
heights = [int(i) for i in heights_temp]
height_needed_index = min_diff(heights,height_needed,6.1)
num_rows = 41
timestamp = np.loadtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
try:
datetime.strptime(timestamp[0],"%Y/%m/%d %H:%M:%S.%f")
except:
num_rows = 42
timestamp = np.loadtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(0,),dtype=str, unpack=True,skiprows=num_rows)
time_datenum_temp = []
bad_rows = []
#Create list of rows where timestamp cannot be converted to datetime
for i in range(0,len(timestamp)):
try:
time_datenum_temp.append(datetime.strptime(timestamp[i],"%Y/%m/%d %H:%M:%S.%f"))
except:
bad_rows.append(i)
#Delete all timestamp and datetime values from first bad row to end of dataset
if(bad_rows):
footer_lines = len(time_datenum_temp) - bad_rows[0] + 1
timestamp = np.delete(timestamp,range(bad_rows[0],len(timestamp)),axis=0)
time_datenum_temp = np.delete(time_datenum_temp,range(bad_rows[0],len(time_datenum_temp)),axis=0)
else:
footer_lines = 0
#Skip lines that correspond to bad data
az_angle = np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(1,),dtype=str, unpack=True,skip_header=num_rows,skip_footer=footer_lines)
vr_nan = np.empty(len(time_datenum_temp))
vr_nan[:] = np.nan
vr = []
for i in range(1,len(heights)+1):
try:
vr.append(-np.genfromtxt(filename,encoding='iso-8859-1', delimiter='\t', usecols=(i*9 -4),dtype=None,skip_header=num_rows,skip_footer=footer_lines))
except:
vr.append(vr_nan)
vr = np.array(vr)
vr = vr.transpose()
bad_rows = []
#Find rows where time decreases instead of increasing
for i in range(1,len(time_datenum_temp)):
if time_datenum_temp[i] < time_datenum_temp[0]:
bad_rows.append(i)
#Delete rows where time decreases instead of increasing
if(bad_rows):
vr = np.delete(vr,bad_rows,axis=0)
time_datenum_temp = np.delete(time_datenum_temp,bad_rows,axis=0)
timestamp = np.delete(timestamp,bad_rows,axis=0)
az_angle = np.delete(az_angle,bad_rows,axis=0)
#Sort timestamp, vr, and az angle in order of ascending datetime value
timestamp_sorted = [timestamp[i] for i in np.argsort(time_datenum_temp)]
vr_sorted = np.array([vr[i,:] for i in np.argsort(time_datenum_temp)])
az_angle_sorted = np.array([az_angle[i] for i in np.argsort(time_datenum_temp)])
vr_sorted = np.array(vr_sorted)
vr_temp = []
az_temp = []
timestamp_az = []
vert_beam = []
timestamp_vert_beam = []
#Separate vertical beam values (where az angle = "V") from off-vertical beam values
for i in range(0,len(az_angle_sorted)):
if "V" in az_angle_sorted[i]:
vert_beam.append(vr_sorted[i,:])
timestamp_vert_beam.append(timestamp_sorted[i])
else:
vr_temp.append(vr_sorted[i,:])
az_temp.append(float(az_angle_sorted[i]))
timestamp_az.append(timestamp_sorted[i])
vr_temp = np.array(vr_temp)
az_temp = np.array(az_temp)
#Extract data for north-, east-, south-, and west-pointing beams at height of interest
vr_n = vr_temp[az_temp==0,height_needed_index]
vr_e = vr_temp[az_temp==90,height_needed_index]
vr_s = vr_temp[az_temp==180,height_needed_index]
vr_w = vr_temp[az_temp==270,height_needed_index]
#Convert timestamps to datetime format
time_datenum = []
for i in range(0,len(timestamp_az)):
time_datenum.append(datetime.strptime(timestamp_az[i],"%Y/%m/%d %H:%M:%S.%f"))
time_datenum = np.array(time_datenum)
time_datenum_n = time_datenum[az_temp==0]
time_datenum_e = time_datenum[az_temp==90]
time_datenum_s = time_datenum[az_temp==180]
time_datenum_w = time_datenum[az_temp==270]
return vr_n,vr_e,vr_s,vr_w,time_datenum_n,time_datenum_e,time_datenum_s,time_datenum_w
def import_ZephIR_file(filename):
#Reads in ZephIR .ZPH file and outputs raw u, v, and w components, measurement heights, and timestamp.
#filename: High-resolution ZephIR .ZPH file to read
#Outputs
#u_sorted, v_sorted, w_sorted: u, v, and w values from all measurement heights
#heights: Measurement heights from file
#time_datenum_sorted: All timestamps in datetime format
import numpy as np
from datetime import datetime
inp = open(filename,encoding='iso-8859-1').readlines()
#Read in measurement heights and convert to integers
height_array = str.split(inp[0])[33:44]
heights = [int(i[:-1]) for i in height_array]
#Read in timestamps
num_rows = 2
timestamp = np.loadtxt(filename,encoding='iso-8859-1', delimiter=',', usecols=(1,),dtype=str, unpack=True,skiprows=num_rows)
time_datenum_temp = []
bad_rows = []
#Create list of rows where timestamp cannot be converted to datetime
for i in range(0,len(timestamp)):
try:
time_datenum_temp.append(datetime.strptime(timestamp[i],"%d/%m/%Y %H:%M:%S"))
except:
bad_rows.append(i)
#Delete all timestamp and datetime values from first bad row to end of dataset
if(bad_rows):
print(filename,': Issue reading timestamp')
footer_lines = len(time_datenum_temp) - bad_rows[0] + 1
timestamp = np.delete(timestamp,range(bad_rows[0],len(timestamp)),axis=0)
time_datenum_temp = np.delete(time_datenum_temp,range(bad_rows[0],len(time_datenum_temp)),axis=0)
else:
footer_lines = 0
v_nan = np.empty(len(time_datenum_temp))
v_nan[:] = np.nan
#Read in wind direction, horizontal wind speed, and vertical wind speed estimated from VAD fit at each measurement height
for i in range(0,len(heights)):
try:
if i == 0:
wd = np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',',usecols=(19 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)
ws = np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',', usecols=(20 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)
w = np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',', usecols=(21 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)
else:
wd = np.vstack((wd,np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',',usecols=(19 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)))
ws = np.vstack((ws,np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',', usecols=(20 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)))
w = np.vstack((w,np.genfromtxt(filename,encoding='iso-8859-1', delimiter=',', usecols=(21 + i*3),dtype=None,skip_header=num_rows,skip_footer=footer_lines)))
except:
if i == 0:
wd = v_nan
ws = v_nan
w = v_nan
else:
wd = np.vstack((wd,v_nan))
ws = np.vstack((ws,v_nan))
w = np.vstack((w,v_nan))
print(filename,': Issue with wind speed')
wd[wd==9999] = np.nan
ws[ws==9999] = np.nan
w[w==9999] = np.nan
u = np.array(np.sin(np.radians(wd - 180))*ws).transpose()
v = np.array(np.cos(np.radians(wd - 180))*ws).transpose()
w = np.array(w).transpose()
bad_rows = []
#Find rows where time decreases instead of increasing
for i in range(1,len(time_datenum_temp)):
if time_datenum_temp[i] < time_datenum_temp[0]:
bad_rows.append(i)
#Delete rows where time decreases instead of increasing
if(bad_rows):
print(filename,': Issue with timestamp order')
u = np.delete(u,bad_rows,axis=0)
v = np.delete(v,bad_rows,axis=0)
w = np.delete(w,bad_rows,axis=0)
time_datenum_temp = np.delete(time_datenum_temp,bad_rows,axis=0)
timestamp = np.delete(timestamp,bad_rows,axis=0)
#Sort data in order of ascending datetime value
time_datenum_sorted = np.array([time_datenum_temp[i] for i in np.argsort(time_datenum_temp)])
u_sorted = np.array([u[i,:] for i in np.argsort(time_datenum_temp)])
v_sorted = np.array([v[i,:] for i in np.argsort(time_datenum_temp)])
w_sorted = np.array([w[i,:] for i in np.argsort(time_datenum_temp)])
return u_sorted,v_sorted,w_sorted,heights,time_datenum_sorted
def interp_ts(ts,time_datenum,interval):
#Interpolates time series ts with timestamps time_datenum to a grid with constant temporal spacing of "interval"
#Inputs
#ts: Time series for interpolation
#time_datenum: Original timestamps for time series in datetime format
#interval: Temporal interval to use for interpolation
#Outputs
#ts_interp: Interpolated time series
#time_interp: Timestamps of interpolated time series in datetime format
import numpy as np
from datetime import datetime
import calendar as cal
#Convert timestamps to unix time (seconds after 1970 01-01) as it's easier to perform the interpolation
unix_time = []
for i in range(0,len(time_datenum)):
unix_time.append(cal.timegm(datetime.timetuple(time_datenum[i])) + (time_datenum[i].microsecond/1e6))
unix_time = np.array(unix_time)
#Select the start and end time for the interpolation
#The starting minute value of the interpolation should be the next multiple of 10
if time_datenum[0].minute%10 == 0:
start_minute = str((time_datenum[0].minute//10)*10)
else:
start_minute = str((time_datenum[0].minute//10 + 1)*10)
start_hour = str(time_datenum[0].hour)
if int(start_minute) == 60:
start_minute = '00'
start_hour = str(time_datenum[0].hour + 1)
end_hour = str(time_datenum[-1].hour)
#The ending minute value of the interpolation should end with a 9
if (time_datenum[-1].minute-9)%10 == 0:
end_minute = str((time_datenum[-1].minute//10*10) + 9)
else:
end_minute = str((time_datenum[-1].minute//10)*10 - 1)
if int(end_minute) < 0:
end_minute = '59'
end_hour = str(time_datenum[-1].hour - 1)
#Convert start and end times into unix time and get interpolation times in unix time
timestamp_start = str(time_datenum[0].year) + "/" + str(time_datenum[0].month) + "/" + str(time_datenum[0].day) + \
" " + start_hour + ":" + start_minute + ":00"
time_datenum_start = datetime.strptime(timestamp_start,"%Y/%m/%d %H:%M:%S")
unix_time_start = cal.timegm(datetime.timetuple(time_datenum_start))
timestamp_end = str(time_datenum[-1].year) + "/" + str(time_datenum[-1].month) + "/" + str(time_datenum[-1].day) + \
" " + end_hour + ":" + end_minute + ":59"
time_datenum_end = datetime.strptime(timestamp_end,"%Y/%m/%d %H:%M:%S")
unix_time_end = cal.timegm(datetime.timetuple(time_datenum_end))
time_interp_unix = np.arange(unix_time_start,unix_time_end+1,interval)
#Interpolate time series
ts_interp = []
#If more than 75% of the data are valid, perform interpolation using only non-NaN data. (Every fifth point of the
#u and v data will be NaNs because of the vertically pointing beam.)
if float(len(ts[~np.isnan(ts)])/float(len(ts))) > 0.75:
ts_temp = ts[~np.isnan(ts)]
time_temp = unix_time[~np.isnan(ts)]
else:
ts_temp = ts
time_temp = unix_time
ts_interp = np.interp(time_interp_unix,time_temp,ts_temp)
#If several points in a row have the same value, set these points to NaN. This can occur when the interpolation is
#performed on a dataset with one valid value surrounded by several NaNs.
for i in range(2,len(ts_interp)-2):
if ts_interp[i-2] == ts_interp[i] and ts_interp[i+2] == ts_interp[i]:
ts_interp[i-2:i+2] = np.nan
time_interp = [datetime.utcfromtimestamp(int(i) + round(i-int(i),10)) for i in time_interp_unix]
return np.transpose(ts_interp),time_interp
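#Illustrative usage (not part of the original module): interpolate the first
#column of the raw u field returned by import_WC_file onto a regular 1-s grid.
# u_interp, time_interp = interp_ts(u_sorted[:,0], time_datenum_sorted, interval=1.)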
def min_diff(array_orig,array_to_find,tol):
#Finds indices in array_orig that correspond to values closest to numbers in array_to_find with tolerance tol
#Inputs
#array_orig: Original array where you want to find matching values
#array_to_find: Array of numbers to find in array_orig
#tol: Tolerance to find matching value
#Outputs
#found_indices: Indices corresponding to matching values. If no values matched with desired tolerance, index will be filled by NaN.
import numpy as np
found_indices = []
if not np.shape(array_to_find):
array_to_find = [array_to_find]
for i in array_to_find:
min_difference = tol
found_index_temp = np.nan
for j in range(0,len(array_orig)):
diff_temp = abs(i-array_orig[j])
if diff_temp < min_difference:
min_difference = diff_temp
found_index_temp = j
found_indices.append(found_index_temp)
return np.array(found_indices)
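#Illustrative example (not part of the original module): with measurement heights
#[40, 60, 80, 100] m, a requested height of 62 m, and a tolerance of 6.1 m, the
#closest match is the 60 m gate, so
# min_diff([40, 60, 80, 100], 62, 6.1)  ->  array([1])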
def get_10min_mean_ws_wd(u,v,time,frequency):
#Calculates the 10-min. scalar average wind speed and wind direction at all measurement heights
#Inputs
#u: East-west velocity time series
#v: North-south velocity time series
#time: Timestamps in datetime format
#frequency: Sampling frequency of velocity data
#Outputs
#U: 10-min. mean horizontal wind speeds
#wd: 10-min. mean wind direction
#time_datenum_10min: Timestamp corresponding to the start of each 10-min. averaging period
import numpy as np
ten_min_count = int(frequency*60*10)
U = []
wd = []
time_datenum_10min = []
for i in np.arange(0,len(u)-ten_min_count+1,ten_min_count):
U_height = []
wd_height = []
#10-min. window of data
if len(np.shape(u)) > 1:
u_temp = u[i:i+ten_min_count,:]
v_temp = v[i:i+ten_min_count,:]
else:
#Reshape 1-D input to a single "height" column so the loop below also works
u_temp = np.reshape(u[i:i+ten_min_count],(-1,1))
v_temp = np.reshape(v[i:i+ten_min_count],(-1,1))
for j in range(np.shape(u_temp)[1]):
U_height.append(np.nanmean((u_temp[:,j]**2 + v_temp[:,j]**2)**0.5,axis=0))
u_bar = np.nanmean(u_temp[:,j])
v_bar = np.nanmean(v_temp[:,j])
wd_height.append((180./np.pi)*(np.arctan2(u_bar,v_bar) + np.pi))
U.append(U_height)
wd.append(wd_height)
time_datenum_10min.append(time[i])
return np.array(U),np.array(wd),time_datenum_10min
def get_10min_shear_parameter(U,heights,height_needed):
#Calculates the shear parameter for every 10-min. period of data by fitting power law equation to
#10-min. mean wind speeds
#Inputs
#U: 10-min. mean horizontal wind speed at all measurement heights
#heights: Measurement heights
#height_needed: Height where TI is being extracted - values used to calculate shear parameter
#should be centered around this height
#Outputs
#p: 10-min. values of shear parameter
import numpy as np
from functools import reduce
from lidar_preprocessing_functions import min_diff
import warnings
p = []
#Set heights for calculation of shear parameter and find corresponding indices
zprofile = np.arange(0.5*height_needed,1.5*height_needed + 10,10)
height_indices = np.unique(min_diff(heights,zprofile,5))
height_indices = height_indices[~np.isnan(height_indices)]
#Arrays of height and mean wind speed to use for calculation
heights_temp = np.array([heights[int(i)] for i in height_indices])
U_temp = np.array([U[:,int(i)] for i in height_indices])
mask = [~np.isnan(U_temp)]
mask = reduce(np.logical_and, mask)
with warnings.catch_warnings():
warnings.filterwarnings('error')
#For each set of 10-min. U values, use linear fit to determine value of shear parameter
for i in range(0,len(U)):
try:
try:
p_temp = np.polyfit(np.log(heights_temp[mask[:,i]]),np.log(U_temp[mask[:,i],i]),1)
p.append(p_temp[0])
except np.RankWarning:
p.append(np.nan)
except:
p.append(np.nan)
return np.array(p)
def rotate_ws(u,v,w,frequency):
#Performs coordinate rotation according to Eqs. 22-29 in Wilczak et al. (2001)
#Reference: Wilczak, J. M., S. P. Oncley, and S. A. Stage, 2001: Sonic anemometer tilt adjustment algorithms.
#Bound.-Layer Meteor., 99, 127–150.
#Inputs
#u, v, w: Time series of east-west, north-south, and vertical wind speed components, respectively
#frequency: Sampling frequency of velocity
#Outputs
#u_rot, v_rot, w_rot: Rotated u, v, and w wind speed, with u rotated into the 10-min. mean wind direction and
#the 10-min. mean of v and w forced to 0
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
u_rot = []
v_rot = []
w_rot = []
#Perform coordinate rotation. First rotation rotates u into the mean wind direction and forces the mean v to 0.
#Second rotation forces the mean w to 0.
for i in np.arange(0,len(u)-ten_min_count+1,ten_min_count):
u_temp = u[i:i+ten_min_count]
v_temp = v[i:i+ten_min_count]
w_temp = w[i:i+ten_min_count]
phi_temp = np.arctan2(np.nanmean(v_temp),np.nanmean(u_temp))
u1_temp = u_temp*np.cos(phi_temp) + v_temp*np.sin(phi_temp)
v1_temp = -u_temp*np.sin(phi_temp) + v_temp*np.cos(phi_temp)
w1_temp = w_temp
phi_temp2 = np.arctan2(np.nanmean(w1_temp),np.nanmean(u1_temp))
u_rot.append(u1_temp*np.cos(phi_temp2) + w1_temp*np.sin(phi_temp2))
v_rot.append(v1_temp)
w_rot.append(-u1_temp*np.sin(phi_temp2) + w1_temp*np.cos(phi_temp2))
return np.array(u_rot).ravel(),np.array(v_rot).ravel(),np.array(w_rot).ravel()
def get_10min_var(ts,frequency):
#Calculates variance for each 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#ts_var: 10-min. variance values from time series
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
ts_var = []
for i in np.arange(0,len(ts)-ten_min_count+1,ten_min_count):
ts_temp = ts[i:i+ten_min_count]
ts_var.append(np.nanmean((ts_temp-np.nanmean(ts_temp))**2))
return np.array(ts_var)
def get_10min_spectrum(ts,frequency):
#Calculate power spectrum for 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#S_A_fast: Spectral power
#frequency_fft: Frequencies correspond to spectral power values
import numpy as np
N = len(ts)
delta_f = float(frequency)/N
frequency_fft = np.linspace(0,float(frequency)/2,N//2)
F_A_fast = np.fft.fft(ts)/N
E_A_fast = 2*abs(F_A_fast[0:int(N/2)]**2)
S_A_fast = (E_A_fast)/delta_f
return S_A_fast,frequency_fft
def get_10min_spectrum_WC_raw(ts,frequency):
#Calculate power spectrum for 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#S_A_fast: Spectral power
#frequency_fft: Frequencies correspond to spectral power values
import numpy as np
N = len(ts)
delta_f = float(frequency)/N
frequency_fft = np.linspace(0,float(frequency)/2,N//2)
F_A_fast = np.fft.fft(ts)/N
E_A_fast = 2*abs(F_A_fast[0:int(N/2)]**2)
S_A_fast = (E_A_fast)/delta_f
#Data are only used for frequencies lower than 0.125 Hz. Above 0.125 Hz, the
#WINDCUBE spectrum calculated using raw data begins to show an artifact. This
#artifact is due to the recording of the u, v, and w components for every beam
#position, which results in repeating components.
S_A_fast = S_A_fast[frequency_fft <= 0.125]
frequency_fft = frequency_fft[frequency_fft <= 0.125]
return S_A_fast,frequency_fft
def get_10min_covar(ts1,ts2,frequency):
#Calculate the covariance of two variables
#Inputs
#ts1: Time series of variable 1
#ts2: Time series of variable 2
#frequency: Sampling frequency
#Outputs
#ts_covar: 10-min. covariance of variables 1 and 2
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
ts_covar = []
for i in np.arange(0,len(ts1)-ten_min_count+1,ten_min_count):
ts_temp1 = ts1[i:i+ten_min_count]
ts_temp2 = ts2[i:i+ten_min_count]
mask = [~np.isnan(ts_temp1),~np.isnan(ts_temp2)]
total_mask = reduce(np.logical_and, mask)
ts_temp1 = ts_temp1[total_mask]
ts_temp2 = ts_temp2[total_mask]
ts_covar.append(np.nanmean((ts_temp1-np.nanmean(ts_temp1))*(ts_temp2-np.nanmean(ts_temp2))))
return np.array(ts_covar)
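#Illustrative sketch (not part of the original module; the sampling rate and the
#synthetic winds are assumptions): compute 10-min. variance and u'w' covariance
#from two 10-min. periods of 1-Hz data.
# import numpy as np
# rng = np.random.default_rng(0)
# u = rng.normal(8., 1., 1200)
# w = rng.normal(0., 0.5, 1200)
# u_var = get_10min_var(u, frequency=1.)
# uw_cov = get_10min_covar(u, w, frequency=1.)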
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import os
from django.db.models import query
from requests.api import head
import jwt
import uuid
import hashlib
import json
from urllib.parse import urlencode
import time
import requests
from dotenv import load_dotenv
from .upbit_dto import UpbitDTO
'''
Service layer that interacts with the Upbit exchange.
It is driven by the views and handles the required database data from the
models or serializers, performing the business logic.
'ApiClient' is a class that stores on self the Upbit keys injected from
outside through the view.
'UpbitService' provides:
querying all accounts,
querying order availability,
querying a single order,
querying the order list,
querying the deposit list,
placing an order,
requesting an order cancellation,
querying withdrawal availability,
withdrawing coins,
withdrawing KRW,
querying a single deposit,
requesting creation of a deposit address,
querying all deposit addresses,
querying a single deposit address,
depositing KRW,
querying deposit/withdrawal (wallet) status,
querying the API key list,
and querying daily candles.
How to call a service defined in service.py from a view:
# views.py
class UpbitView(generics.GenericAPIView):
def get(self, request, *args, **kwargs):
UpbitService().any_business_method()
return Response(...)
'''
class UpbitService():
def __init__(self, data: UpbitDTO, *args):
self.access_key = data.access_key
self.secret_key = data.secret_key
self.server_url = data.server_url
self.market = data.market
self.days_number = data.days_number
def getQuery(self, query):
query = query
return query
def getQueryString(self, query):
query_string = urlencode(query).encode()
return query_string
def getQueryHash(self, query_string):
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
return query_hash
def getPayload(self, query_hash):
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4()),
'query_hash' : query_hash,
'query_hash_alg' : 'SHA512'
}
return payload
def getJwtToken(self, payload):
jwt_token = jwt.encode(payload, self.secret_key)
return jwt_token
def getAuthorizeToken(self, jwt_token):
authorize_token = 'Bearer {}'.format(jwt_token)
return authorize_token
def getHeaders(self, authorize_token):
headers = {
"Authorization" : authorize_token
}
return headers
def sendForm(
self,
route_name,
headers
):
res = requests.get(
self.server_url + route_name,
headers=headers
)
return res.json()
def sendParamForm(
self,
route_name,
query,
headers
):
res = requests.get(
self.server_url + route_name,
params=query,
headers=headers
)
return res.json()
def getAllAccount(self):
'''
Query all accounts
'''
route_name = "accounts"
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4())
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendForm(
route_name=route_name,
headers=headers
)
return res
def getAllOrder(self):
'''
Query full order information
'''
access_key = self.access_key
secret_key = self.secret_key
server_url = self.server_url
query = {
'state': 'done',
}
query_string = urlencode(query)
uuids = [
'9ca023a5-851b-4fec-9f0a-48cd83c2eaae',
#...
]
uuids_query_string = '&'.join(["uuids[]={}".format(uuid) for uuid in uuids])
query['uuids[]'] = uuids
query_string = "{0}&{1}".format(query_string, uuids_query_string).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key': access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = jwt.encode(payload, secret_key)
authorize_token = 'Bearer {}'.format(jwt_token)
headers = {"Authorization": authorize_token}
res = requests.get(server_url + "/v1/orders", params=query, headers=headers)
return res.json()
def getOrderChance(self):
'''
Order availability information
'''
route_name = "orders/chance"
query = {
'market' : self.market
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
"access_key" : self.access_key,
'nonce' : str(uuid.uuid4()),
'query_hash' : query_hash,
'query_hash_alg' : "SHA512"
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def getOrderChanceById(self, id):
'''
Query a single order by its uuid
'''
route_name = "order"
query = {
'uuid' : id
}
query_hash = self.getQueryHash(self.getQueryString(query))
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4()),
'query_hash' : query_hash,
'query_hash_alg' : 'SHA512'
}
headers = self.getHeaders(self.getAuthorizeToken(self.getJwtToken(payload)))
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def getOrderList(self, page, order_by):
'''
Query the order list
'''
route_name = "orders"
query = {
'market': self.market,
'page' : page,
'order_by' : order_by
}
query_string = urlencode(query)
states = ['done', 'cancel'] # 'cancel' must not be omitted; the trade count is the state value
states_query_string = '&'.join(["states[]={}".format(state) for state in states])
query['states[]'] = states
query_string = "{0}&{1}".format(query_string, states_query_string).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key': self.access_key,
'nonce': time.time(),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def getUnfinishedOrderList(self, page, order_by):
'''
Open (unfilled) orders from the order list
'''
route_name = "orders"
query = {
'market': self.market,
'page' : page,
'order_by' : order_by
}
query_string = urlencode(query)
states = ['wait', 'watch']
states_query_string = '&'.join(["states[]={}".format(state) for state in states])
query['states[]'] = states
query_string = "{0}&{1}".format(query_string, states_query_string).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key': self.access_key,
'nonce': time.time(),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def getDepositList(self):
'''
Query the deposit list
'''
route_name = "deposits"
query = {
'currency': 'KRW',
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def orderRequest(self, volume, market, price, ord_type, side_status):
'''
Place an order
market : market to trade in
side : order side
- bid : buy
- ask : sell
volume : order volume (required for limit orders and market sell orders)
price : order price
e.g. on the KRW-BTC market, trading at 1000 KRW per 1 BTC means the value is 1000
On the KRW-BTC market, if the best ask is 500 KRW per 1 BTC, setting the value
to 1000 for a market buy will purchase 2 BTC
(this may differ depending on fees or the volume available at the best ask)
ord_type : order type
- limit : limit order
- price : market order (buy)
- market : market order (sell)
identifier : user-defined value for lookups
'''
side = 'bid' if side_status == 1 else 'ask'
route_name = "orders"
ord_type = ord_type
query = {
'market' : market,
'side' : side,
'volume' : volume,
'price' : price,
'ord_type' : ord_type
}
query_string = urlencode(query).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4()),
'query_hash' : query_hash,
'query_hash_alg' : 'SHA512'
}
jwt_token = jwt.encode(payload, self.secret_key)
authorize_token = 'Bearer {}'.format(jwt_token)
headers = {
"Authorization" : authorize_token
}
res = requests.post(
self.server_url + route_name,
params = query,
headers = headers
)
return res.json()
def orderCancelRequest(self, id):
'''
Request order cancellation
'''
route_name = 'order'
target_id = id
query = {
'uuid' : target_id
}
query_string = urlencode(query).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4()),
'query_hash' : query_hash,
'query_hash_alg' : 'SHA512'
}
jwt_token = jwt.encode(payload, self.secret_key)
authorize_token = 'Bearer {}'.format(jwt_token)
headers = {
"Authorization" : authorize_token
}
res = requests.delete(
self.server_url + route_name,
params=query,
headers=headers
)
return res.json()
def getWithrawsChance(self):
'''
Withdrawal availability information
'''
route_name = 'withdraws/chance'
query = {
'currency' : 'ADA'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def withdrawCoin(self):
'''
Withdraw coins
'''
route_name = 'withdraws/coin'
query = {
'currency' : 'ADA',
'amount' : '0.01',
'address' : '9187a66e-5edf-427c-9d12-0c21c26ae4b8'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def withdrawKrw(self):
'''
Withdraw KRW
'''
route_name = 'withdraws/krw'
query = {
'amount' : '10000'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def getDeposit(self, uuid_value):
'''
Query a single deposit
'''
route_name = 'deposit'
query = {
'uuid' : uuid_value
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def generateCoinAddressForDeposit(self):
'''
Request creation of a deposit address
'''
route_name = 'deposits/generate_coin_address'
query = {
'currency' : 'ADA'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = requests.post(
self.server_url + route_name,
params=query,
headers=headers
)
return res.json()
def getAllCoinAddressForDeposit(self):
'''
Query all deposit addresses
'''
route_name = 'deposits/coin_address'
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4())
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendForm(
route_name=route_name,
headers=headers
)
return res
def getCoinAddressForDeposit(self):
'''
Query a single deposit address
'''
route_name = 'deposits/coin_address'
query = {
'currency' : 'ADA'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendParamForm(
route_name=route_name,
query=query,
headers=headers
)
return res
def requestDepositKrw(self):
'''
Deposit KRW
'''
route_name = 'deposits/krw'
query = {
'amount' : '10000'
}
query_string = self.getQueryString(query)
query_hash = self.getQueryHash(query_string)
payload = {
'access_key': self.access_key,
'nonce': str(uuid.uuid4()),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = requests.post(
self.server_url + route_name,
params=query,
headers=headers
)
return res.json()
def getWalletStatus(self):
'''
Deposit/withdrawal (wallet) status
'''
route_name = 'status/wallet'
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4())
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendForm(
route_name=route_name,
headers=headers
)
return res
def getApiKeys(self):
'''
Query the API key list
'''
route_name = 'api_keys'
payload = {
'access_key' : self.access_key,
'nonce' : str(uuid.uuid4())
}
jwt_token = self.getJwtToken(payload)
authorize_token = self.getAuthorizeToken(jwt_token)
headers = self.getHeaders(authorize_token)
res = self.sendForm(
route_name=route_name,
headers=headers
)
return res
def get_days_candle(self, **kwargs):
'''
Get daily candles
'''
market= kwargs['market']
days_number= kwargs['days_number']
route_name = 'day_candles'
url = f"https://api.upbit.com/v1/candles/days?market={market}&count={days_number}"
headers = {"Accept" : "application/json"}
response= requests.request(
"GET",
url,
headers=headers
)
response = response.json()
return response
def get_minutes_candle(self, **kwargs):
'''
Get minute candles
'''
market= kwargs['market']
minutes = kwargs['minutes']
route_name = 'day_candles'
url = f"https://api.upbit.com/v1/candles/minutes/{minutes}?market={market}&count=1"
headers = {"Accept" : "application/json"}
response= requests.request(
"GET",
url,
headers=headers
)
response = response.json()
return response
def get_ticker(self, **kwargs):
'''
Current price (ticker) information
'''
market= kwargs['market']
url = f"https://api.upbit.com/v1/ticker?markets={market}"
headers = {"Accept": "application/json"}
response= requests.request(
"GET",
url,
headers=headers
)
return response.json()
def get_ticks(self, **kwargs):
'''
Recent trade history
'''
market= kwargs['market']
url = f"https://api.upbit.com/v1/trades/ticks?market={market}&count=1"
headers = {"Accept": "application/json"}
response = requests.request("GET", url, headers=headers)
return response.json()
def get_orderbooks(self, **kwargs):
'''
Query order book information
'''
market= kwargs['market']
url = f"https://api.upbit.com/v1/orderbook?markets={market}"
headers = {"Accept": "application/json"}
response = requests.request("GET", url, headers=headers)
return response.json()
def get_my_order(self, market, page):
access_key=self.access_key
secret_key=self.secret_key
query = {
'market' : market,
'page': page,
'order_by' : 'asc'
}
query_string = urlencode(query)
states = ['done', 'cancel']
states_query_string = '&'.join(["states[]={}".format(state) for state in states])
query['states[]'] = states
query_string = "{0}&{1}".format(query_string, states_query_string).encode()
m = hashlib.sha512()
m.update(query_string)
query_hash = m.hexdigest()
payload = {
'access_key': access_key,
'nonce': time.time(),
'query_hash': query_hash,
'query_hash_alg': 'SHA512',
}
jwt_token = jwt.encode(payload, secret_key)
authorize_token = 'Bearer {}'.format(jwt_token)
headers = {"Authorization": authorize_token}
res = requests.get(self.server_url + "/v1/orders", params=query, headers=headers)
return res.json()
def get_all_market(self):
'''
Query all market codes
'''
url = "https://api.upbit.com/v1/market/all?isDetails=false"
headers = {"Accept": "application/json"}
res = requests.request("GET", url, headers=headers)
res = res.json()
return res
'''
Test code
'''
'''
upbeat = Upbeat()
# Query all accounts
account = upbeat.getAllAccount()
print(f'All accounts: {account}')
# Order availability information
orderInfo = upbeat.getOrderChance()
print(f'Order availability: {orderInfo}')
# Place an order
# side_status of 1 means buy
# any other side_status means sell
order = upbeat.orderRequest(
volume = 1,
price = '5000',
side_status=1
)
print(f'Place order: {order}')
# Query the order list
orderList = upbeat.getOrderList()
print(f'Order list: {orderList}')
# Cancel an order
orderCancel = upbeat.orderCancelRequest()
print(f'Cancel order: {orderCancel}')
# Withdrawal availability information
withdrawChance = upbeat.getWithrawsChance()
print(f'Withdrawal availability: {withdrawChance}')
# Withdraw coins
withdrawCoin = upbeat.withdrawCoin()
print(f'Withdraw coins: {withdrawCoin}')
# Withdraw KRW
withdrawKrw = upbeat.withdrawKrw()
print(f'Withdraw KRW: {withdrawKrw}')
# Query the deposit list
depositList = upbeat.getDepositList()
print(f'Deposit list: {depositList}')
# Query a single deposit
# This is a detail lookup, so the deposit uuid is required as a parameter
deposit = upbeat.getDeposit(uuid_value='')
print(f'Single deposit: {deposit}')
# Request creation of a deposit address
coinAddressForDeposit = upbeat.generateCoinAddressForDeposit()
print(f'Deposit address creation request: {coinAddressForDeposit}')
# Query all deposit addresses
allCoinAddressForDeposit = upbeat.getAllCoinAddressForDeposit()
print(f'All deposit addresses: {coinAddressForDeposit}')
# Query a single deposit address
coinAddressForDeposit = upbeat.getCoinAddressForDeposit()
print(f'Single deposit address: {coinAddressForDeposit}')
# Deposit KRW
depositKrw = upbeat.requestDepositKrw()
print(f'Deposit KRW: {depositKrw}')
# Deposit/withdrawal status
walletStatus = upbeat.getWalletStatus()
print(f'Wallet status: {walletStatus}')
# Query the API key list
apiKeys = upbeat.getApiKeys()
print(f'API key list: {apiKeys}')
'''
|
import logging
import os
import shutil
import subprocess
import pytest
import salt.utils.platform
log = logging.getLogger(__name__)
@pytest.fixture(scope="package", autouse=True)
def skip_on_tcp_transport(request):
if request.config.getoption("--transport") == "tcp":
pytest.skip("Multimaster under the TPC transport is not working. See #59053")
@pytest.fixture(scope="package")
def salt_mm_master_1(request, salt_factories):
config_defaults = {
"open_mode": True,
"transport": request.config.getoption("--transport"),
}
config_overrides = {
"interface": "127.0.0.1",
}
factory = salt_factories.salt_master_daemon(
"mm-master-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_1_salt_cli(salt_mm_master_1):
return salt_mm_master_1.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_master_2(salt_factories, salt_mm_master_1):
if salt.utils.platform.is_darwin() or salt.utils.platform.is_freebsd():
subprocess.check_output(["ifconfig", "lo0", "alias", "127.0.0.2", "up"])
config_defaults = {
"open_mode": True,
"transport": salt_mm_master_1.config["transport"],
}
config_overrides = {
"interface": "127.0.0.2",
}
# Use the same ports for both masters; they bind to different interfaces
for key in (
"ret_port",
"publish_port",
):
config_overrides[key] = salt_mm_master_1.config[key]
factory = salt_factories.salt_master_daemon(
"mm-master-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
# The secondary salt master depends on the primary salt master fixture
# because we need to clone the keys
for keyfile in ("master.pem", "master.pub"):
shutil.copyfile(
os.path.join(salt_mm_master_1.config["pki_dir"], keyfile),
os.path.join(factory.config["pki_dir"], keyfile),
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def mm_master_2_salt_cli(salt_mm_master_2):
return salt_mm_master_2.get_salt_cli(timeout=120)
@pytest.fixture(scope="package")
def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_1.salt_minion_daemon(
"mm-minion-1",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
@pytest.fixture(scope="package")
def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
config_defaults = {
"transport": salt_mm_master_1.config["transport"],
}
mm_master_1_port = salt_mm_master_1.config["ret_port"]
mm_master_1_addr = salt_mm_master_1.config["interface"]
mm_master_2_port = salt_mm_master_2.config["ret_port"]
mm_master_2_addr = salt_mm_master_2.config["interface"]
config_overrides = {
"master": [
"{}:{}".format(mm_master_1_addr, mm_master_1_port),
"{}:{}".format(mm_master_2_addr, mm_master_2_port),
],
"test.foo": "baz",
}
factory = salt_mm_master_2.salt_minion_daemon(
"mm-minion-2",
defaults=config_defaults,
overrides=config_overrides,
extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
)
with factory.started(start_timeout=120):
yield factory
|
#200. Number of Islands
from typing import List
import collections
class Solution:
dx=[-1,1,0,0]
dy=[0,0,-1,1]
def numIslands(self, grid: List[List[str]]) -> int:
if not grid or not grid[0]:return 0
self.max_x=len(grid);self.max_y=len(grid[0]);self.grid=grid;
self.visited=set()
return sum([self.BFS(i,j)for i in range(self.max_x)for j in range(self.max_y)])
def BFS(self,x,y):
if not self._is_valid(x,y):
return 0
self.visited.add((x,y))
q=collections.deque()
q.append((x,y))
while q:
cur_x,cur_y=q.popleft()
for i in range(4):
new_x,new_y=cur_x+self.dx[i],cur_y+self.dy[i]
if self._is_valid(new_x,new_y):
self.visited.add((new_x,new_y))
q.append((new_x,new_y))
return 1
def _is_valid(self,x,y):
if x<0 or x>=self.max_x or y<0 or y>=self.max_y:
return False
if self.grid[x][y]=="0" or ((x,y) in self.visited):
return False
return True
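# Illustrative usage (not part of the original snippet): the grid below contains
# two islands, so numIslands returns 2.
#   grid = [["1", "1", "0"],
#           ["1", "0", "0"],
#           ["0", "0", "1"]]
#   Solution().numIslands(grid)  ->  2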
|
import os
from mtcnn import MTCNN
from image import draw_boxes, show_image, image_load
def detection(image_name):
"""
Run detector on single image
"""
detector = MTCNN()
try:
return detector.detect(image_name)
except ValueError:
print("No sign detected")
return []
def run_demo():
"""
Run demo on images in folder "images"
"""
path = os.path.abspath("../images")
image_list = os.listdir(path)
for image_name in image_list:
image_path = os.path.join(path, image_name)
print("-----------------------------------------------")
print("Path:", image_path)
image = image_load(image_path)
detect_data = detection(image_path)
if len(detect_data) > 0:
draw_boxes(image, detect_data[:,0:4], detect_data[:,4], detect_data[:,5], red=255)
show_image(image, False)
print(detect_data)
if "__main__" == __name__:
run_demo()
|
import pandas_datareader as pdr
import pandas_datareader.data as web
import datetime
import requests_cache
expire_after = datetime.timedelta(days=3)
session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=expire_after)
start = datetime.datetime(2021, 5, 1)
end = datetime.datetime(2021, 5, 19)
ticker_usuario = input("Enter the stock ticker: ")
ticker_usuario = ticker_usuario + ".SA"  # For IBOV (B3) stocks the '.SA' suffix must be appended.
ticker = ticker_usuario.upper()  # Convert everything to upper case.
# Fetch the data and store it in the papel variable.
papel = web.DataReader(ticker, 'yahoo', start, end, session=session)
papel = round(papel, 2)  # Round to 2 decimal places.
# Show the table information: column names and variable types.
papel.info()  # .info() prints directly and returns None, so no print() wrapper is needed.
print(
    f'The first 5 rows of the table:\n'
    f'{papel.head()}'
)
print(
    f'The data for a specific day:\n'
    f'{papel.loc["2021-5-19"]}'
)
papel_vol = papel.Volume
print(f'The volume of {ticker} was\n{papel_vol}')
|
import argparse
import sys
import time
import numpy as np
import pandas as pd
from epics import PV
from scipy.signal import peak_widths, periodogram
time0 = time.time()
def initArgs():
"""Initialize argparse arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--calc_streak", action="store_true", default=False,
help='Calculate streak')
parser.add_argument("--calc_period", action="store_true", default=False,
help='Calculate periodogram')
parser.add_argument("--plot", action="store_true", default=False,
help='Plot normalized spectrum')
parser.add_argument("--nevents", type=int, default=10,
help='Number of events to average')
parser.add_argument("--exp", type=str, default='cxilr6716',
help='Experiment')
parser.add_argument("--run", type=int,
help='Run')
parser.add_argument("--instrument", type=str, default='cxi',
help='Instrument')
parser.add_argument("--pvbase", type=str, default='CXI:SC1:DIFFRACT',
help='pvbase')
parser.add_argument("--alias", type=str, default='DscCsPad',
help='detector alias')
return parser.parse_args()
def DataSource(exp=None, run=None, **kwargs):
"""
Wrapper for loading PyDataSource DataSource
"""
import PyDataSource
if exp and run:
print('Loading DataSource for {:} run {:}'.format(exp, run))
ds = PyDataSource.DataSource(exp=exp, run=int(run))
elif exp:
        print('Loading DataSource for shared memory with experiment {:}'
''.format(exp))
ds = PyDataSource.DataSource(exp=exp)
else:
print('Loading DataSource for shared memory')
ds = PyDataSource.DataSource()
print(ds.configData.show_info())
print('')
print('Load time: {:} sec'.format(time.time() - time0))
return ds
def output_cspad_sum(ds=None, alias='DscCsPad',
pvbase='CXI:SC1:DIFFRACT', calc_period=True, calc_streak=False,
psd_events=None, psd_rate=None, psd_resolution=None, **kwargs):
"""Outputs cspad sum and certain statistics as PVs
Parameters
----------
ds : DataSource, optional
DataSource object, if not specified, loads it using kwargs
alias : str, optional
Name for CsPad data
pvbase : str, optional
Base for PV names
    calc_period : bool, optional
        Whether to run the frequency (PSD) analysis
    calc_streak : bool, optional
        Whether to compute the jet streak fraction statistic
psd_events : int, optional
Number of events for frequency analysis, default is 240
psd_rate : int or float, optional
Event rate [Hz], default is 120
psd_resolution : int, optional
Resolution setting will perform rolling mean [Hz]
"""
# Configure epics PVs
print('Initializing epics PVs')
cspad_sum_pv = PV(':'.join([pvbase, 'TOTAL_ADU']))
streak_fraction_pv = PV(':'.join([pvbase, 'STREAK_FRACTION']))
stats_mean_pv = PV(':'.join([pvbase, 'STATS_MEAN']))
stats_std_pv = PV(':'.join([pvbase, 'STATS_STD']))
stats_min_pv = PV(':'.join([pvbase, 'STATS_MIN']))
stats_max_pv = PV(':'.join([pvbase, 'STATS_MAX']))
psd_frequency_pv = PV(':'.join([pvbase, 'PSD_FREQUENCY']))
psd_amplitude_pv = PV(':'.join([pvbase, 'PSD_AMPLITUDE']))
psd_rate_pv = PV(':'.join([pvbase, 'PSD_RATE']))
psd_events_pv = PV(':'.join([pvbase, 'PSD_EVENTS']))
psd_resolution_pv = PV(':'.join([pvbase, 'PSD_RESOLUTION']))
psd_freq_min_pv = PV(':'.join([pvbase, 'PSD_FREQ_MIN']))
psd_freq_wf_pv = PV(':'.join([pvbase, 'PSD_FREQ_WF']))
psd_amp_wf_pv = PV(':'.join([pvbase, 'PSD_AMP_WF']))
# psd_amp_array_pv = PV(':'.join([pvbase,'PSD_AMP_ARRAY']))
if psd_rate:
psd_rate_pv.put(psd_rate)
psd_rate = psd_rate_pv.get()
if psd_rate > 360 or psd_rate < 10:
psd_rate = 120.
psd_rate_pv.put(psd_rate)
if psd_events:
psd_events_pv.put(psd_events)
psd_events = psd_events_pv.get()
if psd_events > 1200 or psd_events < 60:
psd_events = psd_rate * 2.
psd_events_pv.put(psd_events)
if psd_resolution:
psd_resolution_pv.put(psd_resolution)
psd_resolution = psd_resolution_pv.get()
if psd_resolution > 5 or psd_resolution < 0.1:
psd_resolution = psd_rate / float(psd_events)
psd_resolution_pv.put(psd_resolution)
nroll = int(psd_resolution * psd_events / float(psd_rate))
psd_freq_min = psd_freq_min_pv.get()
if psd_freq_min > 40 or psd_freq_min < 2:
psd_freq_min = 5.
psd_freq_min_pv.put(psd_freq_min)
if0 = int(psd_freq_min / float(psd_rate) * psd_events)
psd_freq_wf = np.arange(psd_events / 2. + 1.) * \
float(psd_rate) / float(psd_events)
psd_freq_wf_pv.put(psd_freq_wf)
print('Events = {}'.format(psd_events))
print('... done')
if not ds:
ds = DataSource(**kwargs)
print('... done')
detector = ds._detectors[alias]
detector.next()
detector.add.property(asic)
detector.add.property(streak_present)
try:
no_streak = []
iloop = 0
icheck = 0
streaks = 0
time0 = time.time()
time_last = time0
sums = []
# aPxx = []
# atime = []
while True:
cspad_sum = detector.corr.sum()
sums.append(cspad_sum)
cspad_sum_pv.put(cspad_sum)
if calc_streak:
streak = detector.streak_present
streaks += streak
if not streak:
no_streak.append(iloop)
iloop += 1
icheck += 1
if not iloop % psd_events:
sums = np.asarray(sums)
det_avg = sums.mean()
det_std = sums.std()
det_max = sums.max()
det_min = sums.min()
stats_mean_pv.put(det_avg)
stats_max_pv.put(det_max)
stats_min_pv.put(det_min)
stats_std_pv.put(det_std)
if calc_period:
# f should be same as psd_freq_wf
f, Pxx = periodogram(sums, psd_rate)
if nroll > 1:
Pxx = pd.DataFrame(Pxx).rolling(
nroll).mean().values[nroll:, 0]
f = f[nroll:]
psd_frequency = f[Pxx[if0:].argmax() + if0]
psd_amplitude = Pxx[if0:].max() / 4 * psd_rate / psd_events
psd_frequency_pv.put(psd_frequency)
psd_amplitude_pv.put(psd_amplitude)
psd_amp_wf_pv.put(Pxx)
time_next = time.time()
evtrate = icheck / (time_next - time_last)
icheck = 0
time_last = time_next
print('{:8.1f} Hz - {:8.1f} {:12} {:12} {:12}'
''.format(evtrate, psd_frequency, psd_amplitude,
det_avg, det_std))
#
                    # Need to make sure the shape is right before outputting the array
# aPxx.append(Pxx)
# psd_amp_array_pv.put(np.asarray(aPxx))
if calc_streak:
streak_fraction = streaks / psd_events
streak_fraction_pv.put(streak_fraction)
sums = []
# aPxx = []
# atime = []
streaks = 0
no_streak = []
# Change to evt.next() in future and count damage
detector.next()
except KeyboardInterrupt:
return
except Exception as e:
print(e)
def output_cspad_streak(ds=None, alias='DscCsPad',
pvbase='CXI:SC1:DIFFRACT', nevents=10, **kwargs):
"""
Output cspad jet streak information
"""
beam_x_pv = PV(':'.join([pvbase, 'X0']))
beam_y_pv = PV(':'.join([pvbase, 'Y0']))
streak_angle_pv = PV(':'.join([pvbase, 'STREAK_PHI']))
streak_intensity_pv = PV(':'.join([pvbase, 'STREAK_INTENSITY']))
streak_width_pv = PV(':'.join([pvbase, 'STREAK_WIDTH']))
if not ds:
ds = DataSource(**kwargs)
# Now set in epics -- update as needed from epics.
# beam_x_pv.put(2094.9301668334006) # run 104
# beam_y_pv.put(-1796.5697333657126)
detector = ds._detectors[alias]
detector.next()
detector.add.property(asic)
cy, cx = get_center(detector, beam_x_pv, beam_y_pv)
j_map_1, j_map_2 = find_proj_mapping(cy, cx)
detector.add.parameter(proj_map_1=j_map_1, proj_map_2=j_map_2)
detector.add.property(streak_angle_raw)
detector.add.property(streak_present)
try:
iloop = 0
time0 = time.time()
while True:
streak_angle = detector.streak_angle_raw[0]
streak_intensity = detector.streak_angle_raw[1]
streak_width = detector.streak_angle_raw[2]
streak_angle_pv.put(streak_angle)
streak_intensity_pv.put(streak_intensity)
streak_width_pv.put(streak_width)
if not (iloop + 1) % nevents and detector.streak_present:
evt_rate = iloop / (time.time() - time0)
print('{:15} {:6.1f} Hz {:5.1f} {:5.1f} {:5.3f} {}'
''.format(iloop, evt_rate, streak_angle,
streak_intensity, streak_width,
int(detector.streak_present)))
iloop += 1
detector.next()
except KeyboardInterrupt:
return
def streak_present(self):
im1 = self.asic[0]
im2 = self.asic[2]
return streak_present_im(im1) and streak_present_im(im2)
def streak_present_im(im):
'''im is 2D np-array'''
s = im[-10:].sum(axis=0)
s -= s.mean()
s /= np.roll(s, 10 - s.argmax())[20:].std()
return s.max() > 5
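# Hedged illustration (not part of the original script): exercise
# streak_present_im on synthetic data. The `_streak_demo` name and the
# injected 50-ADU column are assumptions for demonstration only; a bright
# vertical line in the last rows should exceed the > 5 sigma threshold.
def _streak_demo():
    im = np.random.randn(50, 100)
    im[-10:, 30] += 50.0  # inject a strong "streak" column
    return streak_present_im(im)  # expected: True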
def get_center(self, x0_pv, y0_pv):
center = (y0_pv.get(), x0_pv.get())
cy, cx = get_center_coords(self, center)
cy -= 185
return cy, cx
def to_pad_coord(det, point, i):
'''Point: (y,x)'''
pad = [1, 9, 17, 25][i]
origin = np.asarray(
(det.calibData.coords_x[pad, 0, 0], det.calibData.coords_y[pad, 0, 0]))
unit_y = ((det.calibData.coords_x[pad, 1, 0] - det.calibData.coords_x[pad, 0, 0]),
(det.calibData.coords_y[pad, 1, 0] - det.calibData.coords_y[pad, 0, 0]))
unit_x = ((det.calibData.coords_x[pad, 0, 1] - det.calibData.coords_x[pad, 0, 0]),
(det.calibData.coords_y[pad, 0, 1] - det.calibData.coords_y[pad, 0, 0]))
matrix = np.asarray([[unit_y[0], unit_x[0]], [unit_y[1], unit_x[1]]])
pos = np.linalg.solve(matrix, np.asarray(point) - origin)
return pos
def get_center_coords(det, center):
cy = np.zeros(4)
cx = np.zeros(4)
for i in range(4):
pos = to_pad_coord(det, center, i)
cy[i], cx[i] = pos[0], pos[1]
return cy, cx
def find_proj_mapping(cy, cx):
sq = 0
j_index_1 = np.zeros((100, 80), dtype=np.int64)
j_index_2 = np.zeros((100, 80), dtype=np.int64)
for a in range(-40, 40):
ang = np.radians(float(a) / 2)
for i in range(100):
j = int(np.tan(ang) * (100 - i + cy[sq]) + cx[sq]) % 100
j_index_1[i, a + 40] = j
j = int(np.tan(ang) *
(100 - i + cy[(sq + 2) % 4]) + cx[(sq + 2) % 4]) % 100
j_index_2[i, a + 40] = j
return j_index_1, j_index_2
def asic(self, attr='corr'):
"""
Select inner asics
"""
return getattr(self, attr)[[1, 9, 17, 25], :, 0:194]
def streak_angle_raw(self):
"""
Jet streak calculation
Returns: jet angle, jet intensity (as standard deviations from the mean),
jet width
"""
sq = 0
asic = self.asic
im1 = asic[sq][-100:, :100]
im2 = asic[(sq + 2) % 4][-100:, :100]
proj1 = np.zeros((100, 80))
proj2 = np.zeros((100, 80))
for a in range(-40, 40):
for i in range(im1.shape[0]):
proj1[i, a + 40] = im1[i, self.proj_map_1[i, a + 40]]
proj2[i, a + 40] = im2[i, self.proj_map_2[i, a + 40]]
s = proj1.sum(axis=0) + proj2.sum(axis=0)
s -= s.mean()
s /= np.roll(s, 10 - s.argmax())[20:].std()
peak = s[1:-1].argmax() + 1
try:
peakwidth = peak_widths(s, [peak])[0][0]
except Exception:
peakwidth = 5
return (np.pi * (peak - 40) / 360.0, s.max(), peakwidth)
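# Hedged illustration (not part of the original script): a standalone sketch of
# the dominant-frequency pick performed inside output_cspad_sum, using synthetic
# data so it runs without the EPICS/PyDataSource environment. The helper name
# `_psd_demo` and the 7 Hz test signal are assumptions for demonstration only.
def _psd_demo(rate=120.0, nevents=240, freq_min=5.0):
    """Return (frequency, amplitude) of the strongest peak above freq_min."""
    t = np.arange(nevents) / rate
    # Synthetic detector sums: a 7 Hz oscillation plus noise.
    sums = 1.0 + 0.1 * np.sin(2 * np.pi * 7.0 * t) + 0.01 * np.random.randn(nevents)
    f, Pxx = periodogram(sums, rate)
    if0 = int(freq_min / rate * nevents)
    psd_frequency = f[Pxx[if0:].argmax() + if0]
    psd_amplitude = Pxx[if0:].max() / 4 * rate / nevents
    return psd_frequency, psd_amplitude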
if __name__ == "__main__":
args = initArgs()
print('Initializing args: {}'.format(args))
sys.exit(output_cspad_sum(alias=args.alias, pvbase=args.pvbase,
exp=args.exp, run=args.run,
calc_period=args.calc_period,
calc_streak=args.calc_streak,
nevents=args.nevents, plot=args.plot))
|
import logging
import os
from itertools import product
from conftest_markers import DIMENSIONS_MARKER_ARGS
from framework.tests_configuration.config_utils import get_enabled_tests
from xdist import get_xdist_worker_id
def parametrize_from_config(metafunc):
"""
Apply parametrization to all test functions loaded by pytest.
The functions discovered by pytest are matched against the ones declared in the test config file. When a match
    is found, meaning the test is enabled, the dimensions declared in the config file are applied to the test function.
"""
tests_config = metafunc.config.getoption("tests_config")
test_collection_name = metafunc.definition.nodeid.split(os.path.sep)[0]
test_name = metafunc.definition.nodeid.split(os.path.sep)[1]
if test_collection_name in tests_config["test-suites"]:
if test_name in tests_config["test-suites"][test_collection_name]:
configured_dimensions_items = tests_config["test-suites"][test_collection_name][test_name]["dimensions"]
argnames, argvalues = _get_combinations_of_dimensions_values(configured_dimensions_items)
if argvalues:
metafunc.parametrize(argnames, argvalues, scope="class")
def _get_combinations_of_dimensions_values(configured_dimensions_items):
"""
Given a list of dict defining the configured test dimensions it computes all combinations of dimension
values in order to parametrize the tests.
E.g.
configured_dimensions_items =
[{'instances': ['inst1'], 'oss': ['os1', 'os2'], 'regions': ['region1', 'region2'], 'schedulers': ['s']},
{'instances': ['inst2', 'inst3'], 'oss': ['os1'], 'regions': ['region3'], 'schedulers': ['s']}]
Produces the following output:
argvalues = [('region1', 'inst1', 'os1', 's'), ('region1', 'inst1', 'os2', 's'), ('region2', 'inst1', 'os1', 's'),
('region2', 'inst1', 'os2', 's'), ('region3', 'inst2', 'os1', 's'), ('region3', 'inst3', 'os1', 's')]
"""
argnames = list(DIMENSIONS_MARKER_ARGS)
argvalues = []
for item in configured_dimensions_items:
dimensions_values = []
for dim in DIMENSIONS_MARKER_ARGS:
values = item.get(f"{dim}s")
if values:
dimensions_values.append(values)
elif dim in argnames:
argnames.remove(dim)
argvalues.extend(list(product(*dimensions_values)))
return argnames, argvalues
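# Hedged illustration (not part of the original module): a tiny sketch of how
# itertools.product expands one configured dimensions item into argvalue
# tuples, mirroring the example in the docstring above. The dimension order
# and the `_dimensions_demo` name are assumptions for demonstration only.
def _dimensions_demo():
    example_item = {
        "instances": ["inst1"],
        "oss": ["os1", "os2"],
        "regions": ["region1", "region2"],
        "schedulers": ["s"],
    }
    dims = ["region", "instance", "os", "scheduler"]  # assumed DIMENSIONS_MARKER_ARGS order
    values = [example_item[f"{dim}s"] for dim in dims]
    # [('region1', 'inst1', 'os1', 's'), ('region1', 'inst1', 'os2', 's'),
    #  ('region2', 'inst1', 'os1', 's'), ('region2', 'inst1', 'os2', 's')]
    return list(product(*values))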
def remove_disabled_tests(session, config, items):
"""Remove all tests that are not defined in the config file"""
enabled_tests = get_enabled_tests(config.getoption("tests_config"))
for item in list(items):
if item.nodeid.split("[")[0] not in enabled_tests:
if get_xdist_worker_id(session) in ["master", "gw0"]:
# log only in master process to avoid duplicate log entries
logging.warning("Skipping test %s because not defined in config", item.nodeid)
items.remove(item)
def apply_cli_dimensions_filtering(config, items):
"""Filter tests based on dimensions passed as cli arguments."""
allowed_values = {}
for dimension in DIMENSIONS_MARKER_ARGS:
allowed_values[dimension] = config.getoption(dimension + "s")
for item in list(items):
for dimension in DIMENSIONS_MARKER_ARGS:
# callspec is not set if parametrization did not happen
if hasattr(item, "callspec"):
arg_value = item.callspec.params.get(dimension)
if allowed_values[dimension]:
if arg_value not in allowed_values[dimension]:
items.remove(item)
break
|
"""CLI utilities for vcspull.
vcspull.cli
~~~~~~~~~~~
"""
import logging
import click
from ..__about__ import __version__
from ..log import setup_logger
from .sync import sync
log = logging.getLogger(__name__)
@click.group()
@click.option(
"--log-level",
default="INFO",
help="Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
@click.version_option(version=__version__, message="%(prog)s %(version)s")
def cli(log_level):
setup_logger(log=log, level=log_level.upper())
# Register sub-commands here
cli.add_command(sync)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
module_path = os.path.abspath(os.path.join("./src/"))
if module_path not in sys.path:
sys.path.append(module_path)
from timeit import default_timer as timer
import numpy as np
import pandas as pd
import torch
from model.autoencoders import SHAE, SHAENet
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from utils.utils import FixRandomSeed, StratifiedSurvivalKFold, shae_criterion
def main():
with open("config/config.json") as f:
config = json.load(f)
model = "shaenet"
param_space = {
f"{model}__module__lambda_2": [0.01, 0.001, 0.0001],
f"{model}__module__lambda_1": [0.01, 0.001, 0.0001],
}
for cancer in config["cancers"]:
print(f"Starting: {cancer}")
data = pd.read_csv(
f"./data/processed/{cancer}/merged/{config['data_name_tcga']}"
)
X = data[data.columns[2:]]
X = X.loc[:, (X != X.iloc[0]).any()]
y_str = data["OS"].astype(str) + "|" + data["OS.time"].astype(str)
train_splits = pd.read_csv(
f"./data/splits/{cancer}/{config['train_split_name_tcga']}"
)
test_splits = pd.read_csv(
f"./data/splits/{cancer}/{config['test_split_name_tcga']}"
)
clinical_indices = [
i for i in range(len(X.columns)) if "clinical" in X.columns[i]
]
gex_indices = [
i for i in range(len(X.columns)) if "gex" in X.columns[i]
]
cnv_indices = [
i for i in range(len(X.columns)) if "cnv" in X.columns[i]
]
meth_indices = [
i for i in range(len(X.columns)) if "meth" in X.columns[i]
]
mirna_indices = [
i for i in range(len(X.columns)) if "mirna" in X.columns[i]
]
mut_indices = [
i for i in range(len(X.columns)) if "mut" in X.columns[i]
]
rppa_indices = [
i for i in range(len(X.columns)) if "rppa" in X.columns[i]
]
blocks = [
clinical_indices,
gex_indices,
cnv_indices,
meth_indices,
mirna_indices,
mut_indices,
rppa_indices,
]
# Make sure that all variables are considered in the blocks
assert sum([len(i) for i in blocks]) == X.shape[1]
params = []
scores = []
timing = []
for i in range(train_splits.shape[0]):
print(f"Split: {i+1} / 10")
train_ix = train_splits.iloc[i, :].dropna().values
test_ix = test_splits.iloc[i, :].dropna().values
cv = StratifiedSurvivalKFold(n_splits=config["inner_splits"])
net = SHAENet(
module=SHAE,
criterion=shae_criterion,
max_epochs=config["epochs"],
lr=config["lr"],
train_split=None,
optimizer=torch.optim.Adam,
callbacks=[
("seed", FixRandomSeed(config["seed"])),
],
verbose=0,
batch_size=-1,
module__blocks=blocks,
module__residual=True
)
pipe = make_pipeline(StandardScaler(), net)
grid = GridSearchCV(
estimator=pipe,
param_grid=param_space,
cv=cv,
n_jobs=-1,
)
start = timer()
grid.fit(
X.iloc[train_ix, :].to_numpy().astype(np.float32),
y_str.iloc[train_ix].to_numpy().astype(str),
)
end = timer()
params.append(grid.best_params_)
scores.append(
grid.score(
X.iloc[test_ix, :].to_numpy().astype(np.float32),
y_str.iloc[test_ix].to_numpy().astype(str),
)
)
timing.append(end - start)
params = pd.DataFrame(params)
params.to_csv(
f"./data/benchmarks/{cancer}/{model}_residual_tuned_parameters_timed_euler.csv",
index=False,
)
bench = pd.DataFrame()
bench["concordance"] = scores
bench["model"] = model
bench["timing"] = timing
bench[["model", "concordance", "timing"]].to_csv(
f"./data/benchmarks/{cancer}/{model}_residual_tuned_scores_timed_euler.csv",
index=False,
)
if __name__ == "__main__":
sys.exit(main())
|
# https://leetcode.com/problems/longest-common-prefix/
# Related Topics: String, Array
# Difficulty: Easy
# Initial thoughts:
# We are going to look at each index of every string
# at the same time, comparing them to their counterpart
# in the first string, adding the character to our results
# if all of the strings have the same character, and returning
# the results in case of a difference.
# Time complexity: O(n * min(s)) where s is the length of the strings
# Space complexity: O(min(s)) where s is the length of the strings
from typing import List
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
res = []
if not len(strs):
return "".join(res)
for i in range(len(strs[0])):
for j in range(len(strs)):
if i >= len(strs[j]) or strs[0][i] != strs[j][i]:
return "".join(res)
res.append(strs[0][i])
return "".join(res)
# Optimization:
# Using the loop index we can forgo the result tracking variable
# and just slice and return it at the end, rendering the space complexity constant.
# Time complexity: O(n * min(s)) where s is the length of the longest common prefix
# Space complexity: O(1)
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if not len(strs):
return ""
for i in range(len(strs[0])):
for j in range(len(strs)):
                if i >= len(strs[j]) or strs[0][i] != strs[j][i]:
return strs[0][0:i]
return strs[0]
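# A minimal usage check for the solutions above (the sample input is an
# assumption for illustration, not from the original source).
if __name__ == "__main__":
    sample = ["flower", "flow", "flight"]
    print(Solution().longestCommonPrefix(sample))  # expected: "fl"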
|
#!/usr/bin/env python
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
def run(rank):
# create local model
model = nn.Linear(10, 10).to('cpu')
# define loss function and optimizer
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
# forward pass
outputs = model(torch.randn(20, 10))
labels = torch.randn(20, 10)
# backward pass
loss_fn(outputs, labels).backward()
# update parameters
optimizer.step()
print(list(model.parameters()))
if __name__ == "__main__":
run(0)
|
import gitlab
from .aio_gitlab import AioGitlab
class Gitlab(gitlab.Gitlab):
def __init__(
self,
url,
private_token,
oauth_token=None,
ssl_verify=True,
http_username=None,
http_password=None,
timeout=None,
api_version="4",
session=None,
per_page=None,
):
self.aio = AioGitlab(gl=self, gitlab_url=url, gitlab_token=private_token)
super(Gitlab, self).__init__(
url=url,
private_token=private_token,
oauth_token=oauth_token,
ssl_verify=ssl_verify,
http_username=http_username,
http_password=http_password,
timeout=timeout,
api_version=api_version,
session=session,
per_page=per_page,
)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Nettle(AutotoolsPackage, GNUMirrorPackage):
"""The Nettle package contains the low-level cryptographic library
that is designed to fit easily in many contexts."""
homepage = "https://www.lysator.liu.se/~nisse/nettle/"
gnu_mirror_path = "nettle/nettle-3.3.tar.gz"
version('3.4.1', sha256='f941cf1535cd5d1819be5ccae5babef01f6db611f9b5a777bae9c7604b8a92ad')
version('3.4', sha256='ae7a42df026550b85daca8389b6a60ba6313b0567f374392e54918588a411e94')
version('3.3', sha256='46942627d5d0ca11720fec18d81fc38f7ef837ea4197c1f630e71ce0d470b11e')
version('3.2', sha256='ea4283def236413edab5a4cf9cf32adf540c8df1b9b67641cfc2302fca849d97')
version('2.7.1', sha256='bc71ebd43435537d767799e414fce88e521b7278d48c860651216e1fc6555b40')
version('2.7', sha256='c294ea133c05382cc2effb1734d49f4abeb1ad8515543a333de49a11422cd4d6')
depends_on('gmp')
depends_on('m4', type='build')
def configure_args(self):
return ['CFLAGS={0}'.format(self.compiler.c99_flag)]
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from noticeboard.views import NoticeBoardView
urlpatterns = [
url(r'^$', NoticeBoardView.as_view(), name='notice-board'),
]
|
import falcon
import simplejson as json
import mysql.connector
import config
class MenuCollection:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name, route, parent_menu_id, is_hidden "
" FROM tbl_menus "
" ORDER BY id ")
cursor.execute(query)
rows_menus = cursor.fetchall()
result = list()
if rows_menus is not None and len(rows_menus) > 0:
for row in rows_menus:
temp = {"id": row['id'],
"name": row['name'],
"route": row['route'],
"parent_menu_id": row['parent_menu_id'],
"is_hidden": bool(row['is_hidden'])}
result.append(temp)
cursor.close()
cnx.disconnect()
resp.body = json.dumps(result)
class MenuItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_MENU_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name, route, parent_menu_id, is_hidden "
" FROM tbl_menus "
" WHERE id=%s ")
cursor.execute(query, (id_,))
rows_menu = cursor.fetchone()
result = None
if rows_menu is not None and len(rows_menu) > 0:
result = {"id": rows_menu['id'],
"name": rows_menu['name'],
"route": rows_menu['route'],
"parent_menu_id": rows_menu['parent_menu_id'],
"is_hidden": bool(rows_menu['is_hidden'])}
cursor.close()
cnx.disconnect()
resp.body = json.dumps(result)
@staticmethod
def on_put(req, resp, id_):
"""Handles PUT requests"""
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=str(ex))
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_MENU_ID')
new_values = json.loads(raw_json)
if 'is_hidden' not in new_values['data'].keys() or \
not isinstance(new_values['data']['is_hidden'], bool):
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_IS_HIDDEN')
is_hidden = new_values['data']['is_hidden']
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
update_row = (" UPDATE tbl_menus "
" SET is_hidden = %s "
" WHERE id = %s ")
cursor.execute(update_row, (is_hidden,
id_))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_200
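        # Illustrative request for the handler above (the endpoint path shown is
        # an assumption for demonstration, not defined in this file); only the
        # is_hidden flag is accepted and updated:
        #   PUT /menus/{id_}
        #   {"data": {"is_hidden": true}}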
class MenuChildrenCollection:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_MENU_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, name, route, parent_menu_id, is_hidden "
" FROM tbl_menus "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row_current_menu = cursor.fetchone()
if row_current_menu is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.MENU_NOT_FOUND')
query = (" SELECT id, name "
" FROM tbl_menus "
" ORDER BY id ")
cursor.execute(query)
rows_menus = cursor.fetchall()
menu_dict = dict()
if rows_menus is not None and len(rows_menus) > 0:
for row in rows_menus:
menu_dict[row['id']] = {"id": row['id'],
"name": row['name']}
result = dict()
result['current'] = dict()
result['current']['id'] = row_current_menu['id']
result['current']['name'] = row_current_menu['name']
result['current']['parent_menu'] = menu_dict.get(row_current_menu['parent_menu_id'], None)
result['current']['is_hidden'] = bool(row_current_menu['is_hidden'])
result['children'] = list()
query = (" SELECT id, name, route, parent_menu_id, is_hidden "
" FROM tbl_menus "
" WHERE parent_menu_id = %s "
" ORDER BY id ")
cursor.execute(query, (id_, ))
rows_menus = cursor.fetchall()
if rows_menus is not None and len(rows_menus) > 0:
for row in rows_menus:
parent_menu = menu_dict.get(row['parent_menu_id'], None)
meta_result = {"id": row['id'],
"name": row['name'],
"parent_menu": parent_menu,
"is_hidden": bool(row['is_hidden'])}
result['children'].append(meta_result)
cursor.close()
cnx.disconnect()
resp.body = json.dumps(result)
class MenuWebCollection:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
query = (" SELECT id, route, parent_menu_id "
" FROM tbl_menus "
" WHERE parent_menu_id IS NULL AND is_hidden = false ")
cursor.execute(query)
rows_menus = cursor.fetchall()
first_level_routes = {}
if rows_menus is not None and len(rows_menus) > 0:
for row in rows_menus:
first_level_routes[row['id']] = {
'route': row['route'],
'children': []
}
query = (" SELECT id, route, parent_menu_id "
" FROM tbl_menus "
" WHERE parent_menu_id IS NOT NULL AND is_hidden = false ")
cursor.execute(query)
rows_menus = cursor.fetchall()
if rows_menus is not None and len(rows_menus) > 0:
for row in rows_menus:
if row['parent_menu_id'] in first_level_routes.keys():
first_level_routes[row['parent_menu_id']]['children'].append(row['route'])
result = dict()
for _id, item in first_level_routes.items():
result[item['route']] = item['children']
cursor.close()
cnx.disconnect()
resp.body = json.dumps(result)
|
from hummingbot.script.script_base import ScriptBase
from decimal import Decimal
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
SellOrderCompletedEvent
)
from os.path import realpath, join
s_decimal_1 = Decimal("1")
LOGS_PATH = realpath(join(__file__, "../../logs/"))
SCRIPT_LOG_FILE = f"{LOGS_PATH}/logs_yt_monitor_script.log"
def print_comma_only(number):
return "{:,.2f}".format(number)
def print_currency(amount):
if amount <= 1:
return "${:,.6f}".format(amount)
else:
return "${:,.2f}".format(amount)
class YTMonitorScript(ScriptBase):
"""
Demonstrates a monitoring script that can take external inputs from a stream and execute a number of scripts based on the input.
This is experimental and assumes that the input stream is authenticated.
"""
# 1. measure volatility -> if too high, then send a stop signal
    # 2. track profit target? -> if mid price is too low or too high
# 3. track how much fees paid? -> if too high, then allow sending a signal back
def __init__(self):
super().__init__()
self.url = None
self.status = "No action"
self._has_updated = False
self.total_units_bought = 0
self.total_units_sold = 0
self.total_balance = 0
self.average_price = 0
self._first_time_only = True
def on_tick(self):
strategy = self.pmm_parameters
assert strategy is not None
# market_info = self.pmm_market_info
if self._first_time_only:
self._first_time_only = False
def update_balances(self, units, price, is_buy):
self.total_balance += round(price, 2)
if is_buy:
self.total_units_bought += units
self.average_price = round(float(self.total_balance / self.total_units_bought), 2)
else:
self.total_units_sold += units
self.average_price = round(float(self.total_balance / self.total_units_sold), 2)
return
def on_buy_order_completed(self, event: BuyOrderCompletedEvent):
token = event.base_asset
price = event.quote_asset_amount
units = event.base_asset_amount
# print(f"Bought {token}: {amount} units @ ${price} {price_currency}")
self.update_balances(units, price, True)
self.status = f"bought = {print_comma_only(self.total_units_bought)} {token}, "
self.status += f"total balance = {print_currency(self.total_balance)}, "
self.status += f"avg price = {print_currency(self.average_price)}"
self.log(self.status)
def on_sell_order_completed(self, event: SellOrderCompletedEvent):
token = event.base_asset
price = event.quote_asset_amount
units = event.base_asset_amount
# print(f"Sold {token}: {amount} units @ ${price} {price_currency}")
self.update_balances(units, price, False)
self.status = f"sold = {print_comma_only(self.total_units_sold)} {token}, "
self.status += f"total balance = {print_currency(self.total_balance)}, "
self.status += f"avg price = {print_currency(self.average_price)}"
self.log(self.status)
def on_status(self):
return f"{self.status}"
|
# -*- coding: utf-8 -*-
"""
folklore.service
~~~~~~~~~~~~~~~~
This module implements service runner and handler definition interface.
Available hooks:
- before_api_call Hooks executed before the api is called.
- api_called Hooks executed after the api is called.
- api_timeout Hooks executed when an api call times out.
Registered hooks:
- api_called
- config_log
"""
import contextlib
import functools
import gevent
import itertools
import logging
import os.path
import sys
import time
from copy import deepcopy
from thriftpy import load
from thriftpy.thrift import TProcessorFactory, TException, \
TApplicationException
from thriftpy.transport import TBufferedTransport, TTransportException
from thriftpy.protocol import TBinaryProtocol
from folklore_config import config
from folklore_thrift import Processor, Response
from .exc import CloseConnectionError, TimeoutException
from .hook import HookRegistry, StopHook
from .hook.api import api_called
from ._compat import reraise, protocol_exceptions
from .log import MetaAdapter, config_log
TIMEOUT = 30
@contextlib.contextmanager
def _ignore_hook_exception(logger):
try:
yield
except Exception as e:
logger.warning('Ignore hook function exception: %s', e, exc_info=True)
class Context(dict):
"""Runtime context.
    This class is used to track runtime information.
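    A minimal illustration (the attribute name and value below are made up
    for demonstration):
    :Example:
    >>> ctx = Context(info=Context())
    >>> ctx.user = 'alice'
    >>> ctx['user']
    'alice'
    >>> ctx.clear_except('info')
    >>> 'user' in ctx
    False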
"""
__setattr__ = dict.__setitem__
def __getattr__(self, attr):
if attr not in self:
raise AttributeError(
'Context object has no attribute {!r}'.format(attr))
return self[attr]
def clear_except(self, *keys):
"""Clear the dict except the given key.
:param keys: not delete the values of these keys
"""
reserved = [(k, self.get(k)) for k in keys]
self.clear()
self.update(reserved)
class FolkloreBinaryProtocol(object):
"""Thrift binary protocol wrapper
Used for ``thrift_protocol_class`` config.
:param sock: client socket
"""
def __init__(self, sock):
self.sock = sock
self.trans = None
def get_proto(self):
"""Create a TBinaryProtocol instance
"""
self.trans = TBufferedTransport(self.sock)
return TBinaryProtocol(self.trans)
def close(self):
"""Close underlying transport
"""
if self.trans is not None:
try:
self.trans.close()
finally:
self.trans = None
class FolkloreService(object):
"""Folklore service runner.
:Example:
>>> service = FolkloreService()
>>> service.context.info.update({'client_addr': '0.0.0.0:1234'})
    >>> service.set_handler(handler)
>>> service.run()
"""
def __init__(self):
# The `info` field is shared by the whole service.
self.context = Context(info=Context())
self.logger = MetaAdapter(logging.getLogger(config.app_name),
extra={'ctx': self.context})
self.context.logger = self.logger
self.service_def = None
def set_handler(self, handler):
"""Fill the service handler for this service.
:param handler: a :class:`ServiceHandler` instance
"""
self.api_map = ApiMap(handler, self.context)
self.logger.logger = logging.getLogger(
':'.join([config.app_name, handler.service_name]))
self.service_def = getattr(handler.thrift_module, handler.service_name)
def run(self, sock):
"""The main run loop for the service.
:param sock: the client socket
"""
processor = TProcessorFactory(
Processor,
self.context,
self.service_def,
self.api_map
).get_processor()
proto_class = config.thrift_protocol_class or FolkloreBinaryProtocol
factory = proto_class(sock)
proto = factory.get_proto()
try:
while True:
processor.process(proto, proto)
except TTransportException as e:
# Ignore EOF exception
if e.type != TTransportException.END_OF_FILE:
self.logger.exception(e)
except protocol_exceptions as e:
self.logger.warn(
'[%s:%s] protocol error: %s',
self.context.info.get('client_addr', '-'),
self.context.info.get('client_port', '-'), e)
except CloseConnectionError:
pass
finally:
factory.close()
class ApiMap(object):
"""Record service handlers.
:param handler: a :class:`ServiceHandler` instance
:param env: environment context for this api map
"""
def __init__(self, handler, context):
self.__map = handler.api_map
self.__context = context
self.__hook = handler.hook_registry
self.__system_exc_handler = handler.system_exc_handler
self.__api_exc_handler = handler.api_exc_handler
self.__thrift_exc_handler = handler.thrift_exc_handler
def __setitem__(self, attr, item):
self.__map[attr] = item
def __call(self, api_name, handler, *args, **kwargs):
ctx = self.__context
# Add utility attributes to ctx
ctx.update(args=args, kwargs=kwargs, api_name=api_name,
start_at=time.time(), conf=handler.conf,
response_meta={}, log_extra={})
timeout = ctx.conf.get('timeout', TIMEOUT)
ctx.conf.setdefault('soft_timeout', timeout)
ctx.conf.setdefault('hard_timeout', timeout)
soft_timeout = ctx.conf['soft_timeout']
hard_timeout = ctx.conf['hard_timeout']
with_ctx = ctx.conf.get('with_ctx', False)
ctx.exc = None
try:
if hard_timeout < soft_timeout:
ctx.logger.warning(
'Api soft timeout {!r}s greater than hard timeout {!r}s'
.format(soft_timeout, hard_timeout))
# Before api call hook
try:
self.__hook.on_before_api_call(ctx)
except StopHook as e:
ret = Response(value=e.value, meta=e.meta)
ctx.return_value = ret
return ret
except Exception as e:
ctx.exc = e
reraise(*self.__system_exc_handler(*sys.exc_info()))
try:
args = itertools.chain([ctx], args) if with_ctx else args
with gevent.Timeout(hard_timeout,
exception=TimeoutException(hard_timeout)):
ret = handler(*args, **kwargs)
if not isinstance(ret, Response):
ret = Response(ret)
ctx.return_value = ret
return ret
except TException as e:
ctx.exc = e
reraise(*self.__thrift_exc_handler(*sys.exc_info()))
except TimeoutException as e:
ctx.exc = e
with _ignore_hook_exception(ctx.logger):
self.__hook.on_api_timeout(ctx)
reraise(*self.__system_exc_handler(*sys.exc_info()))
except Exception as e:
ctx.exc = e
reraise(*self.__api_exc_handler(*sys.exc_info()))
finally:
ctx.end_at = time.time()
# After api call hook
with _ignore_hook_exception(ctx.logger):
self.__hook.on_api_called(ctx)
# Clear context
ctx.clear_except('info', 'logger')
def __getattr__(self, api_name):
func = self.__map.get(api_name, _Handler.undefined(api_name))
return functools.partial(self.__call, api_name, func)
class _Handler(object):
"""Api handler.
    Every api is wrapped with this class so that each api can be configured
    individually.
"""
def __init__(self, func, conf):
"""Create a new Handler instance.
:param func: api function
:param conf: api configuration dict
"""
functools.wraps(func)(self)
self.func = func
self.conf = conf
def __call__(self, *args, **kwargs):
"""Delegate to the true function.
"""
return self.func(*args, **kwargs)
@classmethod
def undefined(cls, api_name):
"""Generate an undefined api handler
"""
def temp_func(*args, **kwargs):
raise TApplicationException(
TApplicationException.UNKNOWN_METHOD,
message='API {!r} undefined'.format(api_name))
return cls(temp_func, {})
class ServiceModule(object):
"""This class makes it convinent to implement api in different modules.
"""
def __init__(self, **kwargs):
self.conf = kwargs
self.api_map = {}
def add_api(self, name, func, conf):
"""Add an api
:param name: api name
:param func: function implement the api
:param conf: api configuration
"""
self.api_map[name] = _Handler(func, conf)
def api(self, name=None, **conf):
"""Used to register a handler func.
:param name: alternative api name, the default name is function name
"""
api_conf = deepcopy(self.conf)
api_conf.update(conf)
# Direct decoration
if callable(name):
self.add_api(name.__name__, name, api_conf)
return name
def deco(func):
api_name = name or func.__name__
self.add_api(api_name, func, api_conf)
return func
return deco
def api_with_ctx(self, *args, **kwargs):
"""Same as api except that the first argument of the func will
be api environment
"""
kwargs['with_ctx'] = True
return self.api(*args, **kwargs)
def _load_app_thrift():
module_name, _ = os.path.splitext(os.path.basename(config.thrift_file))
# module name should ends with '_thrift'
if not module_name.endswith('_thrift'):
module_name = ''.join([module_name, '_thrift'])
return load(config.thrift_file, module_name=module_name)
# Eager load thrift module.
thrift_module = _load_app_thrift()
del _load_app_thrift
class ServiceHandler(ServiceModule):
"""Folklore service handler.
This class is used to define a Folklore app. It will load thrift module and
set ``thrift_module`` for thrift module attribute access.
:Example:
app = ServiceHandler('PingService')
@app.api()
def ping():
return 'pong'
"""
def __init__(self, service_name, **kwargs):
self.service_name = service_name
super(ServiceHandler, self).__init__(**kwargs)
self.system_exc_handler = self.default_exception_handler
self.api_exc_handler = self.default_exception_handler
self.thrift_exc_handler = lambda tp, v, tb: (tp, v, tb)
# init hook registry
self.hook_registry = HookRegistry()
# register api hook
self.hook_registry.register(api_called)
# register log config hook
self.hook_registry.register(config_log)
# Reference to the global thrift module.
self.thrift_module = thrift_module
@staticmethod
def default_exception_handler(tp, val, tb):
e = TApplicationException(TApplicationException.INTERNAL_ERROR,
message=repr(val))
return TApplicationException, e, tb
def extend(self, module):
"""Extend app with another service module
:param module: instance of :class:`ServiceModule`
"""
for api_name, handler in module.api_map.items():
api_conf = deepcopy(self.conf)
api_conf.update(handler.conf)
self.add_api(api_name, handler.func, api_conf)
def use(self, hook):
"""Apply hook for this app
:param hook: a :class:`folklore_service.hook.Hook` instance
"""
self.hook_registry.register(hook)
def handle_system_exception(self, func):
"""Set system exception handler
:param func: the function to handle system exceptions
"""
self.system_exc_handler = func
return func
def handle_api_exception(self, func):
"""Set application exception handler
:Example:
.. code-block:: python
@app.handle_api_exception
def app_exception(tp, value, tb):
exc = app_thrift.UnknownException()
exc.value = value
exc.with_traceback(tb)
return exc.__class__, exc, tb
:param func: the function to handle application exceptions
"""
self.api_exc_handler = func
return func
def handle_thrift_exception(self, func):
"""Set thrift exception handler
:param func: the function to handle thrift exceptions
"""
self.thrift_exc_handler = func
return func
def __call__(self):
"""Make it callable
"""
|
from pydht import Dht
import pydht.magic as magic
import random
import numpy
class Bin:
def __init__(self, dht: Dht) -> None:
self.dht = dht
def __str__(self) -> str:
return "bin"
def __repr__(self) -> str:
return self.__str__()
def __get(self) -> int:
        if self.dht is None:
return
if self.dht.nodeList.empty():
return int(self.dht.size / 2)
if len(self.dht.nodeList) == 1:
return int(self.dht.nodeList.getN(0).id - (self.dht.getSpace(self.dht.nodeList.getN(0))/2))
nodes = self.dht.nodeList.getRandomNodes(self.dht.n)
i = 0
max = 0
maxId = None
while i < len(nodes):
space = self.dht.getSpace(nodes[i])
if space > max:
maxId = []
maxId.append(i)
max = space
elif space == max:
maxId.append(i)
i += 1
assert (maxId != [])
maxId = random.choice(maxId)
index = self.dht.nodeList.get(nodes[maxId])
space = self.dht.getSpace(nodes[maxId])
id = self.dht.nodeList.getN(index).id - (space / 2)
return int(numpy.mod(id, self.dht.size))
def get(self) -> int:
return self.__get()
class binReal:
def __init__(self, dht: Dht, phi = None) -> None:
self.dht = dht
self.phi = phi
self.c = 1
def __str__(self) -> str:
return "binReal"
def __repr__(self) -> str:
return self.__str__()
def __phi(self, l: int) -> int:
        if self.phi is not None:
return self.phi
return 2
return numpy.max([0,l - int(numpy.ceil(numpy.log2(l))) - self.c])
def __get(self) -> int:
        if self.dht is None:
return
if self.dht.nodeList.empty():
return 0
if (len(self.dht.nodeList) == 1):
return int(self.dht.size // 2)
l = self.dht.n
r = self.dht.nodeList.getRandomNodes(1)[0]
S_r = self.dht.nodeList.getSharedIdNodes(r, self.__phi(l))
if (self.__phi(l) == self.dht.n):
assert(len(S_r) == 0)
if (len(S_r) >= 2 ** (l - self.__phi(l))) or len(S_r) == 0:
return self.getIdHalve(r)
else:
return self.getIdHalveSelect(S_r)
assert(False)
def get(self) -> int:
return self.__get()
def getIdHalve(self, node) -> int:
index = self.dht.nodeList.get(node)
space = 0
if index == 0:
space = self.dht.size - self.dht.nodeList.getN(-1).id + node.id
else:
space = node.id - self.dht.nodeList.getN(index-1).id
return int(numpy.mod((node.id-(space//2)),self.dht.size))
def getIdHalveSelect(self, nodeList) -> int:
i = 0
max = 0
maxId = None
while i < len(nodeList):
space = self.dht.getSpace(nodeList[i])
if space > max:
maxId = []
maxId.append(i)
max = space
elif space == max:
maxId.append(i)
i += 1
assert (maxId != [])
maxId = random.choice(maxId)
return self.getIdHalve(nodeList[maxId])
|
"""
argparse boilerplate code
"""
import ast
import argparse
import textwrap
from randtest import __version__
def read_data(ifname):
"""Read in data: assuming no header, only numbers"""
with open(ifname, "r") as fobj:
data = (ast.literal_eval(num.strip()) for num in fobj.readlines())
return data
def argparse_cli(description):
"""argparse boilerplate code"""
parser = argparse.ArgumentParser(description=textwrap.dedent(description))
parser.add_argument(
"-v",
"--version",
action="version",
# version="%(prog)s " + __version__,
version=__version__,
)
parser.add_argument(
"-a",
metavar="alternative",
type=str,
choices=["two_sided", "greater", "less"],
default="two_sided",
help="alternative hypothesis (default: 'two_sided').",
)
parser.add_argument(
"-p",
metavar="num_permutations",
type=int,
default=10000,
help="number of permutations (default: 10000).",
)
parser.add_argument(
"-n",
metavar="num_jobs",
type=int,
default=1,
help="number of jobs (default: 1).",
)
parser.add_argument(
"-l",
metavar="log_level",
type=str,
choices=["debug", "info", "warn", "error", "critical"],
default="warn",
help="set log level (default: 'warn').",
)
parser.add_argument(
"-s",
metavar="seed",
type=int,
default=None,
help="seed to initialize the random number generator (default: None)",
)
parser.add_argument(
"fname_data_A", type=str, help="file name group A data.",
)
parser.add_argument(
"fname_data_B", type=str, help="file name group B data.",
)
return parser
|
from .Die import Die
|
from django.contrib import admin
from django.urls import path, include
from rest_framework_simplejwt.views import TokenRefreshView
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class CryptoTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
token["first_name"] = user.first_name
token["last_name"] = user.last_name
return token
class CryptoTokenObtainPairView(TokenObtainPairView):
serializer_class = CryptoTokenObtainPairSerializer
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/login', CryptoTokenObtainPairView.as_view(), name='login_token_obtain_pair'),
path('accounts/token/fresh', TokenRefreshView.as_view(), name='token_refresh'),
path('api/v1/arbitrage/', include('bitcoin_arbitrage.urls', namespace='bitcoin_arbitrage')),
    path('api/handlers/', include('crypto_bot.urls', namespace='apis_handler')),
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
'''Attention mechanisms'''
# ∑softmax(W*tanh(V*h))
class SelfAttention(nn.Module):
def __init__(self, hidden_size):
super(SelfAttention, self).__init__()
self.relation = nn.Sequential(
nn.Linear(hidden_size, hidden_size//2),
nn.ReLU(), # nn.Tanh
nn.Linear(hidden_size//2, 1)
)
def forward(self, encoder_output):
# [batch_size, seq_len, hidden_size]
# -> [batch_size, seq_len, 1]
rout = self.relation(encoder_output)
# [batch_size, seq_len, 1] -> [batch_size, seq_len]
weights = F.softmax(rout.squeeze(2), dim=1)
# [batch_size, seq_len, hidden_size] * [batch_size, seq_len, 1]
# -> [batch_size, seq_len, hidden_size] -> [batch_size, hidden_size]
out = (encoder_output * weights.unsqueeze(-1)).sum(dim=1)
# [batch_size, hidden_size]
out = torch.tanh(out)
return out, weights
# attention
# att(Q, K, V) = ∑softmax(Q'K/√dim_k)V
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
pass
def forward(self, query, keys, values):
'''
:param query: [batch_size, Q]
:param keys: [batch_size, seq_len, K]
:param values: [batch_size, seq_len, V]
more: K==Q keys==values(K==V)
:return: out:[batch_size, V] weights:[batch_size, seq_len]
'''
        # Scaling factor that keeps the dot products from growing too large
scale = 1. / math.sqrt(keys.size(2))
# [batch_size, 1, Q] * [batch_size, K, seq_len] ->
# [batch_size, 1, seq_len] -> [batch_size, seq_len]
att_weights = torch.bmm(query.unsqueeze(1), keys.transpose(1, 2)).squeeze(1)
# [batch_size, seq_len]
soft_att_weights = F.softmax(att_weights.mul(scale), dim=1)
# [batch_size, 1, seq_len] * [batch_size, seq_len, V] -> [batch_size, V]
att_out = torch.bmm(soft_att_weights.unsqueeze(1), values).squeeze(1)
# # [batch_size, seq_len, K] * [batch_size, Q, 1] ->
# # [batch_size, seq_len, 1] -> [batch_size, seq_len]
# att_weights = torch.bmm(keys, query.unsqueeze(2)).squeeze(2)
# # [batch_size, seq_len]
# soft_att_weights = F.softmax(att_weights.mul(scale), dim=1)
# # [batch_size, V, seq_len] * [batch_size, seq_len, 1] -> [batch_size, V, 1]
# att_out = torch.bmm(values.transpose(1, 2), soft_att_weights.unsqueeze(2)).squeeze(2)
return att_out, soft_att_weights
if __name__ == '__main__':
k = torch.rand((3, 10, 50))
v = k
q = torch.rand((3, 50))
att = Attention()
y, w = att(q, k, v)
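    # Hedged addition (not in the original): a quick SelfAttention smoke test
    # using the same illustrative shapes as above.
    self_att = SelfAttention(hidden_size=50)
    y2, w2 = self_att(k)          # k: [batch_size=3, seq_len=10, hidden_size=50]
    print(y.shape, w.shape)       # torch.Size([3, 50]) torch.Size([3, 10])
    print(y2.shape, w2.shape)     # torch.Size([3, 50]) torch.Size([3, 10])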
|
import dataset
import librosa
from torch.utils.data import DataLoader, random_split
import torch
import torch.nn.functional as F
from utils import *
import torchvision.models as models
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import argparse
import yaml
from pathlib import Path
import pandas as pd
import time
from models import *
from filter import *
from torch.autograd import Variable
import glob
from mel2wav.modules import MelGAN_Generator
LongTensor = torch.cuda.LongTensor
def parse_args():
parser = argparse.ArgumentParser()
# Training parameters
#parser.add_argument("--device", type = str, required = True)
parser.add_argument("--experiment_name", type = str, required = True)
parser.add_argument("--resume_experiment", type = bool, default = False)
parser.add_argument("--epochs", type = int, default = 10000)
parser.add_argument("--batch_size", type = int, default = 64)
parser.add_argument("--load_path", type = str, default = None)
parser.add_argument("--stop_interval", type = int, default = 100)
# Data parameters
parser.add_argument("--sampling_rate", type = int, default = 8000)
parser.add_argument("--segment_length", type = int, default = 8192)
parser.add_argument("--seed", type = int, default = None)
args = parser.parse_args()
return args
def main():
args = parse_args()
root = Path(os.getcwd())
experiment_name = args.experiment_name
device = 'cuda:0'
# Set random seed
    if args.seed is None:
manualSeed = random.randint(1, 10000) # use if you want new results
else:
manualSeed = args.seed
random.seed(manualSeed)
torch.manual_seed(manualSeed)
np.random.seed(manualSeed)
# Set up log directory
log_dir = os.path.join(root,'logs')
experiment_dir = os.path.join(log_dir, experiment_name)
checkpoint_dir = os.path.join(experiment_dir,'checkpoints')
visuals_dir = os.path.join(experiment_dir,'visuals')
example_dir = os.path.join(experiment_dir,'examples')
example_audio_dir = os.path.join(example_dir, 'audio')
example_spec_dir = os.path.join(example_dir, 'spectrograms')
if os.path.exists(experiment_dir) and not args.resume_experiment:
print("Experiment with this name already exists, use --resume_experiment to continue.")
exit()
elif not args.resume_experiment:
os.mkdir(experiment_dir)
os.mkdir(example_dir)
os.mkdir(checkpoint_dir)
os.mkdir(example_audio_dir)
os.mkdir(example_spec_dir)
os.mkdir(visuals_dir)
# ##################################
# Dump arguments and create logger #
# ###################################
with open(Path(experiment_dir) / "args.yml", "w") as f:
yaml.dump(args, f)
yaml.dump({'Seed used' : manualSeed}, f)
writer = SummaryWriter(str(experiment_dir))
# Some hyper parameters
num_genders = 2
num_digits = 10
# Meta data and list of data files
annotation_file = '/home/adam/adversarial_learning_speech/audio_mnist/audio_mnist/audioMNIST_meta.json'
train_file_index = librosa.util.find_files('/home/adam/adversarial_learning_speech/audio_mnist/audio_mnist/')
split_ratio = 5
# Build indices for the data
file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id = dataset.build_annotation_index(
train_file_index, annotation_file, balanced_genders = False)
test_annotation_index, train_annotation_index, test_ids, train_ids = dataset.balanced_annotation_split(file_index, annotation_index_gender, annotation_index_digit, annotation_index_speaker_id, split_ratio)
print(test_ids)
print(train_ids)
# Create the dataset
train_data = dataset.AnnotatedAudioDataset(train_annotation_index, args.sampling_rate, args.segment_length
)
test_data = dataset.AnnotatedAudioDataset(
test_annotation_index, args.sampling_rate, args.segment_length
)
n_train = train_data.__len__()
n_test = test_data.__len__()
#Set up models
audio_gender_net = AudioNet(num_genders).to(device)
audio_digit_net = AudioNet(num_digits).to(device)
# Optimizers
opt_gender = torch.optim.Adam(audio_gender_net.parameters(),1e-4, betas = (0.5, 0.9))
opt_digit = torch.optim.Adam(audio_digit_net.parameters(), 1e-4, betas = (0.5, 0.9))
# Put training objects into list for loading and saving state dicts
training_objects = []
training_objects.append(('netGender', audio_gender_net))
training_objects.append(('optGender', opt_gender))
training_objects.append(('netDigit', audio_digit_net))
training_objects.append(('optDigit', opt_digit))
training_objects.sort(key = lambda x : x[0])
# Loss
gender_loss = nn.CrossEntropyLoss()
digit_loss = nn.CrossEntropyLoss()
lowest_loss_digit = 1e+6
lowest_loss_gender =1e+6
counter_digit=0
counter_gender=0
# Dataloaders
train_loader = DataLoader(train_data, batch_size = args.batch_size , num_workers = 2, shuffle = True)
test_loader = DataLoader(test_data, batch_size = 1, num_workers = 1)
iter = 0
best_test_acc_digit = 0
best_test_acc_gender = 0
print("Training initiated, {} epochs".format(args.epochs))
for epoch in range(0, args.epochs):
correct_gender = 0
correct_digit = 0
epoch_start = time.time()
for i, (x, gender, digit, _) in enumerate(train_loader):
audio_digit_net.train()
audio_gender_net.train()
x = torch.unsqueeze(x,1).to(device)
digit = digit.to(device)
gender = gender.to(device)
#---------------------
# Train gender net
#---------------------
opt_gender.zero_grad()
pred_gender, _ = audio_gender_net(x)
audio_gender_loss = gender_loss(pred_gender, gender)
audio_gender_loss.backward()
opt_gender.step()
#---------------------
# Train digit net
#---------------------
opt_digit.zero_grad()
pred_digit, _ = audio_digit_net(x)
audio_digit_loss = digit_loss(pred_digit, digit)
audio_digit_loss.backward()
opt_digit.step()
#---------------------------------------
# Calculate accuracies on training set
#---------------------------------------
predicted = torch.argmax(pred_gender.data, 1)
correct_gender += (predicted == gender).sum()
predicted = torch.argmax(pred_digit.data, 1)
correct_digit += (predicted == digit).sum()
train_accuracy_gender = 100 * correct_gender / n_train
train_accuracy_digit = 100 * correct_digit / n_train
writer.add_scalar("train_digit_acc", train_accuracy_digit, epoch + 1)
writer.add_scalar("train_gender_acc", train_accuracy_gender, epoch + 1)
#---------------------------------------
# Evaluate model on test set
#---------------------------------------
correct_gender = 0
correct_digit = 0
accum_loss_digit = 0
accum_loss_gender = 0
for i, (x, gender, digit, _) in enumerate(test_loader):
audio_digit_net.eval()
audio_gender_net.eval()
x = torch.unsqueeze(x,1).to(device)
digit = digit.to(device)
gender = gender.to(device)
pred_digit, _ = audio_digit_net(x)
pred_gender, _ = audio_gender_net(x)
audio_gender_loss_val = gender_loss(pred_gender, gender)
audio_digit_loss_val = digit_loss(pred_digit,digit)
            accum_loss_digit += audio_digit_loss_val
            accum_loss_gender += audio_gender_loss_val
predicted = torch.argmax(pred_gender.data, 1)
correct_gender += (predicted == gender).sum()
predicted = torch.argmax(pred_digit.data, 1)
correct_digit += (predicted == digit).sum()
test_accuracy_gender = 100 * correct_gender / n_test
test_accuracy_digit = 100 * correct_digit / n_test
writer.add_scalar("test_digit_acc", test_accuracy_digit, epoch + 1)
writer.add_scalar("test_gender_acc", test_accuracy_gender, epoch + 1)
print("Epoch {} completed | Time: {:5.2f} s".format(epoch + 1, time.time() - epoch_start))
print("Digit | Train set accuracy: {} % | Test set accuracy: {} %".format(train_accuracy_digit, test_accuracy_digit))
print("Gender | Train set accuracy: {} % | Test set accuracy: {} %".format(train_accuracy_gender, test_accuracy_gender))
print("#____________________________________________________________#")
if lowest_loss_gender > accum_loss_gender:
best_test_acc_gender = test_accuracy_gender
torch.save(audio_gender_net.state_dict(),os.path.join(root, 'audio_gender_net_early_stop_epoch_{}.pt'.format(epoch)))
lowest_loss_gender = accum_loss_gender
counter_gender=0
else:
counter_gender +=1
if lowest_loss_digit > accum_loss_digit :
best_test_acc_digit = test_accuracy_digit
torch.save(audio_digit_net.state_dict(),os.path.join(root, 'audio_digit_net_early_stop_epoch_{}.pt'.format(epoch)))
lowest_loss_digit = accum_loss_digit
counter_digit=0
else:
counter_digit+=1
if counter_gender > args.stop_interval:
lowest_loss_gender = -1
final_acc_gender = test_accuracy_gender
print(final_acc_gender)
print('Not training gender more')
if counter_digit > args.stop_interval:
lowest_loss_digit = -1
final_acc_digit = test_accuracy_digit
print(final_acc_digit)
print('Not training digit more')
if lowest_loss_digit ==-1 and lowest_loss_gender==-1:
exit()
if __name__ =="__main__":
main()
|
'''Write a function toWeirdCase (weirdcase in Ruby) that accepts a string,
and returns the same string with all even indexed characters in each word upper cased,
and all odd indexed characters in each word lower cased.
The indexing just explained is zero based, so the zero-ith index is even,
therefore that character should be upper cased.
The passed in string will only consist of alphabetical characters and spaces(' ').
Spaces will only be present if there are multiple words. Words will be separated by a single space(' ').'''
def to_weird_case(s):
    # Apply the rule per word: even indices upper-cased, odd indices lower-cased.
    words = []
    for word in s.split(' '):
        chars = list(word.lower())
        chars[::2] = [c.upper() for c in chars[::2]]
        words.append(''.join(chars))
    return ' '.join(words)
'''
also we could do this:
def to_weird_case_word(string):
return "".join(c.upper() if i%2 == 0 else c for i, c in enumerate(string.lower()))
def to_weird_case(string):
return " ".join(to_weird_case_word(str) for str in string.split())'''
|
import flask as fl
from flask_login import login_required
import time
from sse.eventStack import getEventStackHandler
eventStackHandler = getEventStackHandler()
eventStackHandler.start()
#eventStackgenerator = eventStackHandler.getGenerator()
sseBP = fl.Blueprint("sse",__name__)
@sseBP.route("/test")
@login_required
def test():
eventStackHandler.addEvent(str(time.time()))
return "",200
@sseBP.route("/lastUpdate")
def lastUpdate():
return eventStackHandler.last_message,200
@sseBP.route("/stream")
def stream():
return fl.Response(eventStackHandler.run(), mimetype="text/event-stream")
|
import sys
import traceback
import aergo.herapy as herapy
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
print(*args, **kwargs)
def run():
try:
aergo = herapy.Aergo()
print("------ Export New Account -----------")
aergo.new_account(skip_state=True)
new_exp_txt = aergo.export_account(password="1234")
print("Exported txt is {}".format(new_exp_txt))
print("------ Import Account -----------")
try:
aergo.import_account(new_exp_txt, password='test')
except herapy.errors.GeneralException:
print("It should be failed.")
print("------ Import Account -----------")
try:
account = aergo.import_account(new_exp_txt, password='1234')
print("Account private key is {}".format(account.private_key))
print("Account address is {}".format(account.address))
except herapy.errors.GeneralException:
print("It should be failed.")
print("------ Import Account with Exported Data -----------")
#exported_txt = "485ccQXjmT3JeUHV16n16LzAJhhfHHkv9HU9k7c5PeJDyPMAdLcCu8Yqws19UzMP4K4Rq2MkQ"
exported_txt = "MNxKz7jPTaWW8xZc6HSbgTPXDj6yGq6PrLbboSyV5psBfKyRmbo5qoVHgWTEXucisaUy8Y3PGf4UBc"
print("Exported Data is {}".format(exported_txt))
account = aergo.import_account(exported_txt, password='1234')
print("Account private key is {}".format(account.private_key))
print("Account address is {}".format(account.address))
print("------ Export Account -----------")
new_exp_txt = aergo.export_account(password='1234')
print("Exported txt is {}".format(new_exp_txt))
print("------ Connect AERGO -----------")
aergo.connect('localhost:7845')
print("------ Get Account State -----------")
a = aergo.get_account(address=account.address)
print(" > account state of Import account")
print(" - balance = {}".format(a.balance))
print(" - nonce = {}".format(a.nonce))
print(" - code hash = {}".format(a.code_hash))
print(" - storage root = {}".format(a.storage_root))
print("------ Disconnect AERGO -----------")
aergo.disconnect()
except Exception as e:
eprint(e)
traceback.print_exception(*sys.exc_info())
if __name__ == '__main__':
run()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
password: pulumi.Input[str],
username: pulumi.Input[str],
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] password: The user password (string)
:param pulumi.Input[str] username: The user username (string)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for global role binding (map)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for global role binding (map)
:param pulumi.Input[str] name: The user full name (string)
"""
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
The user password (string)
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
"""
The user username (string)
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for global role binding (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for global role binding (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The user full name (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _UserState:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering User resources.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for global role binding (map)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for global role binding (map)
:param pulumi.Input[str] name: The user full name (string)
:param pulumi.Input[str] password: The user password (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] principal_ids: (Computed) The user principal IDs (list)
:param pulumi.Input[str] username: The user username (string)
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if principal_ids is not None:
pulumi.set(__self__, "principal_ids", principal_ids)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for global role binding (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for global role binding (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The user full name (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The user password (string)
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="principalIds")
def principal_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
(Computed) The user principal IDs (list)
"""
return pulumi.get(self, "principal_ids")
@principal_ids.setter
def principal_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "principal_ids", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
The user username (string)
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Rancher v2 User resource. This can be used to create Users for Rancher v2 environments and retrieve their information.
        When a Rancher User is created, it doesn't have a global role binding. At least a `user-base` global role binding is needed to enable user login.
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 User
foo_user = rancher2.User("fooUser",
username="foo",
password="changeme",
enabled=True)
# Create a new rancher2 global_role_binding for User
foo_global_role_binding = rancher2.GlobalRoleBinding("fooGlobalRoleBinding",
global_role_id="user-base",
user_id=foo_user.id)
```
## Import
Users can be imported using the Rancher User ID
```sh
$ pulumi import rancher2:index/user:User foo <user_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for global role binding (map)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for global role binding (map)
:param pulumi.Input[str] name: The user full name (string)
:param pulumi.Input[str] password: The user password (string)
:param pulumi.Input[str] username: The user username (string)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Rancher v2 User resource. This can be used to create Users for Rancher v2 environments and retrieve their information.
        When a Rancher User is created, it doesn't have a global role binding. At least a `user-base` global role binding is needed to enable user login.
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 User
foo_user = rancher2.User("fooUser",
username="foo",
password="changeme",
enabled=True)
# Create a new rancher2 global_role_binding for User
foo_global_role_binding = rancher2.GlobalRoleBinding("fooGlobalRoleBinding",
global_role_id="user-base",
user_id=foo_user.id)
```
## Import
Users can be imported using the Rancher User ID
```sh
$ pulumi import rancher2:index/user:User foo <user_id>
```
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["enabled"] = enabled
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
if password is None and not opts.urn:
raise TypeError("Missing required property 'password'")
__props__.__dict__["password"] = password
if username is None and not opts.urn:
raise TypeError("Missing required property 'username'")
__props__.__dict__["username"] = username
__props__.__dict__["principal_ids"] = None
super(User, __self__).__init__(
'rancher2:index/user:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
principal_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'User':
"""
Get an existing User resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for global role binding (map)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for global role binding (map)
:param pulumi.Input[str] name: The user full name (string)
:param pulumi.Input[str] password: The user password (string)
:param pulumi.Input[Sequence[pulumi.Input[str]]] principal_ids: (Computed) The user principal IDs (list)
:param pulumi.Input[str] username: The user username (string)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserState.__new__(_UserState)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["enabled"] = enabled
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["password"] = password
__props__.__dict__["principal_ids"] = principal_ids
__props__.__dict__["username"] = username
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Annotations for global role binding (map)
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Labels for global role binding (map)
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The user full name (string)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> pulumi.Output[str]:
"""
The user password (string)
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="principalIds")
def principal_ids(self) -> pulumi.Output[Sequence[str]]:
"""
(Computed) The user principal IDs (list)
"""
return pulumi.get(self, "principal_ids")
@property
@pulumi.getter
def username(self) -> pulumi.Output[str]:
"""
The user username (string)
"""
return pulumi.get(self, "username")
|
import requests
import time
import urllib.parse
import hashlib
import hmac
import base64
class API(object):
def __init__(self, secret='', key=''):
self.url = 'https://api.kraken.com/0/'
self.session = requests.Session()
self.response = None
        self.call_rate_limit = 15
        # Store the credentials passed to the constructor; private() and _sign() rely on them.
        self.key = key
        self.secret = secret
def public(self, method='Time', input=None, timeout=None):
url = self.url + 'public/' + method
if not input:
input = {}
self.response = self.session.post(url, data=input, timeout=timeout)
if self.response.ok is False:
self.response.raise_for_status()
return self.response.json()
def private(self, method, input=None, timeout=None):
url = self.url + 'private/' + method
if not input:
input = {}
input['nonce'] = self._nonce()
headers = {
'API-Key': self.key,
'API-Sign': self._sign(input, '/0/private/' + method)
}
self.response = self.session.post(url, data=input, headers=headers, timeout=timeout)
if self.response.ok is False:
self.response.raise_for_status()
return self.response.json()
def load_key(self, path):
with open(path, 'r') as f:
self.key = f.readline().strip()
self.secret = f.readline().strip()
return
def _nonce(self):
return int(1000*time.time())
def _sign(self, input, urlpath):
postdata = urllib.parse.urlencode(input)
encoded = (str(input['nonce']) + postdata).encode()
message = urlpath.encode() + hashlib.sha256(encoded).digest()
signature = hmac.new(base64.b64decode(self.secret), message, hashlib.sha512)
sigdigest = base64.b64encode(signature.digest())
return sigdigest.decode()
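# A minimal usage sketch (hedged): 'Time' is a public Kraken endpoint and
# 'Balance' a private one; the key file is assumed to hold the API key on the
# first line and the base64-encoded secret on the second, matching load_key().
#
#   api = API()
#   print(api.public('Time'))
#   api.load_key('kraken.key')  # hypothetical path
#   print(api.private('Balance'))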
|
import os
import ee
import geemap.foliumap as geemap
import streamlit as st
def app():
st.title("NAIP Imagery")
st.markdown(
"""
NAIP: National Agriculture Imagery Program. See this [link](https://developers.google.com/earth-engine/datasets/catalog/USDA_NAIP_DOQQ) for more information.
"""
)
df = st.session_state["locations"]
names = df["Name"].values.tolist()
names.sort()
col1, col2, col3, col5, col6, col7, _ = st.columns([1.8, 2, 2, 1, 1, 1, 1])
Map = geemap.Map(plugin_Draw=True, Draw_export=True)
# with col1:
# basemap = st.selectbox(
# "Select a basemap", geemap.folium_basemaps.keys(), index=1)
Map.add_basemap("HYBRID")
Map.add_basemap("ROADMAP")
with col1:
name = st.selectbox("Select a location", names)
latitude = df[df["Name"] == name]["latitude"].values[0]
longitude = df[df["Name"] == name]["longitude"].values[0]
# roi = ee.FeatureCollection("users/giswqs/MRB/NWI_HU8_Boundary_Simplify")
# roi = ee.FeatureCollection(
# "TIGER/2018/States").filter(ee.Filter.eq("NAME", "Tennessee"))
roi = ee.Geometry.Point([longitude, latitude]).buffer(1000)
style = {"color": "000000", "width": 2, "fillColor": "00000000"}
with col2:
checkbox = st.checkbox("Add NAIP imagery", value=True)
with col5:
lat = st.text_input("Center latitude", latitude)
with col6:
lon = st.text_input("Center longitude", longitude)
with col7:
zoom = st.slider("Zoom", 1, 22, 17)
if checkbox:
with col3:
year = st.slider("Select a year", 2003, 2021, 2018)
naip = ee.ImageCollection("USDA/NAIP/DOQQ")
naip = naip.filter(ee.Filter.calendarRange(year, year, "year"))
naip = naip.filterBounds(roi)
# 2005, 2006, 2007,
vis_params = {"bands": ["N", "R", "G"]}
if year in [2005, 2006, 2007]:
vis_params = {"bands": ["R", "G", "B"]}
Map.addLayer(naip, vis_params, f"NAIP {year}")
# Map.addLayer(roi.style(**style), {}, "Tennessee")
Map.add_points_from_xy(
"data/PyCTN.csv",
popup=["Name", "latitude", "longitude"],
layer_name="Callery Pear Locations",
)
Map.set_center(float(lon), float(lat), int(zoom))
Map.to_streamlit(width=1400, height=700)
|
import asyncio
import time
import unittest
from collections import deque
from typing import (
Deque,
Optional,
Union,
)
import hummingbot.connector.exchange.binance.binance_constants as CONSTANTS
from hummingbot.connector.exchange.binance.binance_order_book import BinanceOrderBook
from hummingbot.connector.exchange.binance.binance_order_book_tracker import BinanceOrderBookTracker
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.order_book_message import OrderBookMessage, OrderBookMessageType
class BinanceOrderBookTrackerUnitTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.ev_loop = asyncio.get_event_loop()
def setUp(self) -> None:
super().setUp()
self.throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
self.tracker: BinanceOrderBookTracker = BinanceOrderBookTracker(trading_pairs=[self.trading_pair],
throttler=self.throttler)
self.tracking_task: Optional[asyncio.Task] = None
# Simulate start()
self.tracker._order_books[self.trading_pair] = BinanceOrderBook()
self.tracker._tracking_message_queues[self.trading_pair] = asyncio.Queue()
self.tracker._past_diffs_windows[self.trading_pair] = deque()
self.tracker._order_books_initialized.set()
def tearDown(self) -> None:
self.tracking_task and self.tracking_task.cancel()
super().tearDown()
def _simulate_message_enqueue(self, message_queue: Union[asyncio.Queue, Deque], msg: OrderBookMessage):
if isinstance(message_queue, asyncio.Queue):
self.ev_loop.run_until_complete(message_queue.put(msg))
elif isinstance(message_queue, Deque):
message_queue.append(msg)
else:
raise NotImplementedError
def test_order_book_diff_router_trading_pair_not_found_append_to_saved_message_queue(self):
expected_msg: OrderBookMessage = OrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content={
"update_id": 1,
"trading_pair": self.trading_pair,
}
)
self._simulate_message_enqueue(self.tracker._order_book_diff_stream, expected_msg)
self.tracker._tracking_message_queues.clear()
task = self.ev_loop.create_task(
self.tracker._order_book_diff_router()
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(0, len(self.tracker._tracking_message_queues))
self.assertEqual(1, len(self.tracker._saved_message_queues[self.trading_pair]))
task.cancel()
def test_order_book_diff_router_snapshot_uid_above_diff_message_update_id(self):
expected_msg: OrderBookMessage = OrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content={
"update_id": 1,
"trading_pair": self.trading_pair,
}
)
self._simulate_message_enqueue(self.tracker._order_book_diff_stream, expected_msg)
task = self.ev_loop.create_task(
self.tracker._order_book_diff_router()
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(1, self.tracker._tracking_message_queues[self.trading_pair].qsize())
task.cancel()
def test_order_book_diff_router_snapshot_uid_below_diff_message_update_id(self):
# Updates the snapshot_uid
self.tracker.order_books[self.trading_pair].apply_snapshot([], [], 2)
expected_msg: OrderBookMessage = OrderBookMessage(
message_type=OrderBookMessageType.DIFF,
content={
"update_id": 1,
"trading_pair": self.trading_pair,
}
)
self._simulate_message_enqueue(self.tracker._order_book_diff_stream, expected_msg)
task = self.ev_loop.create_task(
self.tracker._order_book_diff_router()
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(0, self.tracker._tracking_message_queues[self.trading_pair].qsize())
task.cancel()
def test_track_single_book_snapshot_message_no_past_diffs(self):
snapshot_msg: OrderBookMessage = BinanceOrderBook.snapshot_message_from_exchange(
msg={
"trading_pair": self.trading_pair,
"lastUpdateId": 1,
"bids": [
["4.00000000", "431.00000000"]
],
"asks": [
["4.00000200", "12.00000000"]
]
},
timestamp=time.time()
)
self._simulate_message_enqueue(self.tracker._tracking_message_queues[self.trading_pair], snapshot_msg)
self.tracking_task = self.ev_loop.create_task(
self.tracker._track_single_book(self.trading_pair)
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(1, self.tracker.order_books[self.trading_pair].snapshot_uid)
def test_track_single_book_snapshot_message_with_past_diffs(self):
snapshot_msg: OrderBookMessage = BinanceOrderBook.snapshot_message_from_exchange(
msg={
"trading_pair": self.trading_pair,
"lastUpdateId": 1,
"bids": [
["4.00000000", "431.00000000"]
],
"asks": [
["4.00000200", "12.00000000"]
]
},
timestamp=time.time()
)
past_diff_msg: OrderBookMessage = BinanceOrderBook.diff_message_from_exchange(
msg={
"e": "depthUpdate",
"E": 123456789,
"s": "BNBBTC",
"U": 1,
"u": 2,
"b": [
[
"0.0024",
"10"
]
],
"a": [
[
"0.0026",
"100"
]
]
},
metadata={"trading_pair": self.trading_pair}
)
self.tracking_task = self.ev_loop.create_task(
self.tracker._track_single_book(self.trading_pair)
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self._simulate_message_enqueue(self.tracker._past_diffs_windows[self.trading_pair], past_diff_msg)
self._simulate_message_enqueue(self.tracker._tracking_message_queues[self.trading_pair], snapshot_msg)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(1, self.tracker.order_books[self.trading_pair].snapshot_uid)
self.assertEqual(2, self.tracker.order_books[self.trading_pair].last_diff_uid)
def test_track_single_book_diff_message(self):
diff_msg: OrderBookMessage = BinanceOrderBook.diff_message_from_exchange(
msg={
"e": "depthUpdate",
"E": 123456789,
"s": "BNBBTC",
"U": 1,
"u": 2,
"b": [
[
"0.0024",
"10"
]
],
"a": [
[
"0.0026",
"100"
]
]
},
metadata={"trading_pair": self.trading_pair}
)
self._simulate_message_enqueue(self.tracker._tracking_message_queues[self.trading_pair], diff_msg)
self.tracking_task = self.ev_loop.create_task(
self.tracker._track_single_book(self.trading_pair)
)
self.ev_loop.run_until_complete(asyncio.sleep(0.5))
self.assertEqual(0, self.tracker.order_books[self.trading_pair].snapshot_uid)
self.assertEqual(2, self.tracker.order_books[self.trading_pair].last_diff_uid)
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.lsmod as linux_lsmod
import volatility.plugins.linux.common as linux_common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_check_modules(linux_common.AbstractLinuxCommand):
"""Compares module list to sysfs info, if available"""
def get_kset_modules(self):
module_kset_addr = self.profile.get_symbol("module_kset")
if not module_kset_addr:
debug.error("This command is not supported by this profile.")
ret = {}
module_kset = obj.Object("kset", offset = module_kset_addr, vm = self.addr_space)
for kobj in module_kset.list.list_of_type("kobject", "entry"):
kobj_off = self.profile.get_obj_offset("module_kobject", "kobj")
mod_kobj = obj.Object("module_kobject", offset = kobj.v() - kobj_off, vm = self.addr_space)
mod = mod_kobj.mod
name = kobj.name.dereference_as("String", length = 32)
if name.is_valid() and kobj.reference_count() > 2:
ret[str(name)] = mod
return ret
def calculate(self):
linux_common.set_plugin_members(self)
kset_modules = self.get_kset_modules()
lsmod_modules = set([str(module.name) for (module, params, sects) in linux_lsmod.linux_lsmod(self._config).calculate()])
for mod_name in set(kset_modules.keys()).difference(lsmod_modules):
yield kset_modules[mod_name]
def unified_output(self, data):
return TreeGrid([("ModuleAddress", Address),
("ModuleName", str)],
self.generator(data))
def generator(self, data):
for mod in data:
yield (0, [Address(mod), str(mod.name)])
def render_text(self, outfd, data):
        self.table_header(outfd, [("Module Address", "[address]"), ("Core Address", "[address]"), ("Init Address", "[address]"), ("Module Name", "24")])
for mod in data:
self.table_row(outfd, mod, mod.module_core, mod.module_init, str(mod.name))
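# A hedged usage note: with Volatility 2 and a Linux profile built for the
# target kernel, this plugin is normally run from the command line, roughly:
#
#   python vol.py --profile=<LinuxProfileName> -f memory.lime linux_check_modules
#
# (the profile name and image path above are placeholders, not taken from this file)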
|
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@equinixmetal.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import metal
from models.events_api import EventsApi # noqa: E501
from metal.rest import ApiException
class TestEventsApi(unittest.TestCase):
"""EventsApi unit test stubs"""
def setUp(self):
        self.api = EventsApi()  # noqa: E501
def tearDown(self):
pass
def test_find_connection_events(self):
"""Test case for find_connection_events
Retrieve connection events # noqa: E501
"""
pass
def test_find_connection_port_events(self):
"""Test case for find_connection_port_events
Retrieve connection port events # noqa: E501
"""
pass
def test_find_device_events(self):
"""Test case for find_device_events
Retrieve device's events # noqa: E501
"""
pass
def test_find_event_by_id(self):
"""Test case for find_event_by_id
Retrieve an event # noqa: E501
"""
pass
def test_find_events(self):
"""Test case for find_events
Retrieve current user's events # noqa: E501
"""
pass
def test_find_organization_events(self):
"""Test case for find_organization_events
Retrieve organization's events # noqa: E501
"""
pass
def test_find_project_events(self):
"""Test case for find_project_events
Retrieve project's events # noqa: E501
"""
pass
def test_find_virtual_circuit_events(self):
"""Test case for find_virtual_circuit_events
Retrieve connection events # noqa: E501
"""
pass
def test_find_volume_events(self):
"""Test case for find_volume_events
Retrieve volume's events # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
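# A hedged sketch of how one of these generated stubs might be filled in; the
# exact client configuration/auth wiring is assumed, not taken from the package:
#
#   def test_find_events(self):
#       events = self.api.find_events()
#       self.assertIsNotNone(events)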
|
from hashtables.hashtables import *
import pytest
def test_left_join():
hash1 = HashTable()
hash1.add('fond', 'enamored')
hash1.add('wrath', 'anger')
hash1.add('diligent', 'employed')
hash1.add('outfit', 'garb')
hash1.add('guide', 'usher')
hash2 = HashTable()
hash2.add('fond', 'averse')
hash2.add('wrath', 'delight')
hash2.add('diligent', 'idle')
hash2.add('guide', 'follow')
hash2.add('flow', 'jam')
assert hashmap_left_join(hash1, hash2) == [['outfit', 'garb', 'Null'], ['guide', 'usher', 'follow'], ['wrath', 'anger', 'delight'], ['diligent', 'employed', 'idle'], ['fond', 'enamored', 'averse']]
def test_left_join_no_matching():
hash1 = HashTable()
hash1.add('pond', 'enamored')
hash1.add('rath', 'anger')
hash1.add('adiligent', 'employed')
hash1.add('poutfit', 'garb')
hash1.add('hangguide', 'usher')
hash2 = HashTable()
hash2.add('fond', 'averse')
hash2.add('wrath', 'delight')
hash2.add('diligent', 'idle')
hash2.add('guide', 'follow')
hash2.add('flow', 'jam')
assert hashmap_left_join(hash1, hash2) == [['poutfit', 'garb', 'Null'], ['hangguide', 'usher', 'Null'], ['adiligent', 'employed', 'Null'], ['rath', 'anger', 'Null'], ['pond', 'enamored', 'Null']]
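# A hedged sketch of the left join these tests exercise. The HashTable API used
# below (keys(), contains(), get()) is assumed for illustration only; the real
# hashtables module may expose different method names:
#
#   def hashmap_left_join(left, right):
#       joined = []
#       for key in left.keys():
#           right_value = right.get(key) if right.contains(key) else 'Null'
#           joined.append([key, left.get(key), right_value])
#       return joined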
|
import json
import matplotlib
import pylab
import sys
import matplotlib.pyplot as plt
train = json.loads(open(sys.argv[1]).read())
test = json.loads(open(sys.argv[2]).read())
key_maps = {}
i = 0
for k, v in train.items():
if k in key_maps.keys():
continue
key_maps[k] = i
i = i + 1
for k, v in test.items():
if k in key_maps.keys():
continue
key_maps[k] = i
i = i + 1
print 'KEYMAPS:', key_maps
train_points = []
test_points = []
train_avg, test_avg = {}, {}
for k,v in train.items():
    if v:  # skip empty measurement lists to avoid division by zero
avg = sum(v)/len(v)
train_avg[k] = avg
train_points.append((key_maps[k], avg))
print train_avg
for k,v in test.items():
    if v:  # skip empty measurement lists to avoid division by zero
avg = sum(v)/len(v)
test_avg[k] = avg
test_points.append((key_maps[k], avg))
print test_avg
result = {}
for k in train.keys():
if (test_avg[k] <= (train_avg[k] + 20)) and (test_avg[k] >= (train_avg[k]-20)):
result[k] = True
else:
result[k] = False
print sum(result.values())/float(len(result.values()))
x1,y1 = zip(*train_points)
x2,y2 = zip(*test_points)
C = x1, y1
S = x2, y2
plt.scatter(x1, y1, color="blue", linewidth=1.0, linestyle="-")
plt.scatter(x2, y2, color="green", linewidth=1.0, linestyle="-")
matplotlib.pyplot.show()
|
# api/serializers
from rest_framework import serializers
from .models import Order
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = ('id', 'date', 'first_name', 'last_name', 'city', 'state', 'card', 'company', 'cost', 'status', )
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from wechatpy.client.api import WeChatMenu
from ..models import Material, Menu, WeChatApp
from .base import mock, WeChatTestCase
class MenuTestCase(WeChatTestCase):
def test_sync(self):
"""测试同步菜单"""
permenant_media = "permenant_media_id"
app = self.app
        # Official WeChat menu data
data = self.load_data("mp_menu_data")
buttons = data["selfmenu_info"]["button"]
with mock.patch.object(WeChatApp, "as_permenant_material"),\
mock.patch.object(WeChatApp, "sync_articles"),\
mock.patch.object(WeChatMenu, "get_menu_info"):
WeChatApp.as_permenant_material.return_value = permenant_media
WeChatApp.sync_articles.return_value = None
WeChatMenu.get_menu_info.return_value = data
app.sync_menus()
self.assertMenusEqual(self.menus, buttons)
        # The WeChat menu does not exist
data = {
"is_menu_open": 0
}
with mock.patch.object(WeChatMenu, "get_menu_info"):
WeChatMenu.get_menu_info.return_value = data
Menu.sync(app)
self.assertEqual(self.menus.count(), 0)
        # Sync a self-defined (custom) menu
data = self.load_data("self_menu_data")
buttons = data["selfmenu_info"]["button"]
with mock.patch.object(WeChatMenu, "get_menu_info"):
WeChatMenu.get_menu_info.return_value = data
Menu.sync(app)
self.assertMenusEqual(self.menus, buttons)
def test_menu_publish(self):
        # Menu publishing
pass
def assertMenusEqual(self, menus, buttons):
self.assertEqual(len(menus), len(buttons))
for menu, button in zip(menus, buttons):
self.assertMenuEqual(menu, button)
def assertMenuEqual(self, menu, button):
self.assertEqual(menu.name, button["name"])
if "sub_button" in button:
self.assertIsNone(menu.type)
sub_menus = menu.sub_button.all()
sub_buttons = button["sub_button"]["list"]
self.assertMenusEqual(sub_menus, sub_buttons)
elif button["type"] == Menu.Event.CLICK:
self.assertEqual(menu.type, button["type"])
self.assertEqual(menu.content["key"], button["key"])
elif button["type"] == Menu.Event.VIEW:
self.assertEqual(menu.type, button["type"])
self.assertEqual(menu.content["url"], button["url"])
else:
            # Replies are converted into handlers
pass
@property
def menus(self):
return (self.app.menus.filter(menuid__isnull=True)
.filter(parent_id__isnull=True)
.all())
|
# -*- coding: utf-8 -*-
"""
sphinx.config
~~~~~~~~~~~~~
Build configuration file handling.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from os import path, getenv
from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types
from typing import Any, NamedTuple, Union
from sphinx.errors import ConfigError
from sphinx.locale import l_, __
from sphinx.util import logging
from sphinx.util.i18n import format_date
from sphinx.util.osutil import cd
from sphinx.util.pycompat import execfile_, NoneType
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Union # NOQA
from sphinx.util.tags import Tags # NOQA
logger = logging.getLogger(__name__)
nonascii_re = re.compile(br'[\x80-\xff]')
copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])')
CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if PY3:
CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
CONFIG_EXIT_ERROR = "The configuration file (or one of the modules it imports) " \
"called sys.exit()"
CONFIG_ENUM_WARNING = "The config value `{name}` has to be a one of {candidates}, " \
"but `{current}` is given."
CONFIG_PERMITTED_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \
"expected to {permitted}."
CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \
"defaults to `{default.__name__}'."
if PY3:
unicode = str # special alias for static typing...
ConfigValue = NamedTuple('ConfigValue', [('name', str),
('value', Any),
('rebuild', Union[bool, unicode])])
class ENUM(object):
"""represents the config value should be a one of candidates.
Example:
app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))
"""
def __init__(self, *candidates):
# type: (unicode) -> None
self.candidates = candidates
def match(self, value):
# type: (Union[unicode,List,Tuple]) -> bool
if isinstance(value, (list, tuple)):
return all(item in self.candidates for item in value)
else:
return value in self.candidates
string_classes = [text_type] # type: List
if PY2:
string_classes.append(binary_type) # => [str, unicode]
class Config(object):
"""
Configuration file abstraction.
"""
# the values are: (default, what needs to be rebuilt if changed)
# If you add a value here, don't forget to include it in the
# quickstart.py file template as well as in the docs!
config_values = dict(
# general options
project = ('Python', 'env'),
copyright = ('', 'html'),
version = ('', 'env'),
release = ('', 'env'),
today = ('', 'env'),
# the real default is locale-dependent
today_fmt = (None, 'env', string_classes),
language = (None, 'env', string_classes),
locale_dirs = (['locales'], 'env'),
figure_language_filename = (u'{root}.{language}{ext}', 'env', [str]),
master_doc = ('contents', 'env'),
source_suffix = (['.rst'], 'env'),
source_encoding = ('utf-8-sig', 'env'),
source_parsers = ({}, 'env'),
exclude_patterns = ([], 'env'),
default_role = (None, 'env', string_classes),
add_function_parentheses = (True, 'env'),
add_module_names = (True, 'env'),
trim_footnote_reference_space = (False, 'env'),
show_authors = (False, 'env'),
pygments_style = (None, 'html', string_classes),
highlight_language = ('default', 'env'),
highlight_options = ({}, 'env'),
templates_path = ([], 'html'),
template_bridge = (None, 'html', string_classes),
keep_warnings = (False, 'env'),
suppress_warnings = ([], 'env'),
modindex_common_prefix = ([], 'html'),
rst_epilog = (None, 'env', string_classes),
rst_prolog = (None, 'env', string_classes),
trim_doctest_flags = (True, 'env'),
primary_domain = ('py', 'env', [NoneType]),
needs_sphinx = (None, None, string_classes),
needs_extensions = ({}, None),
nitpicky = (False, None),
nitpick_ignore = ([], None),
numfig = (False, 'env'),
numfig_secnum_depth = (1, 'env'),
numfig_format = ({'section': l_('Section %s'),
'figure': l_('Fig. %s'),
'table': l_('Table %s'),
'code-block': l_('Listing %s')},
'env'),
tls_verify = (True, 'env'),
tls_cacerts = (None, 'env'),
) # type: Dict[unicode, Tuple]
def __init__(self, dirname, filename, overrides, tags):
# type: (unicode, unicode, Dict, Tags) -> None
self.overrides = overrides
self.values = Config.config_values.copy()
config = {} # type: Dict[unicode, Any]
if dirname is not None:
config_file = path.join(dirname, filename)
config['__file__'] = config_file
config['tags'] = tags
with cd(dirname):
# we promise to have the config dir as current dir while the
# config file is executed
try:
execfile_(filename, config)
except SyntaxError as err:
raise ConfigError(CONFIG_SYNTAX_ERROR % err)
except SystemExit:
raise ConfigError(CONFIG_EXIT_ERROR)
self._raw_config = config
# these two must be preinitialized because extensions can add their
# own config values
self.setup = config.get('setup', None) # type: Callable
if 'extensions' in overrides:
if isinstance(overrides['extensions'], string_types):
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
self.extensions = config.get('extensions', []) # type: List[unicode]
# correct values of copyright year that are not coherent with
# the SOURCE_DATE_EPOCH environment variable (if set)
# See https://reproducible-builds.org/specs/source-date-epoch/
if getenv('SOURCE_DATE_EPOCH') is not None:
for k in ('copyright', 'epub_copyright'):
if k in config:
config[k] = copyright_year_re.sub(r'\g<1>%s' % format_date('%Y'),
config[k])
def check_types(self):
# type: () -> None
# check all values for deviation from the default value's type, since
# that can result in TypeErrors all over the place
        # NB. since config values might use l_() we have to delay calling
        # this method until i18n is initialized
for name in self._raw_config:
if name not in self.values:
continue # we don't know a default value
settings = self.values[name]
default, dummy_rebuild = settings[:2]
permitted = settings[2] if len(settings) == 3 else ()
if hasattr(default, '__call__'):
default = default(self) # could invoke l_()
if default is None and not permitted:
                continue  # neither inferrable nor explicitly permitted types
current = self[name]
if isinstance(permitted, ENUM):
if not permitted.match(current):
logger.warning(CONFIG_ENUM_WARNING.format(
name=name, current=current, candidates=permitted.candidates))
else:
if type(current) is type(default):
continue
if type(current) in permitted:
continue
common_bases = (set(type(current).__bases__ + (type(current),)) &
set(type(default).__bases__))
common_bases.discard(object)
if common_bases:
continue # at least we share a non-trivial base class
if permitted:
logger.warning(CONFIG_PERMITTED_TYPE_WARNING.format(
name=name, current=type(current),
permitted=str([cls.__name__ for cls in permitted])))
else:
logger.warning(CONFIG_TYPE_WARNING.format(
name=name, current=type(current), default=type(default)))
def check_unicode(self):
# type: () -> None
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in iteritems(self._raw_config):
if isinstance(value, binary_type) and nonascii_re.search(value):
logger.warning('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
'Please use Unicode strings, e.g. %r.', name, u'Content')
def convert_overrides(self, name, value):
# type: (unicode, Any) -> Any
if not isinstance(value, string_types):
return value
else:
defvalue = self.values[name][0]
if isinstance(defvalue, dict):
raise ValueError(__('cannot override dictionary config setting %r, '
'ignoring (use %r to set individual elements)') %
(name, name + '.key=value'))
elif isinstance(defvalue, list):
return value.split(',')
elif isinstance(defvalue, integer_types):
try:
return int(value)
except ValueError:
raise ValueError(__('invalid number %r for config value %r, ignoring') %
(value, name))
elif hasattr(defvalue, '__call__'):
return value
elif defvalue is not None and not isinstance(defvalue, string_types):
raise ValueError(__('cannot override config setting %r with unsupported '
'type, ignoring') % name)
else:
return value
def pre_init_values(self):
# type: () -> None
"""
        Initialize a limited set of config variables before initializing i18n and loading extensions
"""
variables = ['needs_sphinx', 'suppress_warnings', 'language', 'locale_dirs']
for name in variables:
try:
if name in self.overrides:
self.__dict__[name] = self.convert_overrides(name, self.overrides[name])
elif name in self._raw_config:
self.__dict__[name] = self._raw_config[name]
except ValueError as exc:
logger.warning("%s", exc)
def init_values(self):
# type: () -> None
config = self._raw_config
for valname, value in iteritems(self.overrides):
try:
if '.' in valname:
realvalname, key = valname.split('.', 1)
config.setdefault(realvalname, {})[key] = value
continue
elif valname not in self.values:
logger.warning(__('unknown config value %r in override, ignoring'),
valname)
continue
if isinstance(value, string_types):
config[valname] = self.convert_overrides(valname, value)
else:
config[valname] = value
except ValueError as exc:
logger.warning("%s", exc)
for name in config:
if name in self.values:
self.__dict__[name] = config[name]
if isinstance(self.source_suffix, string_types): # type: ignore
self.source_suffix = [self.source_suffix] # type: ignore
def __getattr__(self, name):
# type: (unicode) -> Any
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
raise AttributeError(__('No such config value: %s') % name)
default = self.values[name][0]
if hasattr(default, '__call__'):
return default(self)
return default
def __getitem__(self, name):
# type: (unicode) -> unicode
return getattr(self, name)
def __setitem__(self, name, value):
# type: (unicode, Any) -> None
setattr(self, name, value)
def __delitem__(self, name):
# type: (unicode) -> None
delattr(self, name)
def __contains__(self, name):
# type: (unicode) -> bool
return name in self.values
def __iter__(self):
# type: () -> Iterable[ConfigValue]
for name, value in iteritems(self.values):
yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore
def add(self, name, default, rebuild, types):
# type: (unicode, Any, Union[bool, unicode], Any) -> None
self.values[name] = (default, rebuild, types)
def filter(self, rebuild):
# type: (str) -> Iterator[ConfigValue]
return (value for value in self if value.rebuild == rebuild) # type: ignore
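# A minimal sketch of the kind of conf.py this Config class executes; the names
# mirror entries in config_values above and the values are illustrative only:
#
#   project = 'My Project'
#   copyright = '2017, Example Author'
#   version = '1.0'
#   release = '1.0.0'
#   master_doc = 'contents'
#   source_suffix = ['.rst']
#   extensions = []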
|
from django.apps import AppConfig
from pickle import load
import os
import numpy as np
import pandas as pd
class ChatbotConfig(AppConfig):
name = 'chatbot'
#modelPath = os.path.join(projectPath,'finalized_model.sav')
model = load(open("finalized_model.sav", "rb"))
data_set = pd.read_pickle("main_data_frame.pkl")
disease_indexed_data_frame = data_set.set_index('prognosis').drop('Sum', axis=1)
feature_importance = model.feature_importances_
indices = np.argsort(feature_importance)[::-1]
features = data_set.columns.drop(['prognosis', 'Sum'])
features_dict = {}
for i,f in enumerate(features):
features_dict[f] = i
ASKING_LIMIT = 8
    # Create a DataFrame that has the features in the index, and the importance as the data
feature_importance_df = pd.concat([ pd.DataFrame(data=features, columns=['symptom']),
pd.DataFrame(data=model.feature_importances_,
columns=['importance'])]
, axis=1)
feature_importance_df.set_index('symptom', inplace=True)
# Sort the feature_importance_df by importance
sorted_feature_importance_df = feature_importance_df.sort_values('importance', ascending=False)
    # The top 20 symptoms used to question the user: asked over 4 rounds of 5 symptoms each.
top_symptoms = sorted_feature_importance_df.iloc[:20].index.values
def symptoms_of(disease):
df = ChatbotConfig.disease_indexed_data_frame.loc[[disease], :]
return df[df.columns[(df==1).any()]].columns
def find_most_common_symptom(feature_importance_df, df=None, list_of_symptoms=None):
if df is not None and list_of_symptoms is None:
list_of_symptoms = ChatbotConfig.symptoms_in_dataframe(df)
        elif (df is None and list_of_symptoms is None) or (df is not None and list_of_symptoms is not None):
raise Exception("Must use either df OR a list of symptoms")
        highest_importance = 0.0
        highest_symptom = ''
        for symptom in list_of_symptoms:
            current_importance = feature_importance_df.loc[symptom].values[0]
            if current_importance > highest_importance:
                highest_importance = current_importance
                highest_symptom = symptom
return highest_symptom
def update_dataframe_with_symptom(df, symptom, has_symptom = False):
if has_symptom:
return df[df[symptom] > 0].drop(symptom, axis=1)
else:
return df[df[symptom] == 0].drop(symptom, axis=1)
def find_top_n_common_symptom(df, n):
common_symptoms = []
        list_of_symptoms = ChatbotConfig.symptoms_in_dataframe(df)
for i in range(n):
            common_symptom = ChatbotConfig.find_most_common_symptom(ChatbotConfig.feature_importance_df, list_of_symptoms=list_of_symptoms)
common_symptoms.append(common_symptom)
list_of_symptoms.remove(common_symptom)
return common_symptoms
def remaning_symptoms_of_possible_disease(possible_disease, already_asked):
symptoms_of_possible_disease = list(ChatbotConfig.symptoms_of(possible_disease[0]))
for symptom in already_asked.keys():
if symptom in symptoms_of_possible_disease:
symptoms_of_possible_disease.remove(symptom)
return symptoms_of_possible_disease
def symptoms_in_dataframe(df):
"""
        A method used to get the symptoms that exist in a dataframe.
        Input: df, the dataframe whose symptoms we want to extract.
        Output: a list of the symptoms present in the dataframe df.
"""
symptoms_dict = {}
for index, row in df.iterrows():
row = row.dropna()
disease_symptoms = row.index.tolist()
for symptom in disease_symptoms:
symptoms_dict[symptom] = 1
return list(symptoms_dict.keys())
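# A hedged sketch of how the helpers above could drive the question loop; the
# ask_user() I/O helper and the exact column handling are assumptions, not part
# of this file:
#
#   candidates = ChatbotConfig.data_set.drop(['prognosis', 'Sum'], axis=1)
#   already_asked = {}
#   for _ in range(ChatbotConfig.ASKING_LIMIT):
#       symptom = ChatbotConfig.find_most_common_symptom(
#           ChatbotConfig.feature_importance_df, df=candidates)
#       has_it = ask_user(symptom)  # hypothetical yes/no prompt
#       already_asked[symptom] = has_it
#       candidates = ChatbotConfig.update_dataframe_with_symptom(
#           candidates, symptom, has_it)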
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
from perf_stat import PerfStat, PerfStatCount, PerfStatLatency, PerfStatMinMax, PerfStatLatencyWorkgen
from typing import List
def create_test_stat_path(test_home_path: str, test_stats_file: str):
return os.path.join(test_home_path, test_stats_file)
class PerfStatCollection:
def __init__(self, operations: List[str]):
self.to_report: List[PerfStat] = []
if operations:
self.to_report = [stat for stat in PerfStatCollection.all_stats() if stat.short_label in operations]
def find_stats(self, test_home: str):
for stat in self.to_report:
test_stat_path = create_test_stat_path(test_home, stat.stat_file)
values = stat.find_stat(test_stat_path=test_stat_path)
stat.add_values(values=values)
@staticmethod
def all_stats():
return [
PerfStat(short_label="load",
pattern='Load time:',
input_offset=2,
output_label='Load time',
output_precision=2,
conversion_function=float),
PerfStat(short_label="insert",
pattern=r'Executed \d+ insert operations',
input_offset=1,
output_label='Insert count'),
PerfStat(short_label="modify",
pattern=r'Executed \d+ modify operations',
input_offset=1,
output_label='Modify count'),
PerfStat(short_label="read",
pattern=r'Executed \d+ read operations',
input_offset=1,
output_label='Read count'),
PerfStat(short_label="truncate",
pattern=r'Executed \d+ truncate operations',
input_offset=1,
output_label='Truncate count'),
PerfStat(short_label="update",
pattern=r'Executed \d+ update operations',
input_offset=1,
output_label='Update count'),
PerfStat(short_label="checkpoint",
pattern=r'Executed \d+ checkpoint operations',
input_offset=1,
output_label='Checkpoint count'),
PerfStatMinMax(short_label="min_max_update_throughput",
pattern=r'updates,',
input_offset=8,
output_label='update throughput'),
PerfStatCount(short_label="warnings",
pattern='WARN',
output_label='Latency warnings'),
PerfStatLatency(short_label="top5_latencies_read_update",
stat_file='monitor.json',
output_label='Latency(read, update) Max',
ops=['read', 'update'],
num_max=5),
PerfStatCount(short_label="eviction_page_seen",
stat_file='WiredTigerStat*',
pattern='[0-9].wt cache: pages seen by eviction',
output_label='Pages seen by eviction'),
PerfStatLatency(short_label="max_latency_insert",
stat_file='monitor.json',
output_label='Latency(insert) Max',
ops=['insert'],
num_max=1),
PerfStatLatency(short_label="max_latency_read_update",
stat_file='monitor.json',
output_label='Latency(read, update) Max',
ops=['read', 'update'],
num_max=1),
PerfStatMinMax(short_label="min_max_read_throughput",
pattern=r'updates,',
input_offset=4,
output_label='read throughput'),
PerfStatCount(short_label="warning_operations",
stat_file='../stdout_file.txt',
pattern='max latency exceeded',
output_label='Latency warnings (read, insert, update)'),
PerfStatCount(short_label="warning_read",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*read',
output_label='Latency warnings (read)'),
PerfStatCount(short_label="warning_insert",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*insert',
output_label='Latency warnings (insert)'),
PerfStatCount(short_label="warning_update",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*update',
output_label='Latency warnings (update)'),
PerfStatCount(short_label="warning_idle",
stat_file='../stdout_file.txt',
pattern='Cycling idle',
output_label='Warning Idle (create, drop)'),
PerfStatCount(short_label="warning_idle_create",
stat_file='../stdout_file.txt',
pattern=r'Cycling idle.*CREATE',
output_label='Warning Idle (create)'),
PerfStatCount(short_label="warning_idle_drop",
stat_file='../stdout_file.txt',
pattern=r'Cycling idle.*DROP',
output_label='Warning Idle (drop)'),
PerfStatLatencyWorkgen(short_label="max_latency_drop_diff",
stat_file='../stdout_file.txt',
pattern=r'Cycling idle.*DROP',
output_label='Latency drop diff(in sec.) Max',
input_offset=22),
PerfStatLatencyWorkgen(short_label="max_latency_drop",
stat_file='../stdout_file.txt',
pattern=r'Cycling idle.*DROP',
output_label='Latency drop(in sec.) Max',
input_offset=8),
PerfStatLatencyWorkgen(short_label="max_latency_create",
stat_file='../stdout_file.txt',
pattern=r'Cycling idle.*CREATE',
output_label='Latency create(in sec.) Max',
input_offset=8),
PerfStatLatencyWorkgen(short_label="max_latency_read_micro_sec",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*read',
output_label='Latency read(in micro sec) Max',
input_offset=11),
PerfStatLatencyWorkgen(short_label="max_latency_insert_micro_sec",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*insert',
output_label='Latency insert(in micro sec) Max',
input_offset=11),
PerfStatLatencyWorkgen(short_label="max_latency_update_micro_sec",
stat_file='../stdout_file.txt',
pattern=r'max latency exceeded.*update',
output_label='Latency update(in micro sec) Max',
input_offset=11)
]
|
#!/usr/bin/env python
# You need jsshell and a directory with tests to run this script
import os
import glob
import sys
import optparse
import types
import time
from collections import defaultdict
import random
import subprocess
import re
import platform
try:
import json
except ImportError:
import simplejson as json
import jsshells
import regression
from jobrunner import JobRunner
import output
from relpath import relpath
#Hack to make paths work under Windows when running cygwin python
if platform.system().startswith("CYGWIN"):
import ntpath
os.path = ntpath
#This is a total hack
file_parts = os.path.abspath(__file__).split("\\")
file_name = file_parts[2] + ":\\" + os.path.sep.join(file_parts[3:])
__file__ = file_name
base_path = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0], "../"))
test_path = os.path.abspath(os.path.join(base_path, "tests"))
harness_path = os.path.abspath(os.path.join(base_path, "harness"))
default_engine = "carakan"
update_engines = frozenset(["carakan", "carakan-nc"])
host_object_engines = ("carakan", "carakan-gc", "carakan-nc", "carakan-nc-x87")
default_results_file = os.path.join(test_path, "results")
default_recursive_depth = 1500
class TestFile(dict):
def __init__(self, path):
if not os.path.exists(path):
print "Test file %s not found"%path
sys.exit(1)
self.path = path
self.compile_failed = None
self.get_tests_output = None
self.tests_to_run = []
self.crashed = False #Did the current file crash (fast mode only)
self.relative_path = relpath(path, test_path)
self.run_tests_individually = False
self.read_options()
def read_options(self):
f = open(self.path)
run_tests_re = re.compile("^//opjsunit:\s*run_tests_individually\s*$")
for line in f:
if run_tests_re.match(line):
self.run_tests_individually = True
break
def getTests(self, engine):
proc = engine.runCommand((os.path.join(harness_path, "utils.js"),
self.path,
os.path.join(harness_path, "gettests.js")),
[],
async = True,
use_temp_file=True)
self.proc = proc
return proc
def updateTestLists(self, rv):
self.proc.stdout.seek(0)
output = self.proc.stdout.read()
self.compile_failed = not (rv == 0)
self.get_tests_output = output
if self.compile_failed:
return
i = 0
        output = output.replace("\r\n", "\n")
        output = output.replace("\r", "\n")
output = output.strip()
if output.split("\n")[-1] != "##opjsunit: function end##":
print "end:" + output.split("\n")[-1]
self.compile_failed = True
return
testcases = [item.strip() for item in
output.split("##opjsunit: function end##") if item]
for i, testcase in enumerate(testcases):
lines = testcase.split("\n")
            i = 0
if not lines[i].startswith("test"):
print "lines[0]:", lines
self.compile_failed = True
return
else:
name = lines[i].strip()
#skip blank lines
while True:
i += 1
if lines[i].strip():
break
if not lines[i].startswith("comment: "):
print "lines[%i]"%i, lines[i]
self.compile_failed = True
return
else:
comment = lines[i][len("comment: "):]
#skip blank lines
while True:
i += 1
if lines[i].strip():
break
function_code = "\n".join(lines[i:])
test = Test(name, self, opts.recursive, comment=comment,
function_code=function_code, index=i+1)
self[unicode(test)] = test
def setTests(self, tests_to_run=None):
"""Set the list of tests to run"""
if tests_to_run is None or None in tests_to_run:
self.tests_to_run = self.keys()
else:
for name in tests_to_run:
assert name in self.keys(), "Test %s not found in file %s %s"%(name, self.path, self.keys())
self.tests_to_run = tests_to_run
return [self[name] for name in self.tests_to_run]
    def runFile(self, engine, args):
"""Run a whole file worth of tests without restarting the engine"""
s = []
for test_id in self.tests_to_run:
test = self[test_id]
s.append(test.harnessCode(opts.repetitions))
s = "\n".join(s)
while True:
self.testrunner_fn = os.path.join(harness_path,
"testrunner_%i.js"%
random.randint(0, 2**32-1))
if not(os.path.exists(self.testrunner_fn)):
#paranoia
break
f = open(self.testrunner_fn, 'w')
f.write(s)
f.close()
        proc = engine.runCommand((os.path.join(harness_path, "utils.js"),
                                  os.path.join(harness_path, "opjsunit.js"),
                                  self.path, self.testrunner_fn),
                                 args, async=True, valgrind=opts.valgrind)
self.proc = proc
return proc
def setTestsStatus(self, rv, signal, proc, killed):
"""Callback to set the status of all tests when run in fast mode"""
assert proc is self.proc
output = self.parseTestOutput(self.proc.stdout.read())
printed = False
for test_id in self.tests_to_run:
test = self[test_id]
test.passed = test_id in output and output[test_id]["passed"]
test.output = test_id in output and output[test_id]["messages"] or ""
if not test_id in output:
self.crashed = True
test.crashed = not killed and ((test_id not in output and rv != 0)
or
(test_id in output and not
output[test_id]["full"]))
test.killed = test_id not in output and killed
if test.killed and not printed:
print test
printed = True
#Get rid of the reference to the process so that file handles can
#be closed
self.proc = None
def parseTestOutput(self, output):
start_re = re.compile("##opjsunit: (.+)##")
rv = {}
item_template = {"passed":False,
"full":False,
"messages":""}
current_output = ""
for line in output.split("\n"):
if line.endswith("\r"):
line = line[:-1]
if not line:
continue
start_match = start_re.match(line)
if start_match is not None:
current_item = start_match.groups()[0]
rv[current_item] = item_template.copy()
elif line == "--opjsunit:passed--":
assert current_item is not None
rv[current_item]["full"] = True
rv[current_item]["passed"] = True
current_item = None
elif line == "--opjsunit:failed--":
assert current_item is not None
rv[current_item]["full"] = True
else:
if current_item is None:
#This probably indicates a crash that truncated
#output
break
else:
rv[current_item]["messages"] += line + "\n"
return rv
def testsRun(self):
rv = TestSet([test.id for test in self.itervalues()
if test.passed is not None])
return rv
def testsFailed(self):
rv = TestSet([test.id for test in self.itervalues()
if test.passed == False])
return rv
def testsPassed(self):
rv = TestSet([test.id for test in self.itervalues()
if test.passed == True])
return rv
def testsValgrind(self):
rv = TestSet([test.id for test in self.itervalues()
if test.valgrind == True])
return rv
def testsCrashed(self):
rv = TestSet([test.id for test in self.itervalues()
if test.crashed == True])
return rv
def testsTimedOut(self):
rv = TestSet([test.id for test in self.itervalues()
if test.killed == True])
return rv
class Test(object):
def __init__(self, name, test_file, recursive, comment=None,
function_code=None, index=None):
test_id_str = test_file.path + "#" + name
self.id = TestId(test_file.path, name, index=index)
self.comment = comment
self.file = test_file
self.passed = None
self.output = None
self.valgrind = False
self.crashed = False
self.killed = False
self.proc = None
self.testrunner_fn = None
self.test_code_fn = None
self.recursive = recursive
self.function_code = function_code
timeout_match = re.search(r" timeout multipler: ([0-9]+(?:\.[0-9]+))", self.comment)
if timeout_match:
self.timeout_multipler = float(timeout_match.group(1))
else:
self.timeout_multipler = 1.0
if recursive:
self.harnessCode = self.harnessCodeRecursive
else:
self.harnessCode = self.harnessCodeIterative
def __unicode__(self):
return unicode(self.id)
#the harnessCode method is set at runtime depending on the value
#of opts.recursive
def harnessCodeIterative(self, repetitions):
rv = """
try {
writeln('##opjsunit: %s##');
for (var opjsunit_count=0; opjsunit_count<%i; opjsunit_count++) {
%s();
}
writeln('--opjsunit:passed--')
}
catch (e) {
writeln(%se);
writeln('--opjsunit:failed--')
}
"""%(unicode(self), repetitions, self.id.function,
repetitions > 1 and
"'At repetition ' + opjsunit_count + ': ' + " or "")
return rv
def harnessCodeRecursive(self, repetitions):
rv = """
try {
writeln('##opjsunit: %s##');
var opjsunit_count = 0;
function f() {
try {
%s();
opjsunit_count++;
if (opjsunit_count < %i) {
f();
}
}
catch(e) {
if (e instanceof RangeError) {
writeln('Too deep recursion with --recursive')
} else {
throw e;
}
}
}
f();
writeln('--opjsunit:passed--')
}
catch (e) {
writeln(%se);
writeln('--opjsunit:failed--')
}
"""%(unicode(self), self.id.function, repetitions,
repetitions > 1 and
"'At repetition ' + opjsunit_count + ': ' + " or "")
return rv
def get_temp_filename(self, format="%i.js"):
while True:
rv = os.path.join(harness_path,
format%random.randint(0, 2**32-1))
if not(os.path.exists(rv)):
#paranoia
break
return rv
def run(self, engine, args):
#is_carakan = engine.name.startswith("carakan")
command_line = [os.path.join(harness_path, "utils.js"),
os.path.join(harness_path, "opjsunit.js")]
if (self.file.run_tests_individually and
not opts.no_run_individually):
s = "\n".join((self.function_code,
self.harnessCode(opts.repetitions)))
else:
s = self.harnessCode(opts.repetitions)
command_line.append(self.id.filename)
self.testrunner_fn = self.get_temp_filename("testrunner_%i.js")
f = open(self.testrunner_fn, 'w')
f.write(s)
f.close()
command_line.append(self.testrunner_fn)
command_line = tuple(command_line)
proc = engine.runCommand(command_line, args, async=True,
use_temp_file = False, valgrind=opts.valgrind)
self.proc = proc
return proc
def getReproductionCommand(self):
#Command line for reproducing tests in gdb or
#from the shell
command_line = [os.path.join(harness_path, "utils.js"),
os.path.join(harness_path, "opjsunit.js")]
s = self.harnessCode(opts.repetitions)
command_line.append(self.id.filename)
command_line.extend(["-e", s])
return tuple(command_line)
def setStatus(self, rv, signal, proc, killed):
assert proc is self.proc
output = self.parseResult(self.proc.stdout.read())
success = (rv == 0 and signal == 0 and killed == False)
self.passed = success and output["passed"]
self.output = output["messages"]
self.valgrind = not success and not killed and rv == 0 and signal == 102
self.killed = killed
self.crashed = not output["full"] and not success and not killed and not self.valgrind
#Get rid of the reference to the process so that file handles can
#be closed
self.proc = None
def parseResult(self, output):
start_re = re.compile("##opjsunit: (.+)##")
rv = {"passed":False,
"full":False,
"messages":""}
for line in output.split("\n"):
if line.endswith("\r"):
line = line[:-1]
if not line:
continue
start_match = start_re.match(line)
if start_match is not None:
#We have found the start of the output
pass
elif line == "--opjsunit:failed--":
rv["full"] = True
elif line == "--opjsunit:passed--":
rv["passed"] = True
rv["full"] = True
else:
rv["messages"] += line + "\n"
return rv
class TestId(object):
"""An object representing a testcase. Each testcase has a filename
and a function name. These are immutable properties that determine
comparisons between different testcase objects. They may also have
    a comment and an index (used to e.g. store the relative position
of a particular testcase in its file).
"""
def __init__(self, filename, function=None, index = None):
self._filename = filename
self._function = function
self._relative_path = relpath(filename, test_path)
self.name = unicode(self)
self.index = index
filename = property(lambda self:self._filename)
function = property(lambda self:self._function)
def __eq__(self, other):
return (self.name == other.name)
def __hash__(self):
return hash(self.name)
def __unicode__(self):
name = unicode(self._relative_path) if self.function is None else u"%s#%s"%(self._relative_path, self.function)
return name
@classmethod
def fromString(cls, name, index=None):
components = name.split("#")
if not os.path.isabs(components[0]):
filename = os.path.join(test_path, components[0])
else:
filename = components[0]
if len(components) > 1:
function = components[1]
else:
function = None
return cls(filename, function, index)
class TestSet(set):
def __init__(self, values=None):
if values is not None:
self.fromIter(values)
def iterByName(self):
"""Iterator that always returns entries sorted by filename and in
alphabetical order"""
test_list = list(set.__iter__(self))
def cmp(x, y):
x_name = x.name
y_name = y.name
if x_name == y_name:
return 0
else:
return x_name > y_name and 1 or -1
test_list.sort(cmp)
for item in test_list:
yield item
def iterByIndex(self):
"""Returns an iterator over all the TestIds in the set, sorted by
index. This is mainly useful for printing things in file order
rather than in alphabetical order"""
test_list = list(set.__iter__(self))
def cmp(x, y):
if x.filename == y.filename:
if x.index is None:
return -1
elif y.index is None:
return 1
return x.index - y.index
else:
return x.filename > y.filename and 1 or -1
test_list.sort(cmp)
for item in test_list:
yield item
def fromIter(self, iterable):
for i, item in enumerate(iterable):
if type(item) in types.StringTypes:
test_id = TestId.fromString(item, i)
else:
#Strictly bad form
assert isinstance(item, TestId)
test_id = item
set.add(self, test_id)
return self
@classmethod
def fromFile(cls, file_name, limit_to=None):
tests = []
try:
f = open(file_name)
except IOError:
raise
for i, line in enumerate(f):
line = line.strip()
if not line:
continue
test_id = TestId.fromString(line, i)
if limit_to is None or None in limit_to or unicode(test_id) in limit_to:
tests.append(test_id)
return cls(tests)
def toFile(self, file_obj):
for item in self.iterByName():
file_obj.write(unicode(item).encode("utf-8") + "\n")
def filterByPaths(self, file_paths):
return TestSet([item for item in self if item.filename in file_paths])
def uniqueFiles(self):
rv = set([item.filename for item in self])
return rv
class TestRunner(object):
def __init__(self, engine, test_paths, processes):
self.engine = engine
self.all_files = []
#Test files that have successfully compiled
self.compiled_files = []
#Test files that have not successfully compiled
self.uncompiled_files = []
self.setTests(test_paths)
self.num_processes = processes
def setTests(self, test_paths):
"""Set the tests to be run.
        test_paths - an iterable where each item is either the name of a
directory containing tests to be run, the name of a
file containing tests to be run or the name of a
specific test within a file"""
tests = {}
test_cases = defaultdict(set)
test_files = []
for path in test_paths:
#Convert everything to absolute paths here
path = os.path.abspath(path)
if os.path.isdir(path):
files = glob.glob(os.path.join(path, "*.js"))
if self.engine.name not in host_object_engines:
files = [fn for fn in files if
not fn.endswith("_hostobject.js")]
test_files.extend(files)
else:
test_files.append(path)
for path in test_files:
components = path.split("#")
if components[0] not in tests:
tests[components[0]] = TestFile(components[0])
if len(components) > 1:
test_cases[components[0]].add(os.path.split(path)[1])
else:
test_cases[components[0]].add(None)
self.test_cases = test_cases
self.tests = tests
def readTestFunctions(self):
"""For each TestFile we are running tests in, load the list of
test functions in that file"""
#XXX TODO - error handling if a test is specified that does not exist
sys.stderr.write("Finding tests...\n")
def get_job(test_file):
def get_tests():
def callback(rv, signal, proc, killed):
test_file.updateTestLists(rv)
if test_file.compile_failed:
self.uncompiled_files.append(test_file)
else:
self.compiled_files.append(test_file)
return test_file.getTests(self.engine), callback, None
return get_tests
test_finder = JobRunner(self.num_processes, timeout=30)
for path, test_file in self.tests.iteritems():
self.all_files.append(relpath(path, test_path))
test_finder.queue([get_job(test_file)])
test_finder.run()
def run(self):
self.readTestFunctions()
if opts.no_run:
return Results(self.engine.name,
set([]),
set([]),
set([]),
set([]),
loaded_files = self.all_files,
compile_failed = [f.relative_path for f in
self.uncompiled_files])
else:
return self.runTests()
def runTests(self):
all_tests = TestSet()
passed = TestSet()
failed = TestSet()
valgrind = TestSet()
crashed = TestSet()
timed_out = TestSet()
def get_job(test):
def run_job():
def callback(rv, signal, proc, killed):
test.setStatus(rv, signal, proc, killed)
#This is cleanup code that should perhaps be elsewhere
if test.testrunner_fn is not None:
os.unlink(test.testrunner_fn)
if test.test_code_fn is not None:
os.unlink(test.test_code_fn)
test.testrunner_fn = None
return test.run(self.engine, opts.engine_args), callback, test.timeout_multipler
return run_job
def get_job_fast(test_file):
def run_job():
def callback(rv, signal, proc, killed):
test_file.setTestsStatus(rv, signal, proc, killed)
#This is cleanup code that should perhaps be elsewhere
if test_file.testrunner_fn is not None:
os.unlink(test_file.testrunner_fn)
test_file.testrunner_fn = None
return test_file.runFile(self.engine, opts.engine_args), callback, None
return run_job
sys.stderr.write("Running tests...\n")
if opts.fast:
timeout = 10
elif opts.valgrind:
timeout = 30*50
else:
timeout = 30
test_runner = JobRunner(self.num_processes, timeout=timeout)
if opts.fast:
for test_file in self.compiled_files:
test_names = self.test_cases[test_file.path]
test_file.setTests(test_names)
test_runner.queue([get_job_fast(test_file)])
else:
for test_file in self.compiled_files:
test_names = self.test_cases[test_file.path]
test_runner.queue([get_job(item) for item in
test_file.setTests(test_names)])
test_runner.run()
crashed_files = []
for test_file in self.compiled_files:
if opts.fast and test_file.crashed:
crashed_files.append(test_file)
continue
all_tests |= test_file.testsRun()
passed |= test_file.testsPassed()
failed |= test_file.testsFailed()
valgrind |= test_file.testsValgrind()
crashed |= test_file.testsCrashed()
timed_out |= test_file.testsTimedOut()
results = regression.Results(getEngineName(self.engine),
passed, failed, valgrind, crashed, timed_out,
loaded_files=self.all_files,
compile_failed=[f.relative_path for f in
self.uncompiled_files],
crashed_files=[f.relative_path for f in crashed_files])
tests_by_id = {}
for test_file in self.tests.itervalues():
for test in test_file.itervalues():
tests_by_id[test.id] = test
return tests_by_id, results
def makeOptions():
usage = """usage: %prog [options] path/to/tests*
opjsunit javascript test harness.
If the -s or --shell option is set, its argument is used as the path to the
jsshell. Otherwise, if the OP_JSSHELL environment variable is set, that is used
as the path. All other positional arguments are paths either to
individual test files or to directories containing test files. Recursion into
subdirectories is not yet supported.
"""
parser = optparse.OptionParser(usage=usage)
#parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
# default=False, help="Verbose output")
parser.add_option("-s", "--shell", action="store", dest="shell",
default="", help="Path to the javascript shell")
#Maybe change this to an enum later or something
parser.add_option("-e", "--engine", action="store", dest="engine",
type="choice", choices=("carakan", "futhark",
"spidermonkey", "V8",
"squirrelfish", "es4",
"carakan-nc", "carakan-gc",
"carakan-nc-x87"),
default=default_engine,
help="Select the javascript interpreter to use")
parser.add_option("--valgrind", default=False, action="store_true",
dest="valgrind", help="Run tests in valgrind")
parser.add_option("--buildbot", default=False,
action="store_true", dest="buildbot", help="Run in buildbot mode")
parser.add_option("--pass", action="store_true", default=False,
dest="write_pass", help="Output tests that pass")
parser.add_option("--fail", action="store_true", default=False,
dest="write_fail", help="Output tests that fail")
parser.add_option("--valgrind-errors", action="store_true", default=False,
dest="write_valgrind", help="Output tests that give valgrind errors")
parser.add_option("--new", action="store_true", default=False,
dest="write_new",
help="Output only new tests since last checkout (implies --pass and --fail if neither are specified)")
parser.add_option("--regressions", action="store_true", default=False,
dest="write_regressions",
help="Output a list of regressions")
parser.add_option("--fixes", action="store_true", default=False,
dest="write_fixes",
help="Output a list of fixes")
parser.add_option("--store-regressions", action="store_true", default=False,
dest="store_regressions",
help="Save the list of regressions to a file so they can be easilly rerun")
parser.add_option("--missing", action="store_true", default=False,
dest="write_missing",
help="Output missing tests (those in the pass/fail files but not run)")
parser.add_option("--count", action="store_true", default=False,
dest="write_counts",
help="Output a count of the number of tests that pass and the number that fail")
parser.add_option("--crash", action="store_true", default=False,
dest="write_crash",
help="Output tests that crashed")
parser.add_option("--timeout", action="store_true", default=False,
dest="write_timeout",
help="Output tests that timed out")
parser.add_option("--force-update", action="store_true", default=False,
dest="force_update",
help="Force an update of the pass and fail files based on the currently run tests with the currently run engine. All safety checks are off!")
parser.add_option("--results-file", action="store", dest="results_file",
default=default_results_file, help="Used to specify a results file for regression analysis")
parser.add_option("--load", action="store", type="string", default="",
dest="load",
help="Load file containing a list of test cases")
parser.add_option("-p", "--processes", action="store", type="int",
default=4, dest="processes",
help="Number of distinct processes to use")
parser.add_option("--profile", action="store_true", default=False,
dest="profile", help=optparse.SUPPRESS_HELP)
parser.add_option("--optional", action="store_true", default=False,
dest="optional", help="load optional test files")
parser.add_option("--update-with-non-default-shell",
action="store_true", default=False,
dest="update_with_non_default_shell",
help="Update the test files even if a non-default shell is being used")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Verbose output")
parser.add_option("--fast", action="store_true", default=False,
help="Run the tests without restarting the interpreter. Faster but less accurate. Not for full regression runs.")
parser.add_option("--repetitions", "-r", action="store", default=None,
help="Number of times to call each test function")
parser.add_option("--recursive", action="store_true",
default=False, help="Run test function inside a recursive function")
parser.add_option("--fail-list", action="store",
dest="fail_list_file", default=None,
help="File name to write command lines of failing tests to")
parser.add_option("--regression-list", action="store",
dest="regression_list_file", default=None,
help="File name to write command lines of regressed tests to")
parser.add_option("--crash-list", action="store",
dest="crash_list_file", default=None,
help="File name to write command lines of crashing tests to")
parser.add_option("--no-run-individually", action="store_true",
dest="no_run_individually", default=False,
help="Never run tests individually; always load the whole file")
parser.add_option("--no-run", action="store_true",
dest="no_run", default=False,
help="Just look for tests don't try to run them (useful for testing the compiler)")
parser.add_option("--generate-baseline", action="store",
dest="generate_baseline", default=None,
help="Generate baseline results for current profile into specified file")
parser.add_option("--engine-arg", action="append",
dest="engine_args", default=[],
help="Extra per-test arguments to supply the engine's jsshell")
return parser
def setOptionDependencies():
#Things that happen after here cannot use the distinction between
#default and developer
if not (opts.write_pass or opts.write_fail or opts.write_valgrind
or opts.write_new or opts.write_regressions or opts.write_fixes
or opts.write_crash or opts.write_missing or opts.write_counts):
opts.write_valgrind = True
opts.write_crash = True
opts.write_timeout = True
opts.write_regressions = True
opts.write_fixes = True
opts.write_counts = True
def loadExpected(test_data_filename, profile_name):
"""Load the expected results of the tests i.e. the results from previous
runs of the test suite."""
test_data = regression.TestData.from_file(test_data_filename)
expected = regression.Expected(profile_name, test_data, test_id_class=TestId)
#Now convert the sets to TestSet objects
for result_type, test_names in expected.iteritems():
test_list = TestSet(test_names)
expected[result_type] = test_list
return expected
def loadTestList(filename):
f = open(filename)
tests = [os.path.join(test_path, item.split()[0].strip())
for item in f.read().split("\n") if item.strip()]
return tests
def run(engine, test_paths, opts):
test_runner = TestRunner(engine, test_paths, opts.processes)
tests_by_id, results = test_runner.run()
expected = loadExpected(opts.results_file, getEngineName(engine))
comparison = regression.ResultsComparison(expected, results)
return tests_by_id, comparison
def printOutput(tests_by_id, comparison, engine, opts):
if opts.buildbot:
outputFunction = output.outputBuildbot
elif opts.no_run:
outputFunction = output.outputNoRun
else:
outputFunction = output.outputDefault
print outputFunction(tests_by_id, comparison, engine, opts)
def updateExpected(tests_by_id, comparison, results_file):
new_test_data = comparison.updated_test_data()
#Now need to update the metadata
for test_id, test in tests_by_id.iteritems():
if test.comment:
new_test_data[unicode(test_id)]["comment"] = test.comment
json.dump(new_test_data, open(results_file, "w"), indent=2)
def getEngineName(engine):
if opts.valgrind:
return "%s-valgrind" % engine.name
else:
return engine.name
def main():
test_paths = args
if opts.load:
#Load a list of tests to run from a file
test_paths.extend(loadTestList(opts.load))
if not test_paths:
#We are running the full testsuite
test_paths = [test_path]
opts.allow_regression_update = True
else:
#we are running some specific tests
opts.allow_regression_update = False
if opts.valgrind:
opts.fast = False
setOptionDependencies()
engine_class = jsshells.shells[opts.engine.lower()]
if (opts.engine.lower() not in update_engines and
not opts.update_with_non_default_shell or
opts.fast):
opts.allow_regression_update = False
#setup the path to the jsshell
if opts.shell:
shell_path = opts.shell
elif "OP_JS_SHELL" in os.environ:
shell_path = os.environ["OP_JS_SHELL"]
else:
shell_path = os.path.join(os.path.curdir, engine_class.exec_name)
engine = engine_class(shell_path)
#Set the number of times to repeat each test
if opts.repetitions is not None:
opts.repetitions = int(opts.repetitions)
else:
if opts.recursive:
opts.repetitions = default_recursive_depth
else:
opts.repetitions = engine.default_repetitions
tests_by_id, comparison = run(engine, test_paths, opts)
printOutput(tests_by_id, comparison, engine, opts)
if opts.allow_regression_update:
updateExpected(tests_by_id, comparison, opts.results_file)
if opts.fail_list_file:
output.writeCommandLines(tests_by_id, comparison.results["fail"],
engine, opts.fail_list_file)
if opts.regression_list_file:
output.writeCommandLines(tests_by_id, comparison.sets["regressions"],
engine, opts.regression_list_file)
if opts.crash_list_file:
output.writeCommandLines(tests_by_id, comparison.results["crash"],
engine, opts.crash_list_file)
if opts.generate_baseline:
comparison.generate_baseline(opts.generate_baseline)
if __name__ == "__main__":
t0 = time.time()
optParser = makeOptions()
opts, args = optParser.parse_args()
if not opts.profile:
main()
else:
import cProfile
import pstats
cProfile.run("main()", "profile.dat")
p = pstats.Stats("profile.dat")
p.strip_dirs().sort_stats('time').print_stats()
if not opts.buildbot:
print "Run took " + str(time.time() - t0)
|
from cmdtools.ext import command
class Ping(command.Command):
def __init__(self):
super().__init__(name="ping")
def ping(self):
print("Pong!")
class Say(command.Command):
def __init__(self):
super().__init__(name="say")
def say(self, text):
print(text)
def error_say(self, error):
print(error)
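# Hedged usage note (assumption): these Command subclasses are normally registered
# with a cmdtools command processor and dispatched by name, so that an incoming
# "ping" invokes Ping.ping() and "say <text>" invokes Say.say(text), with
# Say.error_say() handling argument errors. The exact registration/dispatch API
# depends on the cmdtools version in use and is not shown here.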
|
import six
class CollectionScorecards(object):
"""
This class should be used to interact with collection scorecards. It is instantiated for you as
an attribute of the :class:`proknow.Collections.CollectionItem` class.
"""
def __init__(self, collections, collection):
"""Initializes the CollectionScorecards class.
Parameters:
collections (proknow.Collections.Collections): The Collections instance that is
instantiating the object.
            collection (proknow.Collections.CollectionItem): An instance of a CollectionItem.
"""
self._collections = collections
self._requestor = collections._requestor
self._collection = collection
def create(self, name, computed, custom):
"""Creates a new collection scorecard.
Note:
For information on how to construct computed metrics visit :ref:`computed-metrics`.
For information on how to define scorecard objectives, see :ref:`scorecard-objectives`.
Parameters:
name (str): The scorecard name.
computed (list): The computed metrics.
custom (list): The custom metrics.
Returns:
:class:`proknow.Collections.CollectionScorecardItem`: A representation of the created
collection scorecard
Raises:
AssertionError: If the input parameters are invalid.
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
This example creates a new scorecard::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
collection.scorecards.create("My Scorecard", [{
"type": "VOLUME",
"roi_name": "BRAINSTEM",
"arg_1": None,
"arg_2": None
}, {
"type": "VOLUME_CC_DOSE_RANGE_ROI",
"roi_name": "BRAINSTEM",
"arg_1": 30,
"arg_2": 60,
"objectives": [{
"label": "IDEAL",
"color": [18, 191, 0],
"max": 0
}, {
"label": "GOOD",
"color": [136, 223, 127],
"max": 3
}, {
"label": "ACCEPTABLE",
"color": [255, 216, 0],
"max": 6
}, {
"label": "MARGINAL",
"color": [255, 102, 0],
"max": 9
}, {
"label": "UNACCEPTABLE",
"color": [255, 0, 0]
}]
}], [{
"id": pk.custom_metrics.resolve_by_name("Genetic Type").id
}])
"""
assert isinstance(name, six.string_types), "`name` is required as a string."
assert isinstance(computed, list), "`computed` is required as a list."
assert isinstance(custom, list), "`custom` is required as a list."
body = {'name': name, 'computed': computed, 'custom': custom}
_, scorecard = self._requestor.post('/collections/' + self._collection.id + '/metrics/sets', json=body)
return CollectionScorecardItem(self, self._collection, scorecard)
def delete(self, scorecard_id):
"""Deletes a scorecard by id.
Parameters:
scorecard_id (str): The id of the scorecard to delete.
Raises:
AssertionError: If the input parameters are invalid.
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
If you know the scorecard id, you can delete the scorecard directly using this method::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
                collection.scorecards.delete('5c463a6c040040f1efda74db75c1b121')
"""
assert isinstance(scorecard_id, six.string_types), "`scorecard_id` is required as a string."
self._requestor.delete('/collections/' + self._collection.id + '/metrics/sets/' + scorecard_id)
def find(self, predicate=None, **props):
"""Finds the first scorecard that matches the input paramters.
Note:
For more information on how to use this method, see :ref:`find-methods`.
Parameters:
predicate (func): A function that is passed a scorecard as input and which should return
a bool indicating whether the scorecard is a match.
**props: A dictionary of keyword arguments that may include any scorecard attribute.
These arguments are considered in turn to find matching scorecards.
Returns:
:class:`proknow.Collections.CollectionScorecardSummary`: A summary representation of the
matching scorecard.
Raises:
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
"""
if predicate is None and len(props) == 0:
return None
scorecards = self.query()
for scorecard in scorecards:
match = True
if predicate is not None and not predicate(scorecard):
match = False
for key in props:
if scorecard._data[key] != props[key]:
match = False
if match:
return scorecard
return None
def get(self, scorecard_id):
"""Gets a scorecard by id.
Parameters:
scorecard_id (str): The id of the scorecard to get.
Returns:
:class:`proknow.Collections.CollectionScorecardItem`: A complete representation of the
collection scorecard
Raises:
AssertionError: If the input parameters are invalid.
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
If you know the scorecard id, you can get the collection scorecard directly using this
method::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
scorecard = collection.scorecards.get('5c463a6c040068100c7f665acad17ac4')
"""
assert isinstance(scorecard_id, six.string_types), "`scorecard_id` is required as a string."
_, scorecard = self._requestor.get('/collections/' + self._collection.id + '/metrics/sets/' + scorecard_id)
return CollectionScorecardItem(self, self._collection, scorecard)
def query(self):
"""Queries for collection scorecards.
Returns:
list: A list of :class:`proknow.Collections.CollectionScorecardSummary` objects, each
representing a summarized collection scorecard for the current collection.
Raises:
AssertionError: If the input parameters are invalid.
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
This example queries the scorecards and prints the name of each scorecard::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
for scorecard in collection.scorecards.query():
print(scorecard.name)
"""
_, scorecards = self._requestor.get('/collections/' + self._collection.id + '/metrics/sets')
return [CollectionScorecardSummary(self, self._collection, scorecard) for scorecard in scorecards]
class CollectionScorecardSummary(object):
"""
This class represents a summary view of a collection scorecard. It's instantiated by the
:meth:`proknow.Collections.CollectionScorecards.query` method to represent each of the
scorecards returned in a query result.
Attributes:
id (str): The id of the scorecard (readonly).
name (str): The name of the scorecard (readonly).
data (dict): The summary representation of the scorecard as returned from the API
(readonly).
"""
def __init__(self, scorecards, collection, scorecard):
"""Initializes the CollectionScorecardSummary class.
Parameters:
scorecards (proknow.Collections.CollectionScorecards): The CollectionScorecard instance
that is instantiating the object.
            collection (proknow.Collections.CollectionItem): An instance of a CollectionItem.
scorecard (dict): A dictionary of scorecard attributes.
"""
self._scorecards = scorecards
self._collection = collection
self._data = scorecard
self._id = scorecard["id"]
self._name = scorecard["name"]
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def data(self):
return self._data
def get(self):
"""Gets the complete representation of the scorecard.
Returns:
:class:`proknow.Collections.CollectionScorecardItem`: A complete representation of the
collection scorecard
Raises:
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
The following example shows how to turn a list of CollectionScorecardSummary objects
into a list of CollectionScorecardItem objects::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
scorecards = [scorecard.get() for scorecard in collection.scorecards.query()]
"""
return self._scorecards.get(self._id)
class CollectionScorecardItem(object):
"""
This class represents a collection scorecard. It's instantiated by the
:class:`proknow.Collections.CollectionScorecards` class as a complete representation of the
scorecard.
Attributes:
id (str): The id of the scorecard (readonly).
data (dict): The complete representation of the scorecard as returned from the API
(readonly).
name (str): The name of the scorecard.
computed (list): The computed metrics of the scorecard.
custom (list): The custom metrics of the scorecard.
"""
def __init__(self, scorecards, collection, scorecard):
"""Initializes the CollectionScorecardItem class.
Parameters:
scorecards (proknow.Collections.CollectionScorecards): The CollectionScorecard instance
that is instantiating the object.
            collection (proknow.Collections.CollectionItem): An instance of a CollectionItem.
scorecard (dict): A dictionary of scorecard attributes.
"""
self._scorecards = scorecards
self._requestor = self._scorecards._requestor
self._collection = collection
self._data = scorecard
self._id = scorecard["id"]
self.name = scorecard["name"]
self.computed = scorecard["computed"]
self.custom = scorecard["custom"]
@property
def id(self):
return self._id
@property
def data(self):
return self._data
def delete(self):
"""Deletes the scorecard.
Raises:
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
The following example shows how to find a scorecard by its name and delete it::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
scorecard = collection.scorecards.find(name='My Scorecard').get()
scorecard.delete()
"""
self._scorecards.delete(self._id)
def save(self):
"""Saves the changes made to a scorecard.
Note:
For information on how to construct computed metrics visit :ref:`computed-metrics`.
For information on how to define scorecard objectives, see :ref:`scorecard-objectives`.
Raises:
:class:`proknow.Exceptions.HttpError`: If the HTTP request generated an error.
Example:
The following example shows how to find a scorecard by its name, remove the associated
custom metrics, and save it::
from proknow import ProKnow
pk = ProKnow('https://example.proknow.com', credentials_file="./credentials.json")
collection = pk.collections.find(name='My Collection').get()
scorecard = collection.scorecards.find(name='My Scorecard').get()
scorecard.custom = []
scorecard.save()
"""
body = {
"name": self.name,
"computed": self.computed,
"custom": self.custom
}
_, scorecard = self._requestor.put('/collections/' + self._collection.id + '/metrics/sets/' + self._id, json=body)
self._data = scorecard
self.name = scorecard["name"]
self.computed = scorecard["computed"]
self.custom = scorecard["custom"]
|
# coding: utf-8
'''
@author: eju
'''
import numpy as np
import PIL
import sys
import cv2
sys.path.insert(1, 'D:\\program\\pytorch-layoutnet')
import pano_lsd_align
cutSize = 320
fov = np.pi / 3
xh = np.arange(-np.pi, np.pi*5/6, np.pi/6)
yh = np.zeros(xh.shape[0])
xp = np.array([-3/3, -2/3, -1/3, 0/3, 1/3, 2/3, -3/3, -2/3, -1/3, 0/3, 1/3, 2/3]) * np.pi
yp = np.array([ 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, -1/4, -1/4, -1/4, -1/4, -1/4, -1/4]) * np.pi
x = np.concatenate([xh, xp, [0, 0]])
y = np.concatenate([yh, yp, [np.pi/2., -np.pi/2]])
im = PIL.Image.open('pano.jpg')
im_array = np.array(im)
sepScene = pano_lsd_align.separatePano(im_array.copy(), fov, x, y, cutSize)
print(len(sepScene))
scene_16 = sepScene[16]
PIL.Image.fromarray(scene_16['img'].astype(np.uint8)).save('separate_0_origin.png')
edge = []
LSD = cv2.createLineSegmentDetector(_refine=cv2.LSD_REFINE_ADV, _quant=0.7)
gray_img = cv2.cvtColor(scene_16['img'], cv2.COLOR_RGB2GRAY)
PIL.Image.fromarray(gray_img.astype(np.uint8)).save('separate_0_gray.png')
lines, width, prec, nfa = LSD.detect(gray_img)
edgeMap = LSD.drawSegments(np.zeros_like(gray_img), lines)[..., -1]
PIL.Image.fromarray(edgeMap.astype(np.uint8)).save('separate_0_edge.png')
print(lines.shape)
lines = np.squeeze(lines, 1) # remove single-dimensional entries from the shape, i.e. drop the axes whose size is 1
print(lines.shape)
edgeList = np.concatenate([lines, width, prec, nfa], 1)
#print(edgeList)
#print(edgeList.shape)
edge = {
'img': edgeMap,
'edgeLst': edgeList,
'vx': scene_16['vx'],
'vy': scene_16['vy'],
'fov': scene_16['fov']
}
print(edge['edgeLst'].shape)
# Compute panoLst
edgeList = edge['edgeLst']
vx = edge['vx']
vy = edge['vy']
fov = edge['fov']
imH, imW = edge['img'].shape
print(imH,imW)
R = (imW/2) / np.tan(fov/2)
print("R:", R)
# im is the tangent plane, contacting with ball at [x0 y0 z0]
x0 = R * np.cos(vy) * np.sin(vx)
y0 = R * np.cos(vy) * np.cos(vx)
z0 = R * np.sin(vy)
print("x0,y0,z0: ", x0, y0, z0)
vecposX = np.array([np.cos(vx), -np.sin(vx), 0])
vecposY = np.cross(np.array([x0, y0, z0]), vecposX)
vecposY = vecposY / np.sqrt(vecposY @ vecposY.T)
vecposX = vecposX.reshape(1, -1)
vecposY = vecposY.reshape(1, -1)
Xc = (0 + imW-1) / 2
Yc = (0 + imH-1) / 2
#print("Xc,Yc: ", Xc, Yc)
vecx1 = edgeList[:, [0]] - Xc
vecy1 = edgeList[:, [1]] - Yc
vecx2 = edgeList[:, [2]] - Xc
vecy2 = edgeList[:, [3]] - Yc
print("vecPosX VecPosY: ", vecposX, vecposY)
print("Xc YC: ", Xc, Yc)
print("vecx1 vecy1 vecx2 vecy2: ", vecx1[0], vecy1[0], vecx2[0], vecy2[0])
vec1 = np.tile(vecx1, [1, 3]) * vecposX + np.tile(vecy1, [1, 3]) * vecposY
vec2 = np.tile(vecx2, [1, 3]) * vecposX + np.tile(vecy2, [1, 3]) * vecposY
print("vec1 vec2: ", vec1[0], vec2[0])
coord1 = [[x0, y0, z0]] + vec1
coord2 = [[x0, y0, z0]] + vec2
normal = np.cross(coord1, coord2, axis=1)
normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)
panoList = np.hstack([normal, coord1, coord2, edgeList[:, [-1]]])
# print(vx,vy,fov)
#
#
# print(edgeList[0])
# print("normal: ", normal[0])
# print("coord1: ", coord1[0])
# print("coord2: ", coord2[0])
# print(edgeList[:, [-1]][0])
# return panoList
# edge['panoLst'] = pano_lsd_align.edgeFromImg2Pano(edge)
#
# print(edge['panoLst'].shape)
# edgeLst
# [x1-------------y1-------------x2-------------y2-------------width----------prec-----------nfa-----------]
# [8.97189808e+00 1.40631226e+02 7.87251377e+00 6.93788986e+01 3.23251407e+00 1.25000000e-01 4.11825416e+02]
# print(edge['edgeLst'][0])
# panoList
# [nx--------------ny---------------nz--------------x1-------------y1---------------z1--------------x2-------------y2--------------z2--------------nfa]
# [-8.79043790e-01 -4.76548062e-01 1.35631633e-02 1.50528102e+02 -2.77128129e+02 1.88687744e+01 1.51627486e+02 -2.77128129e+02 9.01211014e+01 4.11825416e+02]
# print(edge['panoLst'][0])
# print(edge['vx'], edge['vy'], edge['fov'])
|
from __future__ import division
import math
import os
import sys
import time
import numpy as np
import scipy.stats
from scipy.optimize import curve_fit
from math import log10
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import pi as nombrepi
##### Read the data file (frequencies/intensity)
donnees = open("inputtemps.dat","r")
lignes = donnees.readlines()
donnees.close()
##### Initialize the arrays
s = len(lignes) # Number of lines in the file, i.e. the number of data points #
t = np.zeros(s) # Array of all time values #
I = np.zeros(s) # Array of intensities #
##### Choose the frequency range and fill the arrays
# tmin = float(input("Minimal time : "))
# tmax = float(input("Maximal time : "))
tmin = 800
tmax = 1200
for i in range(1,s+1) :
t[i-1] = float(lignes[i-1].split()[0])
I[i-1] = float(lignes[i-1].split()[1])
if (t[i-1] <= tmin) :
indicemin = i-1
if (t[i-1] <= tmax) :
indicemax = i-1
n = indicemax-indicemin+1 # number of points in the desired frequency range
temps = np.zeros(n) # Array of times over the desired range
intensite = np.zeros(n) # Array of intensities over the desired range
for i in range(1,n+1) :
temps[i-1] = t[indicemin+(i-1)]
intensite[i-1] = I[indicemin+(i-1)]
##### Compute quantities characteristic of the signal
esperance = sum(temps*intensite)/sum(intensite)
variance = np.sqrt(sum(intensite*(temps-esperance)**2)/sum(intensite))
##### Define the Gaussian fit function
def gaussienne(x,b,a,xo,sigma) :
return b+a*np.exp(-(x-xo)**2/(2*(sigma**2)))
##### Perform the fit
parametres,covariance = curve_fit(gaussienne,temps,intensite,p0=[np.max(intensite),np.max(intensite),esperance,variance])
##### Write and display the results
plt.plot(temps,intensite,'b+',label = 'data')
plt.plot(temps,gaussienne(temps,*parametres),'r',label = 'fit')
plt.legend()
plt.xlabel("bin")
plt.ylabel("I")
plt.title("Sommes sur toutes les frequences")
plt.show()
#print(parametres)
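##### Added sketch: a quantity that often accompanies a Gaussian fit is the full
##### width at half maximum, FWHM = 2*sqrt(2*ln(2))*sigma, computed here from the
##### fitted sigma (parametres[3]).
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * abs(parametres[3])
print("FWHM of the fitted Gaussian:", fwhm)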
|
# # basicMLpy.cross_validation module
import numpy as np
import copy
import itertools
from .utils import check_for_intercept, split_indices
class CrossValidation:
"""
Class that performs the cross validation given a certain function.
Methods:
fit(X,y) -> Performs the cross validation algorithm on the training set(x,y).
scores() -> Gives the cross validation scores for the training set.
expected_generalization_error() -> Gives the predicted generalization(out of sample) test error.
get_cv_estimators() -> Returns all the estimators trained during the cross validation. Requires bool return_estimator to be set to True.
"""
def __init__(self,estimator,loss_function,n_folds:int,return_estimator:bool=False):
"""
Initialize self.
Inputs:
estimator: estimator object implementing fit
input the estimator to be used in the cross validation algorithm
            loss_function: loss function object that returns the loss between targets and predictions
                input the loss function to be used in the cross validation algorithm
n_folds: int
input number of folds to be created. must be bigger than two.
return_estimator: bool, default=False
Whether to return the estimators fitted on each fold.
"""
self.estimator = estimator
self.loss_function = loss_function
self.n_folds = n_folds
self.return_estimator = return_estimator
def fit(self,x,y):
"""
Performs the cross-validation on a given dataset.
Inputs:
x: array
input array of input points, without the intercept(raw data).
y: array
input array of output points.
"""
self.test_scores = []
self.train_scores = []
self.folds = split_indices(x,self.n_folds)
if self.return_estimator:
self.cv_estimators = []
for curr_fold in range(len(self.folds)):
curr_train_set = list(itertools.chain.from_iterable([x for i,x in enumerate(self.folds) if i != curr_fold]))
self.estimator.fit(x[curr_train_set],y[curr_train_set])
self.test_scores.append(self.loss_function(self.estimator.predict(x[self.folds[curr_fold]]),y[self.folds[curr_fold]]))
self.train_scores.append(self.loss_function(self.estimator.predict(x[curr_train_set]),y[curr_train_set]))
if self.return_estimator:
                # store an independent copy so each fold's fitted estimator is preserved
                self.cv_estimators.append(copy.deepcopy(self.estimator))
def scores(self):
"""
Gives the calculated cross-validation scores for the dataset.
Returns:
scores_dict: dict
outputs a dict of arrays. This dict has two keys: 'train_scores' and 'test_scores', each mapping to an array of the respective scores.
"""
scores_dict = {}
scores_dict['train_scores'] = self.train_scores
scores_dict['test_scores'] = self.test_scores
return scores_dict
def expected_generalization_error(self):
"""
        Calculates the expected test error of the model, which by definition is the average of the cross-validation errors found by the algorithm.
Returns:
self.error: float
Outputs the expected test error of the model.
"""
        return sum(self.test_scores)/self.n_folds
def get_cv_estimators(self):
"""
Returns all of the estimators fitted in the cross validation.
Returns:
self.cv_estimators: list of estimator objects
Outputs the estimators fitted in the cross validation.
"""
        if not self.return_estimator:
            raise ValueError("return_estimator must be set to True in order to use this method!")
return self.cv_estimators
###
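# Hedged usage sketch (not part of the original module): exercises CrossValidation with
# a trivial mean-predicting estimator and a squared-error loss, both of which are
# illustrative stand-ins rather than basicMLpy classes. Because of the relative import
# above, run it as `python -m basicMLpy.cross_validation`. It assumes split_indices
# returns one group of row indices per fold, as the fit() loop above expects.
if __name__ == "__main__":
    class _MeanEstimator:
        """Predicts the mean of the training targets for every input row."""
        def fit(self, x, y):
            self._mean = float(np.mean(y))
        def predict(self, x):
            return np.full(len(x), self._mean)

    def _squared_error(predictions, targets):
        return float(np.mean((np.asarray(predictions) - np.asarray(targets)) ** 2))

    np.random.seed(0)
    x_demo = np.random.normal(size=(100, 3))
    y_demo = np.random.normal(size=100)
    cv = CrossValidation(_MeanEstimator(), _squared_error, n_folds=5)
    cv.fit(x_demo, y_demo)
    print(cv.scores())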
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def normalize_data_format(value):
if value is None:
value = tf.keras.backend.image_data_format()
data_format = value.lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
f'"channels_first", "channels_last". Received: {value}'
)
return data_format
def normalize_tuple(value, n, name, allow_zero=False):
"""Transforms non-negative/positive integer/integers into an integer tuple.
Args:
      value: The value to validate and convert. Could be an int, or any iterable of
ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
      allow_zero: Defaults to False. A ValueError will be raised if zero is received
        and this param is False.
Returns:
A tuple of n integers.
Raises:
      ValueError: If something other than an int/long or an iterable thereof, or a
        negative value, is passed.
"""
error_msg = (
f"The `{name}` argument must be a tuple of {n} " f"integers. Received: {value}"
)
if isinstance(value, int):
value_tuple = (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError(error_msg)
if len(value_tuple) != n:
raise ValueError(error_msg)
for single_value in value_tuple:
try:
int(single_value)
except (ValueError, TypeError):
error_msg += (
f"including element {single_value} of " f"type {type(single_value)}"
)
raise ValueError(error_msg)
if allow_zero:
unqualified_values = {v for v in value_tuple if v < 0}
req_msg = ">= 0"
else:
unqualified_values = {v for v in value_tuple if v <= 0}
req_msg = "> 0"
if unqualified_values:
error_msg += (
f" including {unqualified_values}"
f" that does not satisfy the requirement `{req_msg}`."
)
raise ValueError(error_msg)
return value_tuple
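# Illustrative check (not part of KerasCV; importing this module requires TensorFlow):
# normalize_tuple expands a scalar into an n-tuple and validates an iterable of ints.
if __name__ == "__main__":
    print(normalize_tuple(2, 2, "kernel_size"))   # expected: (2, 2)
    print(normalize_tuple([1, 3], 2, "strides"))  # expected: (1, 3)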
|
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
homedir = './bin'
logdir = '%s/log' % (homedir)
# Binary names
SMR = 'smr-replicator'
GW = 'redis-gateway'
REDIS = 'redis-arc'
CLUSTER_UTIL = 'cluster-util'
DUMP_UTIL = 'dump-util'
DUMP_UTIL_PLUGIN = 'dump2json_base32hex.so'
LOG_UTIL = 'smr-logutil'
CAPI_SO_FILE = 'libarcci.so'
CAPI32_SO_FILE = 'libarcci32.so'
CAPI_TEST_SERVER = 'local_proxy'
CAPI32_TEST_SERVER = 'local_proxy32'
CC = 'confmaster-1.0.0-SNAPSHOT-jar-with-dependencies.jar'
CM_DEFAULT_PORT = 1122
CM_PROPERTY_FILE_NAME = 'cc.properties'
CM_EXEC_SCRIPT = 'confmaster-integrationtest.sh'
ZK_CLI = 'test-zk-cli-0.0.1-SNAPSHOT-jar-with-dependencies.jar'
# Binary directories
SMR_DIR = '%s/smr' % (homedir)
GW_DIR = '%s/gw' % (homedir)
REDIS_DIR = '%s/redis' % (homedir)
REDIS_CHECK_POINT_FILE_NAME = 'dump.rdb'
CLUSTER_UTIL_DIR = '%s/redis' % (homedir)
DUMP_UTIL_DIR = '%s/redis' % (homedir)
LOG_UTIL_DIR = '%s/smr' % (homedir)
CAPI_DIR = '%s/redis' % (homedir)
CC_DIR = '%s/confmaster' % (homedir)
ARCCI_DIR = "../api/arcci/"
ARCCI_SO_PATH = "../api/arcci/.obj64/lib/libarcci.so"
ARCCI32_SO_PATH = "../api/arcci/.obj32/lib/libarcci.so"
ZK_CLI_DIR = '../tools/test-zk-cli/target/'
ROLE_LCONN = '1'
ROLE_MASTER = '2'
ROLE_SLAVE = '3'
CC_LEADER = 'leader'
CC_FOLLOWER = 'follower'
|
import logging
import numpy as np
import tensorflow as tf
from keras.datasets import cifar10
from keras.utils import np_utils
logger = logging.getLogger(__name__)
def _create_tf_dataset(x, y, batch_size):
return tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(x),
tf.data.Dataset.from_tensor_slices(y))).shuffle(500).repeat().batch(batch_size)
def get_tf_datasets_from_numpy(batch_size, validation_split=0.1):
"""
Main function getting tf.Data.datasets for training, validation, and testing
Args:
batch_size (int): Batch size
validation_split (float): Split for partitioning training and validation sets. Between 0.0 and 1.0.
"""
# Load data from keras datasets api
(X, y), (X_test, y_test) = cifar10.load_data()
logger.info("Dividing pixels by 255")
X = X / 255.
X_test = X_test / 255.
X = X.astype(np.float32)
X_test = X_test.astype(np.float32)
y = y.astype(np.float32)
y_test = y_test.astype(np.float32)
# Turn labels into onehot encodings
if y.shape[1] != 10:
y = np_utils.to_categorical(y, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
logger.info("Loaded data from keras")
split_idx = int((1.0 - validation_split) * len(X))
X_train, y_train = X[:split_idx], y[:split_idx]
X_valid, y_valid = X[split_idx:], y[split_idx:]
train_dataset = _create_tf_dataset(X_train, y_train, batch_size)
valid_dataset = _create_tf_dataset(X_valid, y_valid, batch_size)
test_dataset = _create_tf_dataset(X_test, y_test, batch_size)
# Get the batch sizes for the train, valid, and test datasets
num_train_batches = int(X_train.shape[0] // batch_size)
num_valid_batches = int(X_valid.shape[0] // batch_size)
num_test_batches = int(X_test.shape[0] // batch_size)
return train_dataset, valid_dataset, test_dataset, num_train_batches, num_valid_batches, num_test_batches
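# Hedged usage sketch: builds the three datasets with a small batch size and reports
# how many batches each split yields. Requires keras/tensorflow and will download
# CIFAR-10 on first use.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    (train_ds, valid_ds, test_ds,
     n_train, n_valid, n_test) = get_tf_datasets_from_numpy(batch_size=64)
    print("batches per epoch:", n_train, n_valid, n_test)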
|
'''This is a macro for ccpn analysis. The only thing this macro does
is compile the C extensions necessary for Malandro.
If you downloaded a 'pre-compiled' version of Malandro you don't
have to run this macro.
If you do want to compile the c code (again) you can use this macro.
The reason this is written in the form of a ccpn macro is that a lot
of people run a 'pre-compiled' version of CCPN Analysis on their
computer. This version of Analysis comes with its own python
interpreter, which might be different than the default one on your
system. This macro is just an easy way to make sure the code gets
compiled in a compatible way with the python version your Analysis
is using.
To run this macro: open CCPN Analysis, open a project and go to:
Macro -> Organize Macros
click on 'Add Macro', navigate to the location of this file and
selected it. In the bottom half of the dialog, select 'runInstaller',
and press the 'Load Macro' button. Now the macro should appear in
the list of macros and you can select and run it. Most probably
there will be some compiler messages and if the compilation process
finishes successfully a message will appear:
'Compiling is done, you can now run the macro.'
For more installation instruction read the README.
'''
import sys
import os
def runInstaller(argServer) :
pathToPython = sys.executable
print pathToPython
workingDirectory = os.path.dirname(__file__)
import subprocess
print 'Compiling malandro for your version of ccpn analysis...'
print 'using python version %s' %sys.version
process = subprocess.Popen([pathToPython, 'setupC.py', 'build_ext', '--inplace'], cwd=workingDirectory, stdout=subprocess.PIPE)
process.wait()
if process.returncode == 0:
print 'Compiling is done, you can now run the macro.'
|
import os
import sys
from dotenv import load_dotenv
load_dotenv()
def get_env_name() -> str:
env_name: str = os.getenv('ENV_NAME', 'dev')
script_name: str = sys.argv[0]
if script_name.startswith('tests/'):
env_name = 'test'
return env_name
class Config:
DATABASE_URL: str = os.environ['DATABASE_URL']
BASIC_AUTH_ADMIN_PANEL_NAME: str = os.environ['BASIC_AUTH_ADMIN_PANEL_NAME']
BASIC_AUTH_ADMIN_PANEL_PASS: str = os.environ['BASIC_AUTH_ADMIN_PANEL_PASS']
JWT_SECRET_KEY: str = os.environ['JWT_SECRET_KEY']
DEBUG: bool = False
TESTING: bool = False
class TestConfig(Config):
DATABASE_URL: str = os.environ['TEST_DATABASE_URL']
TESTING: bool = True
class DevelopmentConfig(Config):
DEBUG: bool = True
class ProductionConfig(Config):
pass
config_by_env_name: dict = dict(
test=TestConfig,
dev=DevelopmentConfig,
prod=ProductionConfig,
)
ENV_NAME: str = get_env_name()
CONFIG: Config = config_by_env_name[ENV_NAME]
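# Illustrative note (assumption): in a Flask-style application this module would
# typically be consumed via `app.config.from_object(CONFIG)`; the snippet below is
# a sketch of that intent, not code taken from this project.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(CONFIG)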
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GroupMetricRuleArgs', 'GroupMetricRule']
@pulumi.input_type
class GroupMetricRuleArgs:
def __init__(__self__, *,
category: pulumi.Input[str],
escalations: pulumi.Input['GroupMetricRuleEscalationsArgs'],
group_id: pulumi.Input[str],
group_metric_rule_name: pulumi.Input[str],
metric_name: pulumi.Input[str],
namespace: pulumi.Input[str],
rule_id: pulumi.Input[str],
contact_groups: Optional[pulumi.Input[str]] = None,
dimensions: Optional[pulumi.Input[str]] = None,
effective_interval: Optional[pulumi.Input[str]] = None,
email_subject: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[str]] = None,
no_effective_interval: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
silence_time: Optional[pulumi.Input[int]] = None,
webhook: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a GroupMetricRule resource.
:param pulumi.Input[str] category: The abbreviation of the service name.
:param pulumi.Input['GroupMetricRuleEscalationsArgs'] escalations: Alarm level. See the block for escalations.
:param pulumi.Input[str] group_id: The ID of the application group.
:param pulumi.Input[str] group_metric_rule_name: The name of the alert rule.
:param pulumi.Input[str] metric_name: The name of the metric.
:param pulumi.Input[str] namespace: The namespace of the service.
:param pulumi.Input[str] rule_id: The ID of the alert rule.
:param pulumi.Input[str] contact_groups: Alarm contact group.
:param pulumi.Input[str] dimensions: The dimensions that specify the resources to be associated with the alert rule.
:param pulumi.Input[str] effective_interval: The time period during which the alert rule is effective.
:param pulumi.Input[str] email_subject: The subject of the alert notification email.
:param pulumi.Input[str] interval: The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
:param pulumi.Input[str] no_effective_interval: The time period during which the alert rule is ineffective.
:param pulumi.Input[int] period: The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
:param pulumi.Input[int] silence_time: The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
:param pulumi.Input[str] webhook: The callback URL.
"""
pulumi.set(__self__, "category", category)
pulumi.set(__self__, "escalations", escalations)
pulumi.set(__self__, "group_id", group_id)
pulumi.set(__self__, "group_metric_rule_name", group_metric_rule_name)
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "rule_id", rule_id)
if contact_groups is not None:
pulumi.set(__self__, "contact_groups", contact_groups)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if effective_interval is not None:
pulumi.set(__self__, "effective_interval", effective_interval)
if email_subject is not None:
pulumi.set(__self__, "email_subject", email_subject)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if no_effective_interval is not None:
pulumi.set(__self__, "no_effective_interval", no_effective_interval)
if period is not None:
pulumi.set(__self__, "period", period)
if silence_time is not None:
pulumi.set(__self__, "silence_time", silence_time)
if webhook is not None:
pulumi.set(__self__, "webhook", webhook)
@property
@pulumi.getter
def category(self) -> pulumi.Input[str]:
"""
The abbreviation of the service name.
"""
return pulumi.get(self, "category")
@category.setter
def category(self, value: pulumi.Input[str]):
pulumi.set(self, "category", value)
@property
@pulumi.getter
def escalations(self) -> pulumi.Input['GroupMetricRuleEscalationsArgs']:
"""
Alarm level. See the block for escalations.
"""
return pulumi.get(self, "escalations")
@escalations.setter
def escalations(self, value: pulumi.Input['GroupMetricRuleEscalationsArgs']):
pulumi.set(self, "escalations", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Input[str]:
"""
The ID of the application group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="groupMetricRuleName")
def group_metric_rule_name(self) -> pulumi.Input[str]:
"""
The name of the alert rule.
"""
return pulumi.get(self, "group_metric_rule_name")
@group_metric_rule_name.setter
def group_metric_rule_name(self, value: pulumi.Input[str]):
pulumi.set(self, "group_metric_rule_name", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
The name of the metric.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
"""
The namespace of the service.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="ruleId")
def rule_id(self) -> pulumi.Input[str]:
"""
The ID of the alert rule.
"""
return pulumi.get(self, "rule_id")
@rule_id.setter
def rule_id(self, value: pulumi.Input[str]):
pulumi.set(self, "rule_id", value)
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> Optional[pulumi.Input[str]]:
"""
Alarm contact group.
"""
return pulumi.get(self, "contact_groups")
@contact_groups.setter
def contact_groups(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contact_groups", value)
@property
@pulumi.getter
def dimensions(self) -> Optional[pulumi.Input[str]]:
"""
The dimensions that specify the resources to be associated with the alert rule.
"""
return pulumi.get(self, "dimensions")
@dimensions.setter
def dimensions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dimensions", value)
@property
@pulumi.getter(name="effectiveInterval")
def effective_interval(self) -> Optional[pulumi.Input[str]]:
"""
The time period during which the alert rule is effective.
"""
return pulumi.get(self, "effective_interval")
@effective_interval.setter
def effective_interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "effective_interval", value)
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> Optional[pulumi.Input[str]]:
"""
The subject of the alert notification email.
"""
return pulumi.get(self, "email_subject")
@email_subject.setter
def email_subject(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_subject", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[str]]:
"""
The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
"""
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter(name="noEffectiveInterval")
def no_effective_interval(self) -> Optional[pulumi.Input[str]]:
"""
The time period during which the alert rule is ineffective.
"""
return pulumi.get(self, "no_effective_interval")
@no_effective_interval.setter
def no_effective_interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "no_effective_interval", value)
@property
@pulumi.getter
def period(self) -> Optional[pulumi.Input[int]]:
"""
The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
"""
return pulumi.get(self, "period")
@period.setter
def period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period", value)
@property
@pulumi.getter(name="silenceTime")
def silence_time(self) -> Optional[pulumi.Input[int]]:
"""
The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
"""
return pulumi.get(self, "silence_time")
@silence_time.setter
def silence_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "silence_time", value)
@property
@pulumi.getter
def webhook(self) -> Optional[pulumi.Input[str]]:
"""
The callback URL.
"""
return pulumi.get(self, "webhook")
@webhook.setter
def webhook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webhook", value)
@pulumi.input_type
class _GroupMetricRuleState:
def __init__(__self__, *,
category: Optional[pulumi.Input[str]] = None,
contact_groups: Optional[pulumi.Input[str]] = None,
dimensions: Optional[pulumi.Input[str]] = None,
effective_interval: Optional[pulumi.Input[str]] = None,
email_subject: Optional[pulumi.Input[str]] = None,
escalations: Optional[pulumi.Input['GroupMetricRuleEscalationsArgs']] = None,
group_id: Optional[pulumi.Input[str]] = None,
group_metric_rule_name: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
no_effective_interval: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
silence_time: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[str]] = None,
webhook: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering GroupMetricRule resources.
:param pulumi.Input[str] category: The abbreviation of the service name.
:param pulumi.Input[str] contact_groups: Alarm contact group.
:param pulumi.Input[str] dimensions: The dimensions that specify the resources to be associated with the alert rule.
:param pulumi.Input[str] effective_interval: The time period during which the alert rule is effective.
:param pulumi.Input[str] email_subject: The subject of the alert notification email.
:param pulumi.Input['GroupMetricRuleEscalationsArgs'] escalations: Alarm level. See the block for escalations.
:param pulumi.Input[str] group_id: The ID of the application group.
:param pulumi.Input[str] group_metric_rule_name: The name of the alert rule.
:param pulumi.Input[str] interval: The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
:param pulumi.Input[str] metric_name: The name of the metric.
:param pulumi.Input[str] namespace: The namespace of the service.
:param pulumi.Input[str] no_effective_interval: The time period during which the alert rule is ineffective.
:param pulumi.Input[int] period: The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
:param pulumi.Input[str] rule_id: The ID of the alert rule.
:param pulumi.Input[int] silence_time: The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
:param pulumi.Input[str] status: The status of Group Metric Rule.
:param pulumi.Input[str] webhook: The callback URL.
"""
if category is not None:
pulumi.set(__self__, "category", category)
if contact_groups is not None:
pulumi.set(__self__, "contact_groups", contact_groups)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if effective_interval is not None:
pulumi.set(__self__, "effective_interval", effective_interval)
if email_subject is not None:
pulumi.set(__self__, "email_subject", email_subject)
if escalations is not None:
pulumi.set(__self__, "escalations", escalations)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if group_metric_rule_name is not None:
pulumi.set(__self__, "group_metric_rule_name", group_metric_rule_name)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if no_effective_interval is not None:
pulumi.set(__self__, "no_effective_interval", no_effective_interval)
if period is not None:
pulumi.set(__self__, "period", period)
if rule_id is not None:
pulumi.set(__self__, "rule_id", rule_id)
if silence_time is not None:
pulumi.set(__self__, "silence_time", silence_time)
if status is not None:
pulumi.set(__self__, "status", status)
if webhook is not None:
pulumi.set(__self__, "webhook", webhook)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
"""
The abbreviation of the service name.
"""
return pulumi.get(self, "category")
@category.setter
def category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "category", value)
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> Optional[pulumi.Input[str]]:
"""
Alarm contact group.
"""
return pulumi.get(self, "contact_groups")
@contact_groups.setter
def contact_groups(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contact_groups", value)
@property
@pulumi.getter
def dimensions(self) -> Optional[pulumi.Input[str]]:
"""
The dimensions that specify the resources to be associated with the alert rule.
"""
return pulumi.get(self, "dimensions")
@dimensions.setter
def dimensions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dimensions", value)
@property
@pulumi.getter(name="effectiveInterval")
def effective_interval(self) -> Optional[pulumi.Input[str]]:
"""
The time period during which the alert rule is effective.
"""
return pulumi.get(self, "effective_interval")
@effective_interval.setter
def effective_interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "effective_interval", value)
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> Optional[pulumi.Input[str]]:
"""
The subject of the alert notification email.
"""
return pulumi.get(self, "email_subject")
@email_subject.setter
def email_subject(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_subject", value)
@property
@pulumi.getter
def escalations(self) -> Optional[pulumi.Input['GroupMetricRuleEscalationsArgs']]:
"""
Alarm level. See the block for escalations.
"""
return pulumi.get(self, "escalations")
@escalations.setter
def escalations(self, value: Optional[pulumi.Input['GroupMetricRuleEscalationsArgs']]):
pulumi.set(self, "escalations", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the application group.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="groupMetricRuleName")
def group_metric_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the alert rule.
"""
return pulumi.get(self, "group_metric_rule_name")
@group_metric_rule_name.setter
def group_metric_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_metric_rule_name", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[str]]:
"""
The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
"""
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the metric.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
The namespace of the service.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="noEffectiveInterval")
def no_effective_interval(self) -> Optional[pulumi.Input[str]]:
"""
The time period during which the alert rule is ineffective.
"""
return pulumi.get(self, "no_effective_interval")
@no_effective_interval.setter
def no_effective_interval(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "no_effective_interval", value)
@property
@pulumi.getter
def period(self) -> Optional[pulumi.Input[int]]:
"""
The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
"""
return pulumi.get(self, "period")
@period.setter
def period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period", value)
@property
@pulumi.getter(name="ruleId")
def rule_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the alert rule.
"""
return pulumi.get(self, "rule_id")
@rule_id.setter
def rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_id", value)
@property
@pulumi.getter(name="silenceTime")
def silence_time(self) -> Optional[pulumi.Input[int]]:
"""
The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
"""
return pulumi.get(self, "silence_time")
@silence_time.setter
def silence_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "silence_time", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of Group Metric Rule.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def webhook(self) -> Optional[pulumi.Input[str]]:
"""
The callback URL.
"""
return pulumi.get(self, "webhook")
@webhook.setter
def webhook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "webhook", value)
class GroupMetricRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
contact_groups: Optional[pulumi.Input[str]] = None,
dimensions: Optional[pulumi.Input[str]] = None,
effective_interval: Optional[pulumi.Input[str]] = None,
email_subject: Optional[pulumi.Input[str]] = None,
escalations: Optional[pulumi.Input[pulumi.InputType['GroupMetricRuleEscalationsArgs']]] = None,
group_id: Optional[pulumi.Input[str]] = None,
group_metric_rule_name: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
no_effective_interval: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
silence_time: Optional[pulumi.Input[int]] = None,
webhook: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Cloud Monitor Service Group Metric Rule resource.
For information about Cloud Monitor Service Group Metric Rule and how to use it, see [What is Group Metric Rule](https://www.alibabacloud.com/help/en/doc-detail/114943.htm).
> **NOTE:** Available in v1.104.0+.
## Import
Cloud Monitor Service Group Metric Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cms/groupMetricRule:GroupMetricRule example <rule_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The abbreviation of the service name.
:param pulumi.Input[str] contact_groups: Alarm contact group.
:param pulumi.Input[str] dimensions: The dimensions that specify the resources to be associated with the alert rule.
:param pulumi.Input[str] effective_interval: The time period during which the alert rule is effective.
:param pulumi.Input[str] email_subject: The subject of the alert notification email.
:param pulumi.Input[pulumi.InputType['GroupMetricRuleEscalationsArgs']] escalations: Alarm level. See the block for escalations.
:param pulumi.Input[str] group_id: The ID of the application group.
:param pulumi.Input[str] group_metric_rule_name: The name of the alert rule.
:param pulumi.Input[str] interval: The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
:param pulumi.Input[str] metric_name: The name of the metric.
:param pulumi.Input[str] namespace: The namespace of the service.
:param pulumi.Input[str] no_effective_interval: The time period during which the alert rule is ineffective.
:param pulumi.Input[int] period: The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
:param pulumi.Input[str] rule_id: The ID of the alert rule.
:param pulumi.Input[int] silence_time: The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
:param pulumi.Input[str] webhook: The callback URL.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GroupMetricRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Cloud Monitor Service Group Metric Rule resource.
For information about Cloud Monitor Service Group Metric Rule and how to use it, see [What is Group Metric Rule](https://www.alibabacloud.com/help/en/doc-detail/114943.htm).
> **NOTE:** Available in v1.104.0+.
## Import
Cloud Monitor Service Group Metric Rule can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cms/groupMetricRule:GroupMetricRule example <rule_id>
```
:param str resource_name: The name of the resource.
:param GroupMetricRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GroupMetricRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
contact_groups: Optional[pulumi.Input[str]] = None,
dimensions: Optional[pulumi.Input[str]] = None,
effective_interval: Optional[pulumi.Input[str]] = None,
email_subject: Optional[pulumi.Input[str]] = None,
escalations: Optional[pulumi.Input[pulumi.InputType['GroupMetricRuleEscalationsArgs']]] = None,
group_id: Optional[pulumi.Input[str]] = None,
group_metric_rule_name: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
no_effective_interval: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
silence_time: Optional[pulumi.Input[int]] = None,
webhook: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GroupMetricRuleArgs.__new__(GroupMetricRuleArgs)
if category is None and not opts.urn:
raise TypeError("Missing required property 'category'")
__props__.__dict__["category"] = category
__props__.__dict__["contact_groups"] = contact_groups
__props__.__dict__["dimensions"] = dimensions
__props__.__dict__["effective_interval"] = effective_interval
__props__.__dict__["email_subject"] = email_subject
if escalations is None and not opts.urn:
raise TypeError("Missing required property 'escalations'")
__props__.__dict__["escalations"] = escalations
if group_id is None and not opts.urn:
raise TypeError("Missing required property 'group_id'")
__props__.__dict__["group_id"] = group_id
if group_metric_rule_name is None and not opts.urn:
raise TypeError("Missing required property 'group_metric_rule_name'")
__props__.__dict__["group_metric_rule_name"] = group_metric_rule_name
__props__.__dict__["interval"] = interval
if metric_name is None and not opts.urn:
raise TypeError("Missing required property 'metric_name'")
__props__.__dict__["metric_name"] = metric_name
if namespace is None and not opts.urn:
raise TypeError("Missing required property 'namespace'")
__props__.__dict__["namespace"] = namespace
__props__.__dict__["no_effective_interval"] = no_effective_interval
__props__.__dict__["period"] = period
if rule_id is None and not opts.urn:
raise TypeError("Missing required property 'rule_id'")
__props__.__dict__["rule_id"] = rule_id
__props__.__dict__["silence_time"] = silence_time
__props__.__dict__["webhook"] = webhook
__props__.__dict__["status"] = None
super(GroupMetricRule, __self__).__init__(
'alicloud:cms/groupMetricRule:GroupMetricRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
category: Optional[pulumi.Input[str]] = None,
contact_groups: Optional[pulumi.Input[str]] = None,
dimensions: Optional[pulumi.Input[str]] = None,
effective_interval: Optional[pulumi.Input[str]] = None,
email_subject: Optional[pulumi.Input[str]] = None,
escalations: Optional[pulumi.Input[pulumi.InputType['GroupMetricRuleEscalationsArgs']]] = None,
group_id: Optional[pulumi.Input[str]] = None,
group_metric_rule_name: Optional[pulumi.Input[str]] = None,
interval: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
no_effective_interval: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
rule_id: Optional[pulumi.Input[str]] = None,
silence_time: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[str]] = None,
webhook: Optional[pulumi.Input[str]] = None) -> 'GroupMetricRule':
"""
Get an existing GroupMetricRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] category: The abbreviation of the service name.
:param pulumi.Input[str] contact_groups: Alarm contact group.
:param pulumi.Input[str] dimensions: The dimensions that specify the resources to be associated with the alert rule.
:param pulumi.Input[str] effective_interval: The time period during which the alert rule is effective.
:param pulumi.Input[str] email_subject: The subject of the alert notification email.
:param pulumi.Input[pulumi.InputType['GroupMetricRuleEscalationsArgs']] escalations: Alarm level. See the block for escalations.
:param pulumi.Input[str] group_id: The ID of the application group.
:param pulumi.Input[str] group_metric_rule_name: The name of the alert rule.
:param pulumi.Input[str] interval: The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
:param pulumi.Input[str] metric_name: The name of the metric.
:param pulumi.Input[str] namespace: The namespace of the service.
:param pulumi.Input[str] no_effective_interval: The time period during which the alert rule is ineffective.
:param pulumi.Input[int] period: The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
:param pulumi.Input[str] rule_id: The ID of the alert rule.
:param pulumi.Input[int] silence_time: The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
:param pulumi.Input[str] status: The status of Group Metric Rule.
:param pulumi.Input[str] webhook: The callback URL.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GroupMetricRuleState.__new__(_GroupMetricRuleState)
__props__.__dict__["category"] = category
__props__.__dict__["contact_groups"] = contact_groups
__props__.__dict__["dimensions"] = dimensions
__props__.__dict__["effective_interval"] = effective_interval
__props__.__dict__["email_subject"] = email_subject
__props__.__dict__["escalations"] = escalations
__props__.__dict__["group_id"] = group_id
__props__.__dict__["group_metric_rule_name"] = group_metric_rule_name
__props__.__dict__["interval"] = interval
__props__.__dict__["metric_name"] = metric_name
__props__.__dict__["namespace"] = namespace
__props__.__dict__["no_effective_interval"] = no_effective_interval
__props__.__dict__["period"] = period
__props__.__dict__["rule_id"] = rule_id
__props__.__dict__["silence_time"] = silence_time
__props__.__dict__["status"] = status
__props__.__dict__["webhook"] = webhook
return GroupMetricRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def category(self) -> pulumi.Output[str]:
"""
The abbreviation of the service name.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter(name="contactGroups")
def contact_groups(self) -> pulumi.Output[str]:
"""
Alarm contact group.
"""
return pulumi.get(self, "contact_groups")
@property
@pulumi.getter
def dimensions(self) -> pulumi.Output[str]:
"""
The dimensions that specify the resources to be associated with the alert rule.
"""
return pulumi.get(self, "dimensions")
@property
@pulumi.getter(name="effectiveInterval")
def effective_interval(self) -> pulumi.Output[Optional[str]]:
"""
The time period during which the alert rule is effective.
"""
return pulumi.get(self, "effective_interval")
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> pulumi.Output[str]:
"""
The subject of the alert notification email.
"""
return pulumi.get(self, "email_subject")
@property
@pulumi.getter
def escalations(self) -> pulumi.Output['outputs.GroupMetricRuleEscalations']:
"""
Alarm level. See the block for escalations.
"""
return pulumi.get(self, "escalations")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
"""
The ID of the application group.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="groupMetricRuleName")
def group_metric_rule_name(self) -> pulumi.Output[str]:
"""
The name of the alert rule.
"""
return pulumi.get(self, "group_metric_rule_name")
@property
@pulumi.getter
def interval(self) -> pulumi.Output[Optional[str]]:
"""
The interval at which Cloud Monitor checks whether the alert rule is triggered. Unit: seconds.
"""
return pulumi.get(self, "interval")
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Output[str]:
"""
The name of the metric.
"""
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def namespace(self) -> pulumi.Output[str]:
"""
The namespace of the service.
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="noEffectiveInterval")
def no_effective_interval(self) -> pulumi.Output[Optional[str]]:
"""
The time period during which the alert rule is ineffective.
"""
return pulumi.get(self, "no_effective_interval")
@property
@pulumi.getter
def period(self) -> pulumi.Output[Optional[int]]:
"""
The aggregation period of the monitoring data. Unit: seconds. The value is an integral multiple of 60. Default value: `300`.
"""
return pulumi.get(self, "period")
@property
@pulumi.getter(name="ruleId")
def rule_id(self) -> pulumi.Output[str]:
"""
The ID of the alert rule.
"""
return pulumi.get(self, "rule_id")
@property
@pulumi.getter(name="silenceTime")
def silence_time(self) -> pulumi.Output[Optional[int]]:
"""
The mute period during which new alerts are not reported even if the alert trigger conditions are met. Unit: seconds. Default value: `86400`, which is equivalent to one day.
"""
return pulumi.get(self, "silence_time")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of Group Metric Rule.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def webhook(self) -> pulumi.Output[Optional[str]]:
"""
The callback URL.
"""
return pulumi.get(self, "webhook")
|
#!/usr/bin/env python
# Copyright 2021 Citrix Systems, Inc. All rights reserved.
# Use of this software is governed by the license terms, if any,
# which accompany or are included with this software.
import logging
import re
class PILex(object):
"""
Class to parse Advanced expressions.
"""
@staticmethod
def get_pi_string(expr):
"""
Helper function to get classic expression from
SYS.EVAL_CLASSIC_EXPR("<>").
expr - should be substring which starts from opening quote in
SYS.EVAL_CLASSIC_EXPR expression to the end of string.
Example:
"ns_true") && true - Returns ns_true
Return values:
-classic expression after removing quotes and handling backslashes
-length of classic expression including double quotes in original
expression expr.
"""
if not expr.startswith('"'):
return None
index = 0
value = ""
# Increment by 1 for opening quote
index += 1
expr_length = len(expr)
while index < expr_length:
if expr[index] == '\\':
index += 1
if index >= expr_length:
return None
if expr[index] in '\\\'"':
value += expr[index]
elif expr[index] == 't':
value += '\t'
elif expr[index] == 'r':
value += '\r'
elif expr[index] == 'n':
value += '\n'
elif expr[index] == 'x':
# Taking next 2 characters to validate for hex digits and
# then to convert to byte.
# Now index points to 2nd hex digit
index += 2
if (index < expr_length and re.match(r"^[0-9a-fA-F]{2}$",
expr[index - 1: index + 1])):
hex_digits = expr[index - 1: index + 1]
hex_digits = int(hex_digits, 16)
if hex_digits > 127:
logging.error("Invalid hex value is used. Maximum "
"hex value allowed is 7f.")
return None
value += chr(hex_digits)
else:
return None
elif expr[index] in "01234567":
# Check for oct digits and convert to byte.
m = re.match(r"^([0-7]{1,3})", expr[index:])
oct_digits = m.group(1)
oct_digits_length = len(oct_digits)
# Now index points to last octal digit.
index += oct_digits_length - 1
oct_digits = int(oct_digits, 8)
if oct_digits > 127:
logging.error("Invalid octal value is used. Maximum "
"octal value allowed is 177.")
return None
value += chr(oct_digits)
else:
return None
elif expr[index] == '"':
break
else:
value = value + expr[index]
index += 1
if index >= expr_length:
return None
# Increment by 1 for closing quote.
value_length = index + 1
return [value, value_length]
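# A quick, illustrative self-check of the docstring example above: for the
# substring starting at the opening quote of SYS.EVAL_CLASSIC_EXPR("ns_true"),
# the returned length (9) counts the classic expression plus both quotes.
if __name__ == '__main__':
    print(PILex.get_pi_string('"ns_true") && true'))  # expected: ['ns_true', 9]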
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 19 12:21:06 2019
@author: mishugeb
"""
from SigProfilerExtractor import subroutines as sub
import numpy as np
import pandas as pd
import SigProfilerExtractor as cosmic
import os
def decompose(signatures, activities, samples, output, signature_database=None, nnls_add_penalty=0.05, nnls_remove_penalty=0.01, initial_remove_penalty=0.05, de_novo_fit_penalty=0.02, genome_build="GRCh37", refit_denovo_signatures=True, make_decomposition_plots=True, connected_sigs=True, verbose=False):
"""
Decomposes the De Novo Signatures into COSMIC Signatures and assigns COSMIC signatures to samples.
Parameters:
signatures: A string. Path to a tab delimited file that contains the signature table where the rows are mutation types and columns are signature IDs.
activities: A string. Path to a tab delimited file that contains the activity table where the rows are sample IDs and columns are signature IDs.
samples: A string. Path to a tab delimited file that contains the samples table where the rows are mutation types and columns are sample IDs.
output: A string. Path to the output folder.
genome_build: A string. The genome type. Example: "GRCh37", "GRCh38", "mm9", "mm10". The default value is "GRCh37".
verbose: Boolean. Prints statements. Default value is False.
Values:
The files below will be generated in the output folder.
Cluster_of_Samples.txt
comparison_with_global_ID_signatures.csv
Decomposed_Solution_Activities.txt
Decomposed_Solution_Samples_stats.txt
Decomposed_Solution_Signatures.txt
decomposition_logfile.txt
dendogram.pdf
Mutation_Probabilities.txt
Signature_assaignment_logfile.txt
Signature_plot[MutatutionContext]_plots_Decomposed_Solution.pdf
Example:
>>>from SigProfilerExtractor import decomposition as decomp
>>>signatures = "path/to/De_Novo_Solution_Signatures.txt"
>>>activities="path/to/De_Novo_Solution_Activities.txt"
>>>samples="path/to/Samples.txt"
>>>output="name or path/to/output.txt"
decomp.decompose(signatures, activities, samples, output, genome_build="GRCh37", verbose=False)
"""
processAvg = pd.read_csv(signatures, sep = "\t", index_col=0)
originalProcessAvg=processAvg
exposureAvg = pd.read_csv(activities, sep = "\t", index_col = 0)
genomes = pd.read_csv(samples, sep = "\t", index_col = 0)
mutation_type = str(genomes.shape[0])
m=mutation_type
index = genomes.index
colnames = genomes.columns
listOfSignatures = processAvg.columns
#creating list of mutational type to sync with the vcf type input
if mutation_type == "78":
mutation_context = "DBS78"
elif mutation_type == "83":
mutation_context = "ID83"
elif mutation_type=="48":
mutation_context = "CNV48"
print("Mutation Type is: CNV")
#paths = cosmic.__path__[0]
#sigDatabase = pd.read_csv(paths+"/data/CNV_signatures.txt", sep="\t",index_col=0)
#genomes=genomes.loc[sigDatabase.index]
#processAvg=processAvg.loc[sigDatabase.index]
else:
mutation_context = "SBS"+mutation_type
processAvg = np.array(processAvg)
signature_names = sub.make_letter_ids(idlenth = processAvg.shape[1], mtype = mutation_context)
exposureAvg.columns=signature_names
# create the folder for the final solution/ De Novo Solution
try:
if not os.path.exists(output):
os.makedirs(output)
except:
print ("The {} folder could not be created".format("output"))
# make the texts for signature plotting
layer_directory1 = output+"/De_Novo_Solution"
try:
if not os.path.exists(layer_directory1):
os.makedirs(layer_directory1)
except:
print ("The {} folder could not be created".format("De_Novo_Solution"))
listOfSignatures = sub.make_letter_ids(idlenth = processAvg.shape[1], mtype=mutation_context)
genomes = pd.DataFrame(genomes)
denovo_exposureAvg = np.array(exposureAvg.T)
exposureAvg = sub.make_final_solution(processAvg, genomes, listOfSignatures, layer_directory1, m, index,\
colnames,denovo_exposureAvg = denovo_exposureAvg, add_penalty=nnls_add_penalty, remove_penalty=nnls_remove_penalty, initial_remove_penalty=initial_remove_penalty, de_novo_fit_penalty=de_novo_fit_penalty, connected_sigs=connected_sigs, refit_denovo_signatures=refit_denovo_signatures)
layer_directory2 = output+"/Decompose_Solution"
try:
if not os.path.exists(layer_directory2):
os.makedirs(layer_directory2)
except:
print ("The {} folder could not be created".format("Decomposed_Solution"))
if processAvg.shape[0]==1536: #collapse the 1536 context into 96 only for the decomposition
processAvg = pd.DataFrame(processAvg, index=index)
processAvg = processAvg.groupby(processAvg.index.str[1:8]).sum()
genomes = genomes.groupby(genomes.index.str[1:8]).sum()
index = genomes.index
processAvg = np.array(processAvg)
if processAvg.shape[0]==288: #collapse the 288 context into 96 only for the decomposition
processAvg = pd.DataFrame(processAvg, index=index)
processAvg = processAvg.groupby(processAvg.index.str[2:9]).sum()
genomes = pd.DataFrame(genomes, index=index)
genomes = genomes.groupby(genomes.index.str[2:9]).sum()
index = genomes.index
processAvg = np.array(processAvg)
final_signatures = sub.signature_decomposition(processAvg, m, layer_directory2, genome_build=genome_build,signature_database=signature_database, mutation_context=mutation_context, add_penalty=0.05, connected_sigs=connected_sigs,remove_penalty=0.01, make_decomposition_plots=make_decomposition_plots, originalProcessAvg=originalProcessAvg)
#final_signatures = sub.signature_decomposition(processAvg, m, layer_directory2, genome_build=genome_build)
# extract the global signatures and new signatures from the final_signatures dictionary
globalsigs = final_signatures["globalsigs"]
globalsigs = np.array(globalsigs)
newsigs = final_signatures["newsigs"]
processAvg = np.hstack([globalsigs, newsigs])
allsigids = final_signatures["globalsigids"]+final_signatures["newsigids"]
attribution = final_signatures["dictionary"]
background_sigs= final_signatures["background_sigs"]
index = genomes.index
colnames = genomes.columns
result = sub.make_final_solution(processAvg, genomes, allsigids, layer_directory2, m, index, colnames, \
cosmic_sigs=True, attribution = attribution, denovo_exposureAvg = exposureAvg , background_sigs=background_sigs, verbose=verbose, genome_build=genome_build, add_penalty=nnls_add_penalty, remove_penalty=nnls_remove_penalty, initial_remove_penalty=initial_remove_penalty,connected_sigs=connected_sigs,refit_denovo_signatures=False)
return result
|
from .video import VideoEntity
ENTITY_CLASSES = [VideoEntity]
ENTITY_TYPE_CHOICES = [
(VideoEntity.name, 'Video'),
]
ENTITY_TYPE_NAME_TO_CLASS = {
k.name: k for k in ENTITY_CLASSES
}
|
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_support.smvbasetest import SmvBaseTest
from smv.error import SmvRuntimeError
class RunCmdLineBaseTest(SmvBaseTest):
@classmethod
def whatToRun(cls):
["-m", "None"]
@classmethod
def smvAppInitArgs(cls):
return ['--smv-props', 'smv.stages=runstage.stage1'] + cls.whatToRun()
class RunModuleFromCmdLineTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ['-m', "modules.A"]
def test_can_run_module_from_cmdline(self):
self.smvApp.run()
a = self.df("runstage.stage1.modules.A")
expected = self.createDF("k:String;v:Integer", "a,;b,2")
self.should_be_same(a, expected)
class DryRunTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ["-m", "modules.A", "--dry-run"]
def test_dry_run_just_print(self):
self.smvApp.run()
self.assertTrue(self.load("runstage.stage1.modules.A")[0].needsToRun())
class RunStageFromCmdLineTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ['-s', "runstage.stage1"]
def test_can_run_stage_from_cmdline(self):
self.smvApp.run()
a = self.df("runstage.stage1.modules.A")
self.should_be_same(a, self.createDF("k:String;v:Integer", "a,;b,2"))
b = self.df("runstage.stage1.modules.B")
self.should_be_same(b, self.createDF("k:String;v:Integer", "c,3;d,4"))
class RunNotExistModuleTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ['-m', 'tooth-fary']
def test_should_report_non_existing_module(self):
with self.assertRaisesRegexp(SmvRuntimeError, "Can't find name tooth-fary"):
self.smvApp.run()
class RunModuleAmbiguousTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ['-m', 'A']
def test_should_report_ambiguous_modules(self):
with self.assertRaisesRegexp(SmvRuntimeError, r"Partial name A is ambiguous"):
self.smvApp.run()
class SmvAppForceAllTest(RunCmdLineBaseTest):
@classmethod
def whatToRun(cls):
return ['-m', 'modules.A', '--force-run-all']
def test_should_force_run(self):
self.smvApp.run()
|
import net_arch.mobilenet_v3
import tensorflow as tf
def EfficientNetB3(shape):
model = tf.keras.applications.EfficientNetB3(
input_shape=shape,
classifier_activation=None,
include_top=False,
weights='imagenet')
return model
def MobileNetV2(shape):
model = tf.keras.applications.MobileNetV2(
input_shape=shape,
classifier_activation=None,
include_top=False,
weights='imagenet')
return model
def ResNet50V2(shape):
model = tf.keras.applications.ResNet50V2(
input_shape=shape,
classifier_activation=None,
include_top=False,
weights='imagenet')
return model
model_list = {
"MobileNetV2": MobileNetV2,
"MobileNetV3": net_arch.mobilenet_v3.MakeMobileNetV3,
"EfficientNetB3": EfficientNetB3,
"ResNet50": ResNet50V2
}
def get_model(name, shape):
return model_list[name](shape)
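# Example usage (a sketch; the name and input shape below are illustrative,
# and constructing a backbone this way downloads the ImageNet weights):
#
#     backbone = get_model('MobileNetV2', (224, 224, 3))
#     print(backbone.output_shape)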
|
from django.utils import simplejson
from django.test.client import Client
from correx.tests import ChangeTestCase
from correx.models import Change, ChangeType
from django.db.models import get_model
from django.contrib.contenttypes.models import ContentType
class CorrexViewTests(ChangeTestCase):
def setUp(self):
"""
Setting up the test client for reuse throughout.
"""
self.client = Client()
self.app_list = [i[0] for i in Change().APP_CHOICES]
def testFilterContentTypesByApp(self):
"""
Test the admin jQuery's request for the list of models
associated with a particular application.
"""
url = '/correx/admin/filter/contenttype/'
for app in self.app_list:
# Issue a GET request.
response = self.client.get(url, {'app_label': app})
# Check that the response is 200 OK.
self.failUnlessEqual(response.status_code, 200)
# Load JSON
json = simplejson.loads(response.content)
# Test to make sure it's a list
self.failUnlessEqual(type(json), type([]))
# Split the model names from the json
model_names = [i.values()[0] for i in json if i.values()[0] != '---------']
# Run through the list and make sure each is a legit model
for model in model_names:
# If it equals null it means the model couldn't be found
self.failIfEqual(get_model(app, model), None)
|
#! /usr/bin/python
import os
import re
import sys
import ssl
import json
import time
import uuid
import copy
import socket
import urllib
import Cookie
import thread
import urllib
import base64
import httplib
import datetime
import traceback
import mimetypes
import multiprocessing
import SimpleHTTPSServer
import constants
import errors
import sockhttp
import server
class call_result(object):
"""
Shares a bool between processes
"""
def __init__(self, initval=None):
self.initval = initval
self.value = initval
self.call_failed = False
def __call__(self, *args, **kwargs):
return self.result(*args, **kwargs)
def result(self, value=None):
if value is not None:
self.value = value
while self.value is self.initval:
self.failed()
return self.value
def failed(self, value=None):
if value is not None:
self.call_failed = value
elif self.call_failed is not False:
error_string = self.call_failed
error_trace = False
if constants.DOUBLE_LINE_BREAK in error_string:
error_trace = error_string.split(constants.DOUBLE_LINE_BREAK)[1]
error_string = error_string.split(constants.DOUBLE_LINE_BREAK)[0]
raise errors.ServiceCallFailed(error_string, error_trace)
return self.call_failed
class client(server.server):
"""docstring for client"""
def __init__(self):
super(client, self).__init__()
self.host = "localhost"
self.port = constants.PORT
self.ssl = False
self.name = socket.gethostname()
self.username = False
self.password = False
self.update = constants.TIME_OUT - 5
self.recv = False
self.connect_fail = False
self.crt = False
self.ping_conn = False
self.send_conn = False
self.results = {}
def http_conncet(self, recv_listen=True):
"""
Connects to the server with tcp http connections.
"""
self.log("http_conncet")
self.headers = {"Connection": "keep-alive"}
if self.username and self.password:
encoded = base64.b64encode(self.username + ':' + self.password)
self.headers["Authorization"] = "Basic " + encoded
try:
self.recv_connect(recv_listen=recv_listen)
self.ping_conn = self.httplib_conn()
self.send_conn = self.httplib_conn()
except socket.error as error:
self.log("http_conncet, failed")
self._connection_failed(error)
return True
def httplib_conn(self):
values = (self.host, self.port, )
host = "%s:%s" % values
if self.ssl:
return httplib.HTTPSConnection(host)
else:
return httplib.HTTPConnection(host)
def return_status(self, res):
"""
Returns True if there was a json to pass to recv.
"""
try:
self.log("RECEVED " + str(res))
res = json.loads(res)
if len(res) > 0:
for item in xrange(0, len(res)):
data = res[item]
data["__name__"] = self.name
self.call_recv(data)
return True
except (ValueError, KeyError):
return False
def call_recv(self, data):
if "data" in data and hasattr(self.recv, '__call__'):
as_json = self.json(data["data"])
if as_json:
data["data"] = as_json
thread.start_new_thread(self.recv, (data, ))
elif "call/return" in data or "call/failed" in data:
if "call/return" in data:
message_type = "call/return"
elif "call/failed" in data:
message_type = "call/failed"
as_json = self.json(data[message_type])
if as_json:
data[message_type] = as_json
if data[message_type] == "false":
data[message_type] = False
# Call and send back result
if "return_key" in data and data["return_key"] in self.results:
if "call/return" == message_type:
self.results[data["return_key"]](data[message_type])
elif "call/failed" == message_type:
self.results[data["return_key"]].failed(data[message_type])
del self.results[data["return_key"]]
def json(self, res):
"""
Returns json if it can.
"""
if isinstance(res, dict) or isinstance(res, list):
return res
try:
res = json.loads(res)
return res
except (ValueError, KeyError):
return False
def _connection_failed(self, error):
if "111" in str(error):
self.log(constants.CONNECTION_REFUSED)
if hasattr(self.connect_fail, '__call__'):
self.connect_fail()
else:
raise
def get(self, url, http_conn, reconnect=True):
"""
Requests the page and returns data
"""
res = ""
try:
if reconnect:
url = urllib.quote(url, safe='')
http_conn.request("GET", "/" + url, headers=self.headers)
res = http_conn.getresponse()
res = res.read()
except (AttributeError, httplib.BadStatusLine, httplib.CannotSendRequest) as error:
if reconnect:
self.log("Reconecting")
self.http_conncet(recv_listen=False)
res = self.get(url, http_conn, reconnect=False)
self.info(self.store_info, store=False)
except socket.error as error:
self._connection_failed(error)
return res
def post(self, url, data, reconnect=True):
"""
Requests the page and returns data
"""
res = ""
try:
connection = self.httplib_conn()
url = urllib.quote(url, safe='')
headers = self.headers.copy()
headers["Content-Type"] = "application/x-www-form-urlencoded"
# So we don't urlencode twice
if reconnect:
data = urllib.urlencode(data, True).replace("+", "%20")
connection.request("POST", "/" + url, data, headers)
res = connection.getresponse()
res = res.read()
except (httplib.BadStatusLine, httplib.CannotSendRequest) as error:
if reconnect:
self.log("Reconecting")
self.http_conncet(recv_listen=False)
res = self.post(url, data, reconnect=False)
except socket.error as error:
self._connection_failed(error)
return False
return res
def connect(self, host="localhost", port=constants.PORT, ssl=False, \
name=socket.gethostname(), update=constants.TIME_OUT, crt=False, \
username=False, password=False, ip=False, start_main=True, **kwargs):
"""
Starts main
"""
# Connect to ip if specified
if ip:
host = ip
self.host = host
self.port = port
self.ssl = ssl
self.name = name
self.username = username
self.password = password
self.update = update
self.crt = crt
# So that info can be sent to the server on reconnect
self.store_info = {}
self.log("Connecting to {0}:{1}".format(self.host, self.port))
self.http_conncet()
if start_main:
return thread.start_new_thread(self.main, ())
return True
def main(self):
"""
Continues to ping
"""
self.running = True
while self.running:
self.ping()
time.sleep(self.update)
return 0
def recv_connect(self, recv_listen=True):
"""
Connects a socket that the server can push to.
"""
self.recv_conn = sockhttp.conn(self.host, self.port, \
headers=self.headers, ssl=self.ssl, crt=self.crt)
url = "/connect/" + self.name
res = self.recv_conn.get(url)
res = self.return_status(res)
if recv_listen:
thread.start_new_thread(self.listen, () )
return res
def listen(self):
self.running = True
while self.running:
try:
res = self.recv_conn.recv()
if len(res):
thread.start_new_thread(self.return_status, (res, ))
except errors.RecvDisconnected as error:
self.log("RecvDisconnected, Reconecting")
time.sleep(constants.BIND_TIME)
self.http_conncet(recv_listen=False)
def ping(self):
"""
Tells the server it's still here and asks for instructions
"""
url = "ping/" + self.name
res = self.get(url, self.ping_conn)
return self.return_status(res)
def disconnect(self):
"""
Tells the server we are disconnecting
"""
url = "disconnect/" + self.name
self.running = False
res = self.get(url, self.send_conn)
return self.return_status(res)
def send(self, data, to=constants.ALL_CLIENTS):
"""
Queues data for sending
"""
url = "ping/" + self.name
if type(data) != str and type(data) != unicode:
data = json.dumps(data)
res = self.post(url, {"to": to, "data": data})
return self.return_status(res)
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
def call(self, service, method, *args, **kwargs):
"""
Calls a method on a node
"""
url = "call/" + self.name
call_args = {
"name": method,
"args": args,
"kwargs": kwargs
}
if type(call_args) != str and type(call_args) != unicode:
call_args = json.dumps(call_args)
data = {
"call": call_args,
"service": service,
# So we know where to return to
"return_key": str(uuid.uuid4())
}
result_aysc = call_result()
self.results[data["return_key"]] = result_aysc
res = self.post(url, data)
self.return_status(res)
return result_aysc
def info(self, data, store=True):
"""
Sends info to the server and, when store=True, keeps it so it can be re-sent on reconnect
"""
url = "info/" + self.name
if isinstance(data, dict) or isinstance(data, list):
if store:
self.store_info.update(data)
data = json.dumps(data)
res = self.post(url, {"info": data})
return self.return_status(res)
def connected(self):
"""
Gets others connected
"""
url = "connected"
res = self.get(url, self.send_conn)
return self.json(res)
def online(self):
"""
Gets others online
"""
connected = self.connected()
online = {}
if connected:
for item in connected:
if connected[item]["online"]:
online[item] = connected[item]
return online
|
class MovingAverage(object):
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.tot = 0
self.size = size
self.q = []
def next(self, val):
"""
:type val: int
:rtype: float
"""
tot, size, q = self.tot, self.size, self.q
q.append(val)
tot += val
if len(q) > size:
tot -= q.pop(0)
self.tot = tot
return float(tot) / len(q)
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val)
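# Worked example with size 3:
#   m = MovingAverage(3)
#   m.next(1)   -> 1.0      # window [1]
#   m.next(10)  -> 5.5      # window [1, 10]
#   m.next(3)   -> 4.67     # window [1, 10, 3], 14 / 3
#   m.next(5)   -> 6.0      # window [10, 3, 5]; the 1 has slid out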
|
"""Checks import position rule"""
# pylint: disable=unused-import
import os
try:
import ast
except ImportError:
def method(items):
"""docstring"""
value = 0
for item in items:
value += item
return value
import sys
|
"""Contrastive Explanation [WIP]: pedagogical (model-agnostic) method
for persuasive explanations based on the user's outcome of interest.
Marcel Robeer (c) 2018 - 2019
TNO, Utrecht University
Todo:
* Add more domain_mappers (image, text)
* Add more methods to obtain a foil
* Define new strategies for DecisionTreeExplanator
* Extend support for regression
* Adjust DomainMapper.generate_neighborhood_data() to
generate foil samples
"""
import numpy as np
import pandas as pd
import warnings
import sklearn
import time
from itertools import groupby
from sklearn.utils import check_random_state
from .rules import Operator, Literal
from .domain_mappers import DomainMapper
from .explanators import Explanator, TreeExplanator
from .fact_foil import FactFoilClassification, FactFoilRegression
class ContrastiveExplanation:
"""General class for creating a Contrastive Explanation."""
def __init__(self,
domain_mapper,
explanator=None,
regression=False,
verbose=False,
seed=1):
"""Init.
Args:
explanator: explanator.Explanator() to create an explanation
domain_mapper: domain_mapper.DomainMapper() for generating
neighborhood data and giving descriptive names to features
and contrasts
regression (bool): regression (True) or other type of ML
verbose (bool): Print intermediary steps of algorithm
seed (int): Seed for random functions
"""
self.seed = check_random_state(seed)
if not explanator:
explanator = TreeExplanator(seed=self.seed)
if not isinstance(domain_mapper, DomainMapper):
raise Exception('domain_mapper should be a DomainMapper')
if not isinstance(explanator, Explanator):
raise Exception('explanator should be an Explanator')
self.explanator = explanator
self.domain_mapper = domain_mapper
self.regression = regression
self.verbose = verbose
self.fact_foil = None
def _combine_features(self, decision_path):
"""Combine tuples with the same feature in a decision path.
Args:
decision_path: Decision path
Returns:
Decision path with duplicate features merged.
"""
if self.verbose:
print(f'[C] Combining full rules {decision_path}...')
def combine(rules):
seq = []
geq = []
eq = []
for rule in rules:
if rule.operator == Operator.SEQ:
seq.append(rule[2])
elif rule.operator == Operator.GT:
geq.append(rule[2])
elif rule.operator == Operator.EQ:
eq.append(rule[2])
feature = rules[0][0]
if not seq and not geq and len(eq) <= 1:
return rules
elif len(eq) > 1:
return [Literal(feature, Operator.EQ, eq)]
elif not seq:
return [Literal(feature, Operator.GT, max(geq))]
elif not geq:
return [Literal(feature, Operator.SEQ, min(seq))]
else:
return [Literal(feature, Operator.SEQ, min(seq)),
Literal(feature, Operator.GT, max(geq))]
combined = [combine(list(subiter))
for _, subiter in groupby(decision_path, lambda t: t[0])]
return [c for sc in combined for c in sc]
def form_explanation(self, decision, contrastive=True):
"""Form an explanation of Literals, combine Literals
when they describe the same feature.
"""
if decision is None:
return None
if self.verbose:
print(f'[C] Decision obtained: {decision}')
# Get explanation
exp = self.explanator.get_explanation(decision,
contrastive=contrastive)
exp = list(filter(None, exp))
# Combine explanation
return self._combine_features(exp)
def explain_instance(self,
model_predict,
fact_sample,
foil=None,
foil_method=None,
foil_strategy='informativeness',
generate_data=True,
n_samples=500,
include_factual=False,
epsilon=0.1,
**kwargs):
"""Contrastively explain an instance (counterfactual).
Args:
model_predict: Black-box model predictor (proba for class)
fact_sample: Input sample of fact
foil: Manually enter a foil (if None, uses foil_method)
foil_method: Method to decide on foil, choose
class: ('second' = second most probable decision,
'random' = randomly pick from not-foil)
reg: ('greater' = greater than fact,
'smaller' = smaller than fact)
foil_strategy: How to determine the contrastive
decision region for the foil, choose from:
('closest' = closest to fact,
'size' = based on number of instances in node,
'impurity' = minimize the impurity difference,
'informativeness' = weighted function of size and impurity,
'random' = random pick)
generate_data (bool): Generate neighborhood data (True) or pick
from training data (False)
n_samples (int): Number of samples to pick from data
include_factual (bool): Also return a factual explanation tree,
trained on generated/sampled data.
epsilon: Small offset for regression, increase when no explanation
is found.
Returns:
Tuple (fact, foil, counterfactual, factual, confidence, fidelity,
elapsed time), fed into the explain() function of the domain_mapper
"""
if type(fact_sample) is pd.core.series.Series:
fact_sample = np.array(fact_sample)
st = time.time()
# Get fact and foil
if self.regression:
self.fact_foil = FactFoilRegression(verbose=self.verbose,
epsilon=epsilon)
else:
self.fact_foil = FactFoilClassification(verbose=self.verbose)
if foil is not None:
foil = self.domain_mapper.map_contrast_names(foil, inverse=True)
fact, foil = self.fact_foil.get_fact(model_predict,
fact_sample,
foil)
if foil is None:
fact, foil = self.fact_foil.get_fact_foil(model_predict,
fact_sample,
foil_method=foil_method)
# Generate neighborhood data
if self.verbose:
print('[D] Obtaining neighborhood data')
encoded_fact_sample = self.domain_mapper.apply_encode(fact_sample)
if generate_data:
data_fn = self.domain_mapper.generate_neighborhood_data
else:
data_fn = self.domain_mapper.sample_training_data
xs, weights, ys, fact_sample = data_fn(encoded_fact_sample,
model_predict,
n_samples=n_samples,
foil_encode_fn=self.fact_foil.encode,
**kwargs)
# Encode foil such that foil = 1 / else = 0
ys_foil = self.fact_foil.encode(ys)
if 1 not in ys_foil:
warnings.warn('Neighborhood data does not contain any foils')
return fact, foil, None, None, 0, 0, time.time() - st
# Train model and get rules
exp_return = self.explanator.get_rule(encoded_fact_sample,
fact,
foil,
xs,
ys_foil,
weights,
foil_strategy=foil_strategy)
rule, confidence, local_fidelity = exp_return
# Explain difference between fact and closest decision
counterfactual = self.form_explanation(rule)
# Also explain using factual if required
factual = None
if include_factual:
if type(self.explanator) is TreeExplanator:
e = self.explanator
else:
e = TreeExplanator()
if not self.regression:
t = sklearn.tree.DecisionTreeClassifier(random_state=self.seed,
class_weight='balanced')
else:
t = sklearn.tree.DecisionTreeRegressor(random_state=self.seed)
t.fit(xs, ys, sample_weight=weights)
if t.tree_.node_count > 1:
fact_rule = e.decision_path(t, encoded_fact_sample)
factual = self.form_explanation(fact_rule, contrastive=False)[:-1]
else:
factual = None
# Warnings
if not counterfactual:
# First, try to overfit more to get explanation
if (type(self.explanator) is TreeExplanator and
self.explanator.generalize < 2):
self.explanator.generalize = 2
return self.explain_instance(model_predict,
encoded_fact_sample,
foil_method=foil_method,
foil_strategy=foil_strategy,
generate_data=generate_data,
n_samples=n_samples,
include_factual=include_factual,
epsilon=epsilon,
**kwargs)
n = self.domain_mapper.map_contrast_names
warnings.warn(f'Could not find a difference between fact '
f'"{n(fact)}" and foil "{n(foil)}"')
if self.regression:
warnings.warn('Consider increasing epsilon')
return (fact, foil,
counterfactual, factual,
confidence, local_fidelity,
time.time() - st)
def explain_instance_domain(self,
*args,
**kwargs):
"""Explain instance and map to domain. For arguments see
ContrastiveExplanation.explain_instance().
"""
return self.domain_mapper.explain(*self.explain_instance(*args,
**kwargs))
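# Usage sketch (hypothetical names; based only on the signatures defined above).
# `dm` stands for some concrete DomainMapper from .domain_mappers and `model`
# for any fitted classifier exposing predict_proba:
#
#   exp = ContrastiveExplanation(dm)
#   print(exp.explain_instance_domain(model.predict_proba, sample))
#
# explain_instance_domain() forwards its arguments to explain_instance() and
# passes the resulting (fact, foil, counterfactual, ...) tuple through
# dm.explain() to produce a human-readable explanation.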
|
import unittest # Importing the unittest module
from user import User # Importing the User class
class TestUser(unittest.TestCase):
"""Test Class for defining test cases for user class behaviours
Args:
unittest.TestCase: TestCase class that helps in creating test cases
"""
def setUp(self):
"""Set up method to run before each test cases.
"""
self.new_user = User("Justo", "Ochung", "lK7HR,0Op") # Create user object
def test_init(self):
"""test_init test case to test if the object is initialized properly
"""
self.assertEqual(self.new_user.account, "Justo") #TestCase method that checks for an expected result
self.assertEqual(self.new_user.user_username, "Ochung")
self.assertEqual(self.new_user.user_password, "lK7HR,0Op")
def test_save_user(self):
"""Test case to test if the user object is saved into the user list
"""
self.new_user.save_user() #saving the new user
self.assertEqual(len(User.user_list), 1)
def tearDown(self):
"""Method for cleaning up after each test case has run
"""
User.user_list = []
def test_save_multiple_user(self):
"""Test case to check if we can save multiple user objects to user list
"""
self.new_user.save_user()
test_user = User("Test", "user", "0799762744") # New User
test_user.save_user()
self.assertEqual(len(User.user_list),2)
def test_delete_user(self):
"""Test case to remove a contact from user list
"""
self.new_user.save_user()
test_user = User("Test", "user", "0799762744")
test_user.save_user()
self.new_user.delete_user() #Deleting a user object
self.assertEqual(len(User.user_list),1)
if __name__ == '__main__':
unittest.main()
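# Minimal sketch (hypothetical; the real user.py may differ) of the User class
# these tests assume: three constructor arguments, a class-level user_list,
# and save_user()/delete_user() methods.
#
# class User:
#     user_list = []
#
#     def __init__(self, account, user_username, user_password):
#         self.account = account
#         self.user_username = user_username
#         self.user_password = user_password
#
#     def save_user(self):
#         User.user_list.append(self)
#
#     def delete_user(self):
#         User.user_list.remove(self)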
|
from mesh.generic.nodeHeader import createHeader, packHeader, headers
from struct import pack
class TestNodeHeader:
def setup_method(self, method):
self.nodeHeaderIn = ['NodeHeader', [3, 4, 5]]
self.minimalHeaderIn = ['MinimalHeader', [3]]
self.sourceHeaderIn = ['SourceHeader', [3, 4]]
pass
def test_createHeader(self):
"""Test creation of all header types."""
# NodeHeader
header = createHeader(self.nodeHeaderIn)
self.checkHeaderContents(header, self.nodeHeaderIn)
# MinimalHeader
header = createHeader(self.minimalHeaderIn)
self.checkHeaderContents(header, self.minimalHeaderIn)
# SourceHeader
header = createHeader(self.sourceHeaderIn)
self.checkHeaderContents(header, self.sourceHeaderIn)
def test_packHeader(self):
"""Test packing of all defined header types."""
# NodeHeader
nodeHeader = createHeader(self.nodeHeaderIn)
packedHeader = pack(headers[self.nodeHeaderIn[0]]['format'], *self.nodeHeaderIn[1])
assert(packHeader(nodeHeader) == packedHeader)
# MinimalHeader
minimalHeader = createHeader(self.minimalHeaderIn)
packedHeader = pack(headers[self.minimalHeaderIn[0]]['format'], *self.minimalHeaderIn[1])
assert(packHeader(minimalHeader) == packedHeader)
# SourceHeader
sourceHeader = createHeader(self.sourceHeaderIn)
packedHeader = pack(headers[self.sourceHeaderIn[0]]['format'], *self.sourceHeaderIn[1])
assert(packHeader(sourceHeader) == packedHeader)
def checkHeaderContents(self, header, headerIn):
headerType = headerIn[0]
assert(header['type'] == headerType)
assert(len(header['header']) == len(headerIn[1]))
for i in range(len(headers[headerType]['entries'])):
assert(header['header'][headers[headerType]['entries'][i]] == headerIn[1][i])
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for retrieving a list of resources from the cloud.
Typically executed in a task iterator:
googlecloudsdk.command_lib.storage.tasks.task_executor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import enum
from googlecloudsdk.api_lib.storage import cloud_api
from googlecloudsdk.command_lib.storage import errors
from googlecloudsdk.command_lib.storage import plurality_checkable_iterator
from googlecloudsdk.command_lib.storage import storage_url
from googlecloudsdk.command_lib.storage import wildcard_iterator
from googlecloudsdk.command_lib.storage.resources import resource_reference
from googlecloudsdk.command_lib.storage.resources import resource_util
from googlecloudsdk.command_lib.storage.tasks import task
from googlecloudsdk.core.util import scaled_integer
import six
LONG_LIST_ROW_FORMAT = ('{size:>10} {creation_time:>20} {url}{metageneration}'
'{etag}')
class DisplayDetail(enum.Enum):
"""Level of detail to display about items being printed."""
SHORT = 1
LONG = 2
FULL = 3
JSON = 4
def _translate_display_detail_to_fields_scope(
display_detail, is_bucket_listing):
"""Translates display details to fields scope equivalent.
Args:
display_detail (DisplayDetail): Argument to translate.
is_bucket_listing (bool): Buckets require special handling.
Returns:
cloud_api.FieldsScope appropriate for the resources and display detail.
"""
# Long listing is the same as normal listing for buckets.
if display_detail == DisplayDetail.LONG and is_bucket_listing:
return cloud_api.FieldsScope.SHORT
display_detail_to_fields_scope = {
DisplayDetail.SHORT: cloud_api.FieldsScope.SHORT,
DisplayDetail.LONG: cloud_api.FieldsScope.NO_ACL,
DisplayDetail.FULL: cloud_api.FieldsScope.FULL,
DisplayDetail.JSON: cloud_api.FieldsScope.FULL,
}
return display_detail_to_fields_scope[display_detail]
class _BaseFormatWrapper(six.with_metaclass(abc.ABCMeta)):
"""For formatting how items are printed when listed.
Attributes:
resource (resource_reference.Resource): Item to be formatted for printing.
"""
def __init__(self, resource, display_detail=DisplayDetail.SHORT,):
"""Initializes wrapper instance.
Args:
resource (resource_reference.Resource): Item to be formatted for printing.
display_detail (DisplayDetail): Level of metadata detail for printing.
"""
self.resource = resource
self._display_detail = display_detail
class _HeaderFormatWrapper(_BaseFormatWrapper):
"""For formatting how containers are printed as headers when listed."""
def __str__(self):
url = self.resource.storage_url.versionless_url_string
if self._display_detail == DisplayDetail.JSON:
return self.resource.get_json_dump()
# This will print as "gs://bucket:" or "gs://bucket/prefix/:".
return '\n{}:'.format(url)
class _ResourceFormatWrapper(_BaseFormatWrapper):
"""For formatting how resources print when listed."""
def __init__(self,
resource,
all_versions=False,
display_detail=DisplayDetail.SHORT,
include_etag=False,
readable_sizes=False):
"""Initializes wrapper instance.
Args:
resource (resource_reference.Resource): Item to be formatted for printing.
all_versions (bool): Display information about all versions of resource.
display_detail (DisplayDetail): Level of metadata detail for printing.
include_etag (bool): Display etag string of resource.
readable_sizes (bool): Convert bytes to a more human-readable format for
long listing. For example, print 1024B as 1KiB.
"""
self._all_versions = all_versions
self._include_etag = include_etag
self._readable_sizes = readable_sizes
super().__init__(resource, display_detail)
def _format_for_list_long(self):
"""Returns string of select properties from resource."""
if isinstance(self.resource, resource_reference.PrefixResource):
# Align PrefixResource URLs with ObjectResource URLs.
return LONG_LIST_ROW_FORMAT.format(
size='', creation_time='',
url=self.resource.storage_url.url_string, metageneration='',
etag='')
creation_time = resource_util.get_formatted_timestamp_in_utc(
self.resource.creation_time)
if self._all_versions:
url_string = self.resource.storage_url.url_string
metageneration_string = ' metageneration={}'.format(
str(self.resource.metageneration))
else:
url_string = self.resource.storage_url.versionless_url_string
metageneration_string = ''
if self._include_etag:
etag_string = ' etag={}'.format(str(self.resource.etag))
else:
etag_string = ''
if self._readable_sizes and self.resource.size is not None:
size = scaled_integer.FormatBinaryNumber(
self.resource.size, decimal_places=2)
else:
# Also handles None values.
size = str(self.resource.size)
# Full example (add 9 spaces of padding to the left):
# 8 2020-07-27T20:58:25Z gs://b/o metageneration=4 etag=CJqt6aup7uoCEAQ=
return LONG_LIST_ROW_FORMAT.format(
size=size,
creation_time=creation_time,
url=url_string,
metageneration=metageneration_string,
etag=etag_string)
def __str__(self):
if self._display_detail == DisplayDetail.LONG and (
isinstance(self.resource, resource_reference.ObjectResource) or
isinstance(self.resource, resource_reference.PrefixResource)):
return self._format_for_list_long()
if self._display_detail == DisplayDetail.FULL and (
isinstance(self.resource, resource_reference.BucketResource) or
isinstance(self.resource, resource_reference.ObjectResource)):
return self.resource.get_full_metadata_string()
if self._display_detail == DisplayDetail.JSON:
return self.resource.get_json_dump()
if self._all_versions:
# Include generation in URL.
return self.resource.storage_url.url_string
return self.resource.storage_url.versionless_url_string
class CloudListTask(task.Task):
"""Represents an ls command operation."""
def __init__(self,
cloud_url,
all_versions=False,
buckets_flag=False,
display_detail=DisplayDetail.SHORT,
include_etag=False,
readable_sizes=False,
recursion_flag=False):
"""Initializes task.
Args:
cloud_url (storage_url.CloudUrl): Object for a non-local filesystem URL.
all_versions (bool): Determine whether or not to return all versions of
listed objects.
buckets_flag (bool): If given a bucket URL, only return matching buckets
ignoring normal recursion rules.
display_detail (DisplayDetail): Determines level of metadata printed.
include_etag (bool): Print etag string of resource, depending on other
settings.
readable_sizes (bool): Convert bytes to a more human-readable format for
long listing. For example, print 1024B as 1KiB.
recursion_flag (bool): Recurse through all containers and format all
container headers.
"""
super().__init__()
self._cloud_url = cloud_url
self._all_versions = all_versions
self._buckets_flag = buckets_flag
self._display_detail = display_detail
self._include_etag = include_etag
self._readable_sizes = readable_sizes
self._recursion_flag = recursion_flag
self._only_display_buckets = self._cloud_url.is_provider() or (
self._buckets_flag and self._cloud_url.is_bucket())
def _get_container_iterator(
self, cloud_url, recursion_level):
"""For recursing into and retrieving the contents of a container.
Args:
cloud_url (storage_url.CloudUrl): Container URL for recursing into.
recursion_level (int): Determines if iterator should keep recursing.
Returns:
_BaseFormatWrapper generator.
"""
# End URL with '/*', so WildcardIterator won't filter out its contents.
new_url_string = cloud_url.versionless_url_string
if cloud_url.versionless_url_string[-1] != cloud_url.delimiter:
new_url_string += cloud_url.delimiter
new_cloud_url = storage_url.storage_url_from_string(new_url_string + '*')
fields_scope = _translate_display_detail_to_fields_scope(
self._display_detail, is_bucket_listing=False)
iterator = wildcard_iterator.CloudWildcardIterator(
new_cloud_url,
all_versions=self._all_versions,
fields_scope=fields_scope)
return self._recursion_helper(iterator, recursion_level)
def _recursion_helper(self, iterator, recursion_level):
"""For retrieving resources from URLs that potentially contain wildcards.
Args:
iterator (Iterable[resource_reference.Resource]): For recursing through.
recursion_level (int): Integer controlling how deep the listing
recursion goes. "1" is the default, mimicking the actual OS ls, which
lists the contents of the first level of matching subdirectories.
Call with "float('inf')" for listing everything available.
Yields:
_BaseFormatWrapper generator.
"""
for resource in iterator:
# Check if we need to display contents of a container.
if resource.is_container() and recursion_level > 0:
yield _HeaderFormatWrapper(
resource, display_detail=self._display_detail)
# Get container contents by adding wildcard to URL.
nested_iterator = self._get_container_iterator(
resource.storage_url, recursion_level-1)
for nested_resource in nested_iterator:
yield nested_resource
else:
# Resource wasn't a container we can recurse into, so just yield it.
yield _ResourceFormatWrapper(
resource,
all_versions=self._all_versions,
display_detail=self._display_detail,
include_etag=self._include_etag,
readable_sizes=self._readable_sizes)
def _print_json_list(self, resource_wrappers):
"""Prints ResourceWrapper objects as JSON list."""
is_empty_list = True
for i, resource_wrapper in enumerate(resource_wrappers):
is_empty_list = False
if i == 0:
# Start of JSON list for long long listing.
print('[')
print(resource_wrapper, end='')
else:
# Print resource without newline at end to allow list formatting for
# unknown number of items in generator.
print(',\n{}'.format(resource_wrapper), end='')
# New line because we were removing it from previous prints to give us
# the ability to do a trailing comma for JSON list printing.
print()
if not is_empty_list:
# Close long long listing JSON list. Prints nothing if no items.
print(']')
def _print_row_list(self, resource_wrappers):
"""Prints ResourceWrapper objects in list with custom row formatting."""
object_count = total_bytes = 0
for i, resource_wrapper in enumerate(resource_wrappers):
resource_wrapper_string = str(resource_wrapper)
if i == 0 and resource_wrapper and resource_wrapper_string[0] == '\n':
# First print should not begin with a line break, which can happen
# for headers.
print(resource_wrapper_string[1:])
else:
print(resource_wrapper_string)
if isinstance(resource_wrapper.resource,
resource_reference.ObjectResource):
# For printing long listing data summary.
object_count += 1
total_bytes += resource_wrapper.resource.size or 0
if (self._display_detail in (DisplayDetail.LONG, DisplayDetail.FULL) and
not self._only_display_buckets):
# Long listing needs summary line.
print('TOTAL: {} objects, {} bytes ({})'.format(
object_count, int(total_bytes),
scaled_integer.FormatBinaryNumber(total_bytes, decimal_places=2)))
def execute(self, task_status_queue=None):
"""Recursively create wildcard iterators to print all relevant items."""
# List task does not need to report status information.
del task_status_queue
fields_scope = _translate_display_detail_to_fields_scope(
self._display_detail, is_bucket_listing=self._cloud_url.is_provider())
resources = plurality_checkable_iterator.PluralityCheckableIterator(
wildcard_iterator.CloudWildcardIterator(
self._cloud_url,
all_versions=self._all_versions,
fields_scope=fields_scope,
get_bucket_metadata=self._buckets_flag))
if resources.is_empty():
raise errors.InvalidUrlError('One or more URLs matched no objects.')
if self._only_display_buckets:
# Received a provider URL ("gs://") -> List all buckets.
# Received buckets flag and bucket URL -> List matching buckets, ignoring
# recursion.
resources_wrappers = self._recursion_helper(resources, recursion_level=0)
elif self._recursion_flag and '**' not in self._cloud_url.url_string:
# "**" overrides recursive flag.
resources_wrappers = self._recursion_helper(resources, float('inf'))
elif not resources.is_plural() and resources.peek().is_container():
# One container was returned by the query, in which case we show
# its contents.
resources_wrappers = self._get_container_iterator(
resources.peek().storage_url, recursion_level=0)
else:
resources_wrappers = self._recursion_helper(resources, recursion_level=1)
if self._display_detail == DisplayDetail.JSON:
self._print_json_list(resources_wrappers)
else:
self._print_row_list(resources_wrappers)
|
_base_ = [
'../_base_/models/upernet_swin.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
backbone=dict(
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=1
),
auxiliary_head=dict(
in_channels=384,
num_classes=1
))
|
from django.core.management.base import BaseCommand, CommandError
from estatisticas_facebook.pages.models import *
from estatisticas_facebook.posts.models import getPostInfo
from estatisticas_facebook.comments.models import getCommentInfo
from estatisticas_facebook.reactions.models import getReactionInfo
class Command(BaseCommand):
help = 'Save page insights'
def add_arguments(self, parser):
parser.add_argument(
'--since',
dest='since',
help='Date to extract ',
)
def handle(self, *args, **options):
print('Extracting Page insights starting from: ' + str(options['since']))
args = {}
args['since'] = str(options['since'])
args['id'] = '316136345150243'
getPageInsights(args)
page_model = Page.objects.get(id__iexact=args['id'])
getPostInfo(page_model,args['since'])
getCommentInfo(page_model)
getReactionInfo(page_model)
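# Usage sketch (the command name comes from this module's filename, which is
# not shown here; '<command>' is a placeholder):
#   $ python manage.py <command> --since 2021-01-01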
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-7-10 10:22 AM
# @Author : Tom.Lee
# @File : manager.py
# @Product : PyCharm
# @Docs :
# @Source :
import sys
from flaskapp import app
from flaskapp import manager
if __name__ == '__main__':
# initialize
# $ python manager.py db init
# create / update tables
# $ python manager.py db migrate
# upgrade / migrate
# $ python manager.py db upgrade
# downgrade
# $ python manager.py db downgrade
# other
# $ python manager.py db --help
# runserver
# $ python manager.py runserver
# shell
# $ python manager.py shell
commands = sys.argv
if len(commands) == 2 and 'runserver' == commands[1]:
app.run()
else:
manager.run()
|
import os
from conans import ConanFile, tools
class FastNoise(ConanFile):
name = "fastnoise"
version = "1.0.1"
license = "MIT"
url = "https://github.com/Auburn/FastNoise"
description = "coherent noise-generating library for C++"
topics = ("noise",)
settings = "os", "arch", "compiler", "build_type"
options: dict = {}
default_options: dict = {}
generators = "cmake_find_package"
def source(self):
tools.download(f"{self.url}/archive/v{self.version}.tar.gz", "src.tar.gz")
tools.untargz("src.tar.gz")
def package(self):
self.copy("*.h*", dst="include", src=os.path.join(f"FastNoise-{self.version}", "Cpp"))
|
from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
def to_file_pdb(item, atom_indices='all', structure_indices='all', output_filename=None, check=True):
if check:
digest_item(item, 'string:pdb_id')
atom_indices = digest_atom_indices(atom_indices)
structure_indices = digest_structure_indices(structure_indices)
from ..file_pdb import download as download_file_pdb
from ..file_pdb import extract as extract_file_pdb
tmp_item = download_file_pdb(item.replace('pdb_id:', ''), output_filename)
tmp_item = extract_file_pdb(tmp_item, atom_indices=atom_indices, structure_indices=structure_indices,
output_filename=tmp_item, copy_if_all=False, check=False)
return tmp_item
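# Usage sketch (assumes network access and the file_pdb helpers imported above;
# the PDB id is only an example):
#   to_file_pdb('pdb_id:1TCD', output_filename='1TCD.pdb')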
|
import wrapped_flappy_bird as game
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import glob
import h5py
t0 = time.time()
ALPHA = .7 # learning rate
GAMMA = 0.95 # discount factor
# EPISODES = 100_000 # 17 minute run time
# EPISODES = 600_000 # 36 minute run time
# EPISODES = 600_000 # 93 minute run time
# EPISODES = 600_000 # 93 minute run time
# EPISODES = 600_000*5.4 # 93 minute run time
EPISODES = 3240000 # 93 minute run time
# EPISODES = 10 # 17 minute run time
# EPISODES = 10_000
# EPISODES = 1000
# SHOW_EVERY = 100_000
SHOW_EVERY = 3240000
# SHOW_EVERY = 1_000
# SHOW_EVERY = 1
# AFTER = 80_000
AFTER = 0
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES//2
epsilon_decay_value = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)
# FLAP_EVERY = 17
bin_count = [200, 410, 410, 10] # [20, 20]
# bin_count = [220, 451, 451, 380, 10] # [20, 20]
env_state_high = np.array([250, 234, 234, 11])
env_state_low = np.array([30, -217, -217, -9])
env_number_of_actions = 2
# bin_size = ([234 - -60, 200 - -200 ]) / bin_count
bin_size = (env_state_high - env_state_low) / bin_count
# q_table = np.random.uniform(low= -0.2, high=0.2, size=(bin_count[0],bin_count[1],2))
# q_table = np.random.uniform(low= -0.1, high=0.0, size=(bin_count + [env_number_of_actions]))
# q_table = np.random.uniform(low= -0.2, high=0.0, size=(bin_count + [env_number_of_actions]))
# q_table[:,:,1] = np.random.uniform(low=-.5, high=0.0, size=(bin_count[0],bin_count[1])) # de-emphasize flap (avoid hitting ceiling)
# q_table = np.load(f"./qtables/{7078}-qtable.npy")
# hfr = h5py.File(f"qtables/{6640}-qtable.h5", 'r')
# q_table = np.array(hfr.get('dataset_1'))
# hfr.close()
hfr = h5py.File(f"qtables/qtable_long.h5", 'r')
q_table = np.array(hfr.get('dataset_1'))
hfr.close()
def discretize_state(state):
# print(state)
# print(state - env.observation_space.low)
discrete_state = (state - env_state_low) / bin_size
# print(discrete_state)
return tuple(discrete_state.astype(int))
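# Worked example (illustrative values only):
# bin_size = (env_state_high - env_state_low) / bin_count
#          = [220/200, 451/410, 451/410, 20/10] = [1.1, 1.1, 1.1, 2.0]
# discretize_state(np.array([140, 0, 0, 0]))
#   -> ((140-30)/1.1, (0+217)/1.1, (0+217)/1.1, (0+9)/2.0)
#   -> (100.0, 197.27..., 197.27..., 4.5) -> (100, 197, 197, 4) after the int cast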
episode_state_action_new_states = []
frames_survived = []
env_max_measured_values = [-999, -999, -999, -999, -999]
env_min_measured_values = [999, 999, 999, 999, 999]
best_frames_survived = 0
for episode in range(EPISODES):
game_state = game.GameState()
total_frames = 0
max_frames = 10000 # can change this number as desired
action = 0 # first action will always be nothing
state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)
# print("starting state: ", state)
action = 0 # first action will always be nothing
state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)
# print("starting state: ", state)
discrete_state = discretize_state(state)
for frame in range(max_frames):
try:
action = np.argmax(q_table[discrete_state])
# if np.random.random() > epsilon:
# # Get action from Q table
# action = np.argmax(q_table[discrete_state])
# else:
# # Get random action
# roll = np.random.uniform(low=0.0, high=1.0)
# if roll < 0.80: # do random action, with emphasis on doing nothing
# action = 0
# else:
# action = 1
# action = np.argmax(q_table[discrete_state])
# if frame % FLAP_EVERY == 0: action = 1
# else: action = 0
except:
print(state)
# new_state, reward, done = game_state.frame_step(action, headless=False, desired_fps=10)
if episode % SHOW_EVERY == 0 and episode > AFTER:
new_state, reward, done = game_state.frame_step(action, headless=False, desired_fps=30)
print(new_state, action)
else:
new_state, reward, done = game_state.frame_step(action, headless=True, desired_fps=16000)
# if new_state[0] == 257.0:
# pass
# print("stop")
total_frames += 1
if not done:
# if new_state[0] < env_min_measured_values[0]:
# env_min_measured_values[0] = new_state[0]
# if new_state[1] < env_min_measured_values[1]:
# env_min_measured_values[1] = new_state[1]
# if new_state[2] < env_min_measured_values[2]:
# env_min_measured_values[2] = new_state[2]
# if new_state[3] < env_min_measured_values[3]:
# env_min_measured_values[3] = new_state[3]
#
# if new_state[0] > env_max_measured_values[0]:
# env_max_measured_values[0] = new_state[0]
# if new_state[1] > env_max_measured_values[1]:
# env_max_measured_values[1] = new_state[1]
# if new_state[2] > env_max_measured_values[2]:
# env_max_measured_values[2] = new_state[2]
# if new_state[3] > env_max_measured_values[3]:
# env_max_measured_values[3] = new_state[3]
new_discrete_state = discretize_state(new_state)
episode_state_action_new_states.append((discrete_state, action, new_discrete_state))
# # max_future_q = np.max(q_table[discrete_state]) # big mistake
# max_future_q = np.max(q_table[new_discrete_state])
# current_q = q_table[discrete_state][action]
# new_q = (1 - ALPHA) * current_q + ALPHA * (reward + GAMMA * max_future_q)
# q_table[discrete_state][action] = new_q
elif done:
# new_q = (1 - ALPHA) * current_q + ALPHA * (reward)
# q_table[discrete_state][action] = new_q
episode_state_action_new_states.reverse() # the very last (faulty) state was never appended (the branch above is skipped once done)
last_flap_dealt_with = False
if episode_state_action_new_states[0][0][1] > 0: upper_pipe_death = True
else: upper_pipe_death = False
# bird has died, update q values
for idx, state_action_new_state in enumerate(episode_state_action_new_states):
discrete_state_ = state_action_new_state[0]
action_ = state_action_new_state[1]
new_discrete_state_ = state_action_new_state[2]
# idea behind this: an upper-pipe death is blamed on the flap ACTION, whereas a lower-pipe death is blamed on not flapping
# if upper_pipe_death == True:
if last_flap_dealt_with == False and upper_pipe_death == True and action_ == 1: # deal with last flap if we haven't before and action = 1 = flap and we had upper_pipe_death
max_future_q = np.max(q_table[new_discrete_state_])
current_q = q_table[discrete_state_][action_]
new_q = (1 - ALPHA) * current_q + ALPHA * (-1000 + GAMMA * max_future_q) # -1000 reward
q_table[discrete_state_][action_] = new_q
last_flap_dealt_with = True
elif idx == 0 or idx == 1: # punish anything near ceiling, floor, or pipes
max_future_q = np.max(q_table[new_discrete_state_])
current_q = q_table[discrete_state_][action_]
new_q = (1 - ALPHA) * current_q + ALPHA * (-1000 + GAMMA * max_future_q) # -1000 reward
q_table[discrete_state_][action_] = new_q
else: # else, normal case, just give +1 reward
max_future_q = np.max(q_table[new_discrete_state_])
current_q = q_table[discrete_state_][action_]
new_q = (1 - ALPHA) * current_q + ALPHA * (1 + GAMMA * max_future_q) # +1 reward
q_table[discrete_state_][action_] = new_q
episode_state_action_new_states = [] # empty out saved states action state tuples
print("Total Frames ", str(total_frames), " for episode ", episode)
if total_frames > best_frames_survived:
best_frames_survived = total_frames
# if total_frames > 4000: # save hard drive space
# # np.save(f"qtables/{total_frames}-qtable.npy", q_table)
# hfw = h5py.File(f"qtables/{total_frames}-qtable.h5", 'w')
# hfw.create_dataset('dataset_1', data=q_table)
# hfw.close()
if total_frames >= 10000: # save hard drive space
print("saving q table over 4000")
# np.save(f"qtables/{total_frames}-qtable.npy", q_table)
# hfw = h5py.File(f"qtables/{11111111}-qtable.h5", 'w')
hfw = h5py.File(f"qtables/{total_frames}-qtable_long.h5", 'w')
hfw.create_dataset('dataset_1', data=q_table)
hfw.close()
print("q table done saving over 4000")
if episode == EPISODES-1: # save hard drive space
print("saving q table")
# np.save(f"qtables/{total_frames}-qtable.npy", q_table)
# hfw = h5py.File(f"qtables/{11111111}-qtable.h5", 'w')
hfw = h5py.File(f"qtables/qtable_long.h5", 'w')
hfw.create_dataset('dataset_1', data=q_table)
hfw.close()
print("q table done saving")
break
discrete_state = new_discrete_state
frames_survived.append(total_frames)
# Decaying is being done every episode if episode number is within decaying range
if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
epsilon -= epsilon_decay_value
print(" ")
print("best_frames_survived: ", best_frames_survived)
t1 = time.time()
print("total time: ", t1-t0) # 9.764827251434326, 20,000 episodes, completely headless, 16000 FPS
plt.plot(range(len(frames_survived)), frames_survived, linestyle='', marker='.')
plt.show()
print("total frames survived = ", sum(frames_survived))
print("min frames survived: ", min(frames_survived) )
print("average frames survived: ", sum(frames_survived)/len(frames_survived) )
print("max frames survived: ", max(frames_survived))
print(" ")
print("env_min_measured_values: ", env_min_measured_values)
print("env_max_measured_values: ", env_max_measured_values)
|
import turtle
import pandas
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
data = pandas.read_csv("50_states.csv")
all_states = data.state.to_list()
guessed_states = []
while len(guessed_states) < 50:
answer_state = screen.textinput(title=f"{len(guessed_states)}/50 States Correct", prompt="What's another state name?").title()
# check whether the answer is one of the 50 states listed in 50_states.csv
if answer_state == "Exit":
missing_states = [state for state in all_states if state not in guessed_states]
new_data = pandas.DataFrame(missing_states)
new_data.to_csv("states_to_learn.csv")
break
if answer_state in all_states:
guessed_states.append(answer_state)
t = turtle.Turtle()
t.speed("fastest")
t.hideturtle()
t.penup()
state_data = data[data.state == answer_state]
t.goto(int(state_data.x), int(state_data.y))
t.write(answer_state)
# Get the coordinates of the states
# def get_mouse_click_coor(x, y):
# print(x, y)
#
#
# turtle.onscreenclick(get_mouse_click_coor)
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def checkSubTree(self, t1: TreeNode, t2: TreeNode) -> bool:
return self.preorder_traversal(t1, t2)
def preorder_traversal(self, p: TreeNode, t2: TreeNode) -> bool:
if not p:
return False
elif self.preorder(p, t2):
return True
return self.preorder_traversal(p.left, t2) or self.preorder_traversal(p.right, t2)
def preorder(self, p: TreeNode, q: TreeNode) -> bool:
if not p and not q:
return True
elif not q or not p:
return False
elif p.val != q.val:
return False
return self.preorder(p.left, q.left) and self.preorder(p.right, q.right)
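# Small self-check (illustrative trees only): the node (2) is a subtree of
# 1 -> (2, 3), while a lone node (4) is not.
_t1 = TreeNode(1)
_t1.left = TreeNode(2)
_t1.right = TreeNode(3)
assert Solution().checkSubTree(_t1, TreeNode(2))
assert not Solution().checkSubTree(_t1, TreeNode(4))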
|
"""Load a yaml resource from a python package."""
import pkgutil
import types
import yamlsettings
from yamlsettings.extensions.base import YamlSettingsExtension
class PackageExtension(YamlSettingsExtension):
"""Load a yaml resource from a python package.
Args:
resource: The resource to load from the package (default: settings.yaml)
env: When set the yamldict will update with env variables (default: true)
prefix: Prefix for environment loading (default: None)
persist: When set the yamldict will only be loaded once. (default: true)
examples:
* pkg://example (opens the settings.yaml resource and loads env vars)
"""
protocols = ['pkg', 'package']
default_query = {
'resource': 'settings.yaml',
'env': True,
'prefix': None,
'persist': True,
}
_persistence = {}
@classmethod
def load_target(cls, scheme, path, fragment, username,
password, hostname, port, query,
load_method, **kwargs):
package_path = (hostname or '') + path
query.update(kwargs)
resource = query['resource']
env = query['env']
prefix = query['prefix']
persist = query['persist']
persistence_key = "{}:{}".format(package_path, resource)
if persist and persistence_key in cls._persistence:
yaml_contents = cls._persistence[persistence_key]
else:
pkg_data = pkgutil.get_data(package_path, resource)
if pkg_data is None:
raise IOError("package - {}:{}".format(package_path, resource))
yaml_contents = load_method(pkg_data)
# load_all returns a generator of configurations
many = isinstance(yaml_contents, types.GeneratorType)
yaml_contents = list(yaml_contents) if many else yaml_contents
if env and many:
for contents in yaml_contents:
yamlsettings.update_from_env(contents, prefix)
elif env:
yamlsettings.update_from_env(yaml_contents, prefix)
if persist:
cls._persistence[persistence_key] = yaml_contents
return yaml_contents
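# Usage sketch (assumes yamlsettings.load() dispatches URLs to registered
# extensions and that 'mypackage' ships a settings.yaml resource):
#
#   config = yamlsettings.load('pkg://mypackage')
#   config = yamlsettings.load('pkg://mypackage?resource=other.yaml')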
|
# -*- coding: utf-8 -*-
from collections import defaultdict
class Annotation(object):
"""This class represents an annotation."""
def __init__(self, id, representation, spans, labels=()):
"""
Create an annotation object.
:param id: (string) The id of the current annotation.
:param representation: (string) The string representation of the
annotation. Doesn't take into account the fact that annotations may be
discontinuous.
:param spans: (list of list of ints) A list of list of ints
representing the starting and ending points, in characters, for any
words in the annotation.
:param labels: (list of strings) a list of initial labels for the
annotation object. Each label starts without a value (an empty list).
:return: None
"""
self.id = id
# Returns a new dictionary-like object.
self.links = defaultdict(list)
self.labels = defaultdict(list)
for label in labels:
self.labels[label] = []
self.repr = representation
self.spans = spans
self.realspan = (spans[0][0], spans[-1][1])
self.words = []
def __repr__(self):
"""Representation of the annotation."""
return "Annotation: {0}".format(self.repr.encode("utf-8"))
|
import pytest
from src.graph.graph import Graph
from src.graph.concomp import concomp0, concomp1, concomp2
CONNECTED_COMPONENTS = [concomp0, concomp1, concomp2]
@pytest.mark.parametrize("concomp", CONNECTED_COMPONENTS)
def test_one_edge(concomp):
msg = "{} failed".format(concomp.__name__)
g = Graph()
g.add_edge(0, 1)
cc = concomp(g)
assert len(cc) == 2, msg
assert cc[0] == cc[1], msg
@pytest.mark.parametrize("concomp", CONNECTED_COMPONENTS)
@pytest.mark.parametrize("n", list(range(2, 42)))
def test_simple_chain(concomp, n):
assert n > 1
msg = "{} failed".format(concomp.__name__)
g = Graph()
for i in range(n):
g.add_edge(i, i + 1)
cc = concomp(g)
assert len(cc) == n + 1
for i in range(n):
assert cc[i] == cc[i + 1], msg
@pytest.mark.parametrize("concomp", CONNECTED_COMPONENTS)
def test_two_disjoint_edges(concomp):
msg = "{} failed".format(concomp.__name__)
g = Graph()
g.add_edge(0, 1)
g.add_edge(2, 3)
cc = concomp(g)
assert len(cc) == 4
assert cc[0] == cc[1]
assert cc[2] == cc[3]
assert cc[0] != cc[2]
|
{'application':{'type':'Application',
'name':'Template',
'backgrounds': [
{'type':'Background',
'name':'bgTemplate',
'title':'Standard Template with File->Exit menu',
'size':(400, 300),
'style':['resizeable'],
'menubar': {'type':'MenuBar',
'menus': [
{'type':'Menu',
'name':'menuFile',
'label':'&File',
'items': [
{'type':'MenuItem',
'name':'menuFileExit',
'label':'E&xit',
'command':'exit',
},
]
},
]
},
'components': [
{'type':'StaticText',
'name':'StaticText5',
'position':(53, 203),
'text':u'StaticText5',
},
{'type':'StaticText',
'name':'StaticText4',
'position':(51, 175),
'font':{'style': 'bold', 'family': 'sansSerif', 'size': 9},
'text':u'StaticText4',
},
{'type':'StaticText',
'name':'StaticText3',
'position':(31, 94),
'text':u'Type in your guess, then click the Guess button.',
},
{'type':'StaticText',
'name':'StaticText2',
'position':(20, 31),
'text':u"It is a number from 1 to 99. I'll give you 6 tries.",
},
{'type':'StaticText',
'name':'StaticText1',
'position':(15, 9),
'text':u"AHOY! I'm the Dread Pirate Roberts, and I have a secret!",
},
{'type':'Button',
'name':'btnGuess',
'position':(146, 125),
'label':u'Guess',
},
{'type':'TextField',
'name':'tfGuessNum',
'position':(30, 125),
'text':u'0',
},
] # end components
} # end background
] # end backgrounds
} }
|
from uuid import UUID
from pydantic import BaseModel
class LaaApplication(BaseModel):
reference: str | None
id: UUID | None
status_code: str | None
description: str | None
status_date: str | None
effective_start_date: str | None
effective_end_date: str | None
contract_number: str | None
|
# -*- coding:utf-8 -*-
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
driver = webdriver.PhantomJS(executable_path="./phantomjs-2.1.1-macosx/bin/phantomjs")
driver.get("http://baidu.com/")
driver.find_element_by_id("kw").send_keys(u"长城")
sleep(10)
driver.find_element_by_id("su").click()
driver.save_screenshot("长城.png")
|
from decimal import Decimal
from typing import Iterable, Optional, TypeVar
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators._cstypes import Decimal as CsDecimal
from stock_indicators._cstypes import to_pydecimal
from stock_indicators.indicators.common.helpers import RemoveWarmupMixin
from stock_indicators.indicators.common.results import IndicatorResults, ResultBase
from stock_indicators.indicators.common.quote import Quote
def get_keltner(quotes: Iterable[Quote], ema_periods: int = 20,
multiplier: float = 2, atr_periods: int = 10):
"""Get Keltner Channels calculated.
Keltner Channels are based on an EMA centerline and ATR band widths.
See also STARC Bands for an SMA centerline equivalent.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
`ema_periods` : int, defaults 20
Number of periods for the centerline EMA.
`multiplier` : float, defaults 2
ATR multiplier sets the width of the channel.
`atr_periods` : int, defaults 10
Number of periods in the ATR evaluation.
Returns:
`KeltnerResults[KeltnerResult]`
KeltnerResults is a list of KeltnerResult, providing useful helper methods.
See more:
- [Keltner Channels Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Keltner/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
results = CsIndicator.GetKeltner[Quote](CsList(Quote, quotes), ema_periods,
CsDecimal(multiplier), atr_periods)
return KeltnerResults(results, KeltnerResult)
class KeltnerResult(ResultBase):
"""
A wrapper class for a single unit of Keltner Channels results.
"""
@property
def upper_band(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.UpperBand)
@upper_band.setter
def upper_band(self, value):
self._csdata.UpperBand = CsDecimal(value)
@property
def center_line(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.Centerline)
@center_line.setter
def center_line(self, value):
self._csdata.Centerline = CsDecimal(value)
@property
def lower_band(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.LowerBand)
@lower_band.setter
def lower_band(self, value):
self._csdata.LowerBand = CsDecimal(value)
@property
def width(self) -> Optional[Decimal]:
return to_pydecimal(self._csdata.Width)
@width.setter
def width(self, value):
self._csdata.Width = CsDecimal(value)
_T = TypeVar("_T", bound=KeltnerResult)
class KeltnerResults(RemoveWarmupMixin, IndicatorResults[_T]):
"""
A wrapper class for the list of Keltner Channels results.
It is exactly the same as the built-in `list`, except that it provides
some useful helper methods written in the C# implementation.
"""
|
import logging
import math
import torch
import wrapt
import tqdm
from .recording import Recorder
logger = logging.getLogger('pystematic.torch')
class DDPModuleProxy(wrapt.ObjectProxy):
"""Delegates any unknown getattr calls to the underlying module. Makes the
DDP module completely transparent.
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.__wrapped__.module, name)
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
def _move_to_same_device_as(to_move, target):
if hasattr(target, "device"):
return _move_to_device(to_move, target.device)
elif callable(getattr(target, "parameters", None)): # torch modules
try:
return _move_to_device(to_move, next(target.parameters()).device)
except StopIteration:
pass
return to_move
def _move_to_device(obj, device):
if callable(getattr(obj, "to", None)):
return obj.to(device=device)
if isinstance(obj, torch.optim.Optimizer):
obj.load_state_dict(_move_to_device(obj.state_dict(), device))
return obj
if isinstance(obj, dict):
res = {}
for name, value in obj.items():
res[name] = _move_to_device(value, device)
return res
if isinstance(obj, (list, tuple)):
res = []
for i, sub_item in enumerate(obj):
res.append(_move_to_device(sub_item, device))
return res
return obj
def _get_state_dict(item):
if callable(getattr(item, "state_dict", None)):
if isinstance(item, torch.nn.parallel.DistributedDataParallel):
return item.module.state_dict()
else:
return item.state_dict()
if isinstance(item, (int, float, complex, str)):
return item
if isinstance(item, dict):
res = {}
for name, sub_item in item.items():
res[name] = _get_state_dict(sub_item)
return res
if isinstance(item, (list, tuple)):
res = []
for sub_item in item:
res.append(_get_state_dict(sub_item))
return res
return None
def _set_state_dict(item, state, path=[]):
if callable(getattr(item, "load_state_dict", None)):
if isinstance(item, torch.nn.parallel.DistributedDataParallel):
item.module.load_state_dict(_move_to_same_device_as(state, item.module))
else:
item.load_state_dict(_move_to_same_device_as(state, item))
return item
if isinstance(item, (int, float, complex, str)):
if not isinstance(state, (int, float, complex, str)):
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"expected a primitive value, got '{type(state)}'.")
return state
if isinstance(item, dict):
if not isinstance(state, dict):
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"expected a dict, got '{type(state)}'.")
res = {}
for name, sub_item in item.items():
if name in state:
res[name] = _set_state_dict(sub_item, state[name], path + [name])
else:
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"key '{name}' was not found in state.")
return res
if isinstance(item, (list, tuple)):
if not isinstance(state, (list, tuple)):
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"expected a list, got '{type(state)}'")
if len(item) != len(state):
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"expected a list of length '{len(item)}', got one of length '{len(state)}'.")
res = []
for i, sub_item in enumerate(item):
res.append(_set_state_dict(sub_item, state[i], path + [str(i)]))
return res
if state is not None:
raise ValueError(f"Error when setting state for item '{'.'.join(path)}', "
f"expected None, got '{type(state)}'")
return item
def _to_distributed_data_parallel(item):
if callable(getattr(item, "ddp", None)):
return item.ddp()
if isinstance(item, torch.nn.Module):
if any([p.requires_grad for p in item.parameters()]):
logger.debug(f"Converting to distributed for model '{item}'.")
return DDPModuleProxy(torch.nn.parallel.DistributedDataParallel(
module=item,
device_ids=[torch.cuda.current_device()]
))
return item
if isinstance(item, Recorder):
if torch.distributed.get_rank() != 0: # Only rank zero may log stats
item.silence()
logger.debug(f"Silencing recorder '{item}' in rank '{torch.distributed.get_rank()}'.")
return item
if isinstance(item, dict):
return {name: _to_distributed_data_parallel(sub_item) for name, sub_item in item.items()}
if isinstance(item, (list, tuple)):
return [_to_distributed_data_parallel(sub_item) for sub_item in item]
return item
class Context:
def state_dict(self) -> dict:
"""Returns the whole state of the context by iterating all registered
items and calling ``state_dict()`` on the item to retrieve its state.
Primitive values will also be saved.
Returns:
dict: A dict representing the state of all registered objects.
"""
return {name: _get_state_dict(item) for name, item in vars(self).items()}
def load_state_dict(self, state : dict) -> None:
"""Sets the state for the context.
Args:
state (dict): The state to load.
"""
for name, item_state in state.items():
if name in vars(self):
setattr(self, name, _set_state_dict(getattr(self, name), item_state))
def to(self, device):
"""Move the context to a specific device
Args:
device (str, torch.Device): The device to move the context to.
"""
for name, item in vars(self).items():
setattr(self, name, _move_to_device(item, device))
return self
def cuda(self):
"""Moves the context to ``torch.cuda.current_device()``.
"""
return self.to(f"cuda:{torch.cuda.current_device()}")
def cpu(self):
"""Moves the context to the cpu.
"""
return self.to("cpu")
# def ddp(self, wrap_nn_module=True, silence_recorders=True):
# """Moves the context to a distributed data-parallell setting. Can only
# be used if torch.distributed is initialized. The flags passed to
# this function allows you to toggle some behavior of the transform.
# Args:
# wrap_nn_module (bool, optional): Controls if torch.nn.Module objects should be wrapped
# in :obj:`torch.nn.parallel.DistributedDataParallel`. Defaults to True.
# silence_recorders (bool, optional): Controls if all recorders on all non-master processes should
# be silenced. Silencing recorders means that only the recorder in the master process is active,
# and that all recording done in non-master processes is ignored. Defaults to True.
# """
def ddp(self):
"""Moves the context to a distributed data-parallell setting. Can only
be used if torch.distributed is initialized."""
for name, item in vars(self).items():
setattr(self, name, _to_distributed_data_parallel(item))
return self
# def autotransform(self, wrap_nn_modules=True, load_checkpoint=True, silence_recorders=True):
# """Transforms the context according to the current experiment
# parameters. More specifically it; loads a state_dict from the parameter
# ``checkpoint`` if set, moves to cuda if paramter ``cuda`` is set, moves
# to distributed if parameter ``distributed`` is set. The flags passed to
# this function allows you to toggle some behavior of the transform.
# Args:
# wrap_nn_modules (bool, optional): Controls if torch.nn.Module objects should be wrapped
# in :obj:`torch.nn.parallel.DistributedDataParallel`. Defaults to True.
# load_checkpoint (bool, optional): Controls the checkpoint given in the experiment parameter
# ``checkpoint`` should be loaded (if the parameter has been set). Defaults to True.
# silence_recorders (bool, optional): Controls if all recorders on all non-master processes should
# be silenced. Silencing recorders means that only the recorder in the master process is active,
# and that all recording done in non-master processes is ignored. Defaults to True.
# """
def autotransform(self):
"""Transforms the context according to the current experiment
parameters. More specifically it: loads a state_dict from the parameter
``checkpoint`` if set, moves to cuda if parameter ``cuda`` is set, moves
to distributed if parameter ``distributed`` is set.
"""
from pystematic import params
if params["checkpoint"]:
logger.info(f"Loading checkpoint '{params['checkpoint']}'.")
with open(params["checkpoint"], "rb") as f:
self.load_state_dict(torch.load(f, map_location="cpu"))
if params["cuda"]:
self.cuda()
if params["distributed"]:
self.ddp()
return self
class SmartDataLoader(torch.utils.data.DataLoader):
"""Extends the :obj:`torch.utils.data.DataLoader` with the following:
* A loading bar is displayed when iterating the dataloader.
* The items yielded when iterating are moved to the device previously
set with :meth:`to`.
* Transparently handles both distributed and non-distributed modes.
"""
def __init__(self, dataset, shuffle=False, random_seed=None, sampler=None, move_output=True, loading_bar=True, **kwargs):
"""
Args:
dataset (torch.utils.data.Dataset): The dataset to construct a loader for
shuffle (bool, optional): Whether to shuffle the data when loading.
Ignored if ``sampler`` is not None. Defaults to False.
random_seed (int, optional): Random seed to use when shuffling data. Ignored
if ``sampler`` is not None. Defaults to None.
sampler (torch.utils.data.Sampler, Iterable, optional): An object defining how
to sample data items. Defaults to None.
move_output (bool, optional): Whether items yielded during iteration should
automatically be moved to the current device. Defaults to True.
loading_bar (bool, optional): If a loading bar should be displayed during
iteration. Defaults to True.
"""
if sampler is None:
sampler = create_sampler(dataset, shuffle, random_seed)
super().__init__(dataset, sampler=sampler, **kwargs)
self._move_output = move_output
self._show_loading_bar = loading_bar
self._device = None
def to(self, device):
"""Sets the device that yielded items should be placed on when iterating.
Args:
device (str, torch.Device): The device to move the items to.
"""
self._device = device
return self
def __iter__(self):
is_master = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if self._show_loading_bar and is_master:
iterable = tqdm.tqdm(super().__iter__(), leave=True)
else:
iterable = super().__iter__()
if self._move_output and self._device is not None:
for item in iterable:
yield _move_to_device(item, self._device)
else:
yield from iterable
def create_sampler(dataset, shuffle=True, seed=None):
"""Returns a DistributedSampler if running in distributed mode, otherwise a normal sampler
Args:
dataset (torch.utils.data.Dataset): The dataset the sampler will work on.
shuffle (bool): If the sampler should be random or not.
"""
if torch.distributed.is_initialized():
return BetterDistributedSampler(
dataset=dataset,
shuffle=shuffle,
seed=seed
)
if shuffle:
g = torch.Generator()
if seed is not None:
g.manual_seed(seed)
return torch.utils.data.RandomSampler(data_source=dataset, generator=g)
return torch.utils.data.SequentialSampler(data_source=dataset)
class BetterDistributedSampler(torch.utils.data.distributed.DistributedSampler):
"""This class extends torch's default DistributedSampler but removes the need
for manually calling the set_epoch method to reseed the random generator
"""
def __init__(self, dataset, shuffle=True, seed=None):
super().__init__(dataset, shuffle=shuffle, seed=seed if seed is not None else 0)  # torch's DistributedSampler requires an int seed
self.epoch = 0
def __iter__(self):
self.set_epoch(self.epoch+1)
return super().__iter__()
class DistributedSampler(torch.utils.data.distributed.Sampler):
def __init__(self, dataset, shuffle=True, seed=0):
if not torch.distributed.is_initialized():
raise Exception("Distributed sampler can only be used in a distributed environment.")
self.dataset = dataset
self.num_replicas = torch.distributed.get_world_size()
self.rank = torch.distributed.get_rank()
self.shuffle = shuffle
self.num_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.random_gen = torch.Generator()
if seed is not None:
self.random_gen.manual_seed(seed)
def __iter__(self):
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=self.random_gen).cuda()
torch.distributed.broadcast(indices, 0)
indices = indices.cpu().tolist()
else:
indices = list(range(len(self.dataset)))
indices += indices[:(self.total_size - len(indices))]
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples, "{} != {}".format(len(indices), self.num_samples)
return iter(indices)
def __len__(self):
return self.num_samples
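# Usage sketch (illustrative; this module uses relative imports, so it is meant
# to be imported as part of the pystematic torch package):
#
#   ctx = Context()
#   ctx.model = torch.nn.Linear(4, 2)
#   ctx.epoch = 0
#   state = ctx.state_dict()      # {'model': <model state_dict>, 'epoch': 0}
#   ctx.load_state_dict(state)    # restores every registered attribute
#   ctx.cuda()                    # or ctx.to("cpu") / ctx.ddp()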
|
import numpy as np
from numpy import arange, sin, pi, float, size
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.collections import LineCollection
from matplotlib.widgets import Button
import Tkinter as Tk
from spike_sort.ui import label_color
class PlotWithScrollBarTk(object):
def __init__(self):
self.max = 0
self.cur_pos = 0
self.page_sz = 0
self.root = Tk.Tk()
def get_canvas(self, fig):
self.canvas = FigureCanvasTkAgg(fig, master=self.root)
self.canvas.show()
self.scrollbar = Tk.Scrollbar(self.root, orient=Tk.HORIZONTAL)
self.scrollbar.pack(side=Tk.BOTTOM, fill=Tk.BOTH)
self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.scrollbar.config(command=self._callback)
return self.canvas
def _callback(self, mode, *args):
if mode == 'moveto':
pos, = args
self.cur_pos = int(float(pos) * self.max)
elif mode=="scroll":
delta, units = args
delta = float(delta)
if units == 'units':
self.cur_pos += delta*self.page_sz/5.
elif units == 'pages':
self.cur_pos += delta*self.page_sz
self.set_scroll_pos(self.cur_pos)
self.handler(self.cur_pos)
def set_scroll_handler(self, handler):
self.handler = handler
def set_scroll_pos(self, pos):
min, max = str(pos*1./self.max), str((pos+self.page_sz)*1./self.max)
self.scrollbar.set(min, max)
def set_scroll_max(self, max, page_size):
self.page_sz = page_size
self.max = max
class SpikeBrowserUI(object):
def __init__(self, window):
self.window = window
self.sp_win = [-0.8, 1]
self.spike_collection = None
self.fig = Figure((5, 4), 75)
self.canvas = window.get_canvas(self.fig)
self._mpl_init()
self.canvas.mpl_connect('key_press_event', self._on_key)
self.window.set_scroll_handler(self.OnScrollEvt)
def _mpl_init(self):
self.fig.clf()
self.axes = self.fig.add_axes([0.05, 0.1, 0.95,0.9])
self.ax_prev = self.fig.add_axes([0.8, 0.0, 0.1,0.05])
self.ax_next = self.fig.add_axes([0.9, 0.0, 0.1,0.05])
self.b_next = Button(self.ax_next, 'Next')
self.b_prev = Button(self.ax_prev, "Prev")
self.b_next.on_clicked(self._next_spike)
self.b_prev.on_clicked(self._prev_spike)
self.i_spike = 0
self.i_start = 0
self.line_collection = None
def _next_spike(self, event):
try:
if self.i_spike<len(self.spt)-1:
self.i_spike+=1
t_spk = self.spt[self.i_spike]
i_start = int(np.ceil(t_spk/1000.*self.FS-self.i_window/2.))
i_start = np.maximum(self.i_min, i_start)
i_start = np.minimum(self.i_max, i_start)
self.i_start = i_start
self.i_end = self.i_start + self.i_window
self.window.set_scroll_pos(self.i_start)
self.draw_plot()
except IndexError:
pass
def _prev_spike(self, event):
try:
if self.i_spike>0:
self.i_spike-=1
t_spk = self.spt[self.i_spike]
i_start = int(np.ceil(t_spk/1000.*self.FS-self.i_window/2.))
i_start = np.maximum(self.i_min, i_start)
i_start = np.minimum(self.i_max, i_start)
self.i_start = i_start
self.i_end = self.i_start + self.i_window
self.window.set_scroll_pos(self.i_start)
self.draw_plot()
except IndexError:
pass
def _on_key(self, event):
if event.key=='+' or event.key=='=':
self.ylims/=2.
elif event.key == '-':
self.ylims*=2.
else:
return
offset = self.ylims[1]-self.ylims[0]
self.offsets = np.arange(self.n_chans)*offset
self.draw_plot()
def set_spiketimes(self, spk_idx, labels=None, all_labels=None):
if spk_idx:
self.spt = spk_idx['data']
if labels is not None:
self.labels = labels
if all_labels is None:
self.color_func = label_color(np.unique(labels))
else:
self.color_func = label_color(all_labels)
else:
self.labels = None
self.ax_next.set_visible(True)
self.ax_prev.set_visible(True)
else:
self.spt = None
self.ax_next.set_visible(False)
self.ax_prev.set_visible(False)
def set_data(self, data):
self.x = data['data']
self.FS = data['FS']
n_chans, n_pts = self.x.shape
#reset spike times data/hide buttons
self.set_spiketimes(None)
self.i_window = int(self.winsz/1000.*self.FS)
# Extents of data sequence:
self.i_min = 0
self.i_max = n_pts - self.i_window
self.n_chans = n_chans
self.window.set_scroll_max(self.i_max, self.i_window)
# Indices of data interval to be plotted:
self.i_end = self.i_start + self.i_window
self.time = np.arange(self.i_start,self.i_end)*1./self.FS
self.segs = np.empty((n_chans, self.i_window, 2))
self.segs[:,:,0] = self.time[np.newaxis,:]
self.segs[:,:,1] = self.x[:,self.i_start:self.i_end]
ylims = (self.segs[:,:,1].min(), self.segs[:,:,1].max())
offset = ylims[1]-ylims[0]
self.offsets = np.arange(n_chans)*offset
self.segs[:,:,1] += self.offsets[:,np.newaxis]
self.ylims = np.array(ylims)
if self.line_collection:
self.line_collection.remove()
self.line_collection = LineCollection(self.segs,
offsets=None,
transform=self.axes.transData,
color='k')
self.axes.add_collection(self.line_collection)
self.axes.set_xlim((self.time[0], self.time[-1]))
self.axes.set_ylim((self.ylims[0]+self.offsets.min(),
self.ylims[1]+self.offsets.max()))
self.canvas.draw()
def draw_plot(self):
self.time = np.arange(self.i_start,self.i_end)*1./self.FS
self.segs[:,:,0] = self.time[np.newaxis,:]
self.segs[:,:,1] = self.x[:,self.i_start:self.i_end]+self.offsets[:,np.newaxis]
self.line_collection.set_segments(self.segs)
# Adjust plot limits:
self.axes.set_xlim((self.time[0], self.time[-1]))
self.axes.set_ylim((self.ylims[0]+self.offsets.min(),
self.ylims[1]+self.offsets.max()))
if self.spt is not None:
self.draw_spikes()
# Redraw:
self.canvas.draw()
def draw_spikes(self):
if self.spike_collection is not None:
self.spike_collection.remove()
self.spike_collection = None
sp_win = self.sp_win
time = self.segs[0,:,0]*1000.
t_min, t_max = time[0]-sp_win[0], time[-1]-sp_win[1]
spt = self.spt[(self.spt>t_min) & (self.spt<t_max)]
if len(spt)>0:
n_pts = int((sp_win[1]-sp_win[0])/1000.*self.FS)
sp_segs = np.empty((len(spt), self.n_chans, n_pts, 2))
for i in range(len(spt)):
start, = np.nonzero(time>=(spt[i]+sp_win[0]))
start = start[0]
stop = start+n_pts
sp_segs[i,:,:,0] = (time[np.newaxis,start:stop]/1000.)
sp_segs[i,:,:,1] = self.segs[:, start:stop, 1]
sp_segs = sp_segs.reshape(-1, n_pts, 2)
if self.labels is not None:
labs = self.labels[(self.spt>t_min) & (self.spt<t_max)]
colors = np.repeat(self.color_func(labs), self.n_chans, 0)
else:
colors = 'r'
self.spike_collection = LineCollection(sp_segs,
offsets=None,
color=colors,
transform=self.axes.transData)
self.axes.add_collection(self.spike_collection)
    def OnScrollEvt(self, pos):
        # Update the indices of the plotted window:
        self.i_start = self.i_min + pos
        self.i_end = self.i_min + self.i_window + pos
        if self.spt is not None:
            # Move the spike pointer to the last spike before the window centre.
            t_center = (self.i_start + self.i_window / 2.) * 1000. / self.FS
            idx, = np.where(self.spt < t_center)
            self.i_spike = idx[-1] if len(idx) > 0 else 0
        self.draw_plot()
def browse_data_tk(data, spk_idx=None, labels=None, win=100):
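    """Open a Tk window for browsing raw traces with optional spike markers.

    Argument format inferred from set_data/set_spiketimes above:
    data    -- dict with 'data' (n_channels x n_samples array) and 'FS' in Hz
    spk_idx -- optional dict with spike times in milliseconds under 'data'
    labels  -- optional cluster labels, one per spike time
    win     -- width of the plotted window in milliseconds
    """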
frame = PlotWithScrollBarTk()
browser = SpikeBrowserUI(frame)
browser.winsz = win
browser.set_data(data)
browser.set_spiketimes(spk_idx, labels)
Tk.mainloop()
browse_data = browse_data_tk
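# Illustrative usage sketch (added for clarity, not part of the original module).
# The dictionary fields are inferred from set_data/set_spiketimes above: `data`
# needs a 'data' array of shape (n_channels, n_samples) plus the sampling rate
# 'FS' in Hz, and `spk_idx` needs spike times in milliseconds under 'data'.
if __name__ == "__main__":
    FS = 25000.
    raw = {'data': np.random.randn(4, int(10 * FS)), 'FS': FS}
    spikes = {'data': np.sort(np.random.uniform(100., 9000., 50))}  # spike times, ms
    browse_data(raw, spk_idx=spikes, win=100)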
|
#!/usr/bin/python
'''
Program:
    A standard piece of code that shows the style of my programs.
Usage:
    std_code.py
Editor:
    Jacob975
    20170216
#################################
update log
20170206 alpha 1
    It can run properly.
20170216 alpha 2
    Make the code more efficient; add a link code to find darks and subdark.
    It is out of work now, fixing...
20170711 alpha 3
    Add a new restriction on proper fits.
    Now, if the mean of the data is more than 1550 counts, the fit is eliminated.
20170803 alpha 4
    1. Adjust the restriction on the mean of the data.
       The mean mentioned before is renamed as background (bkg).
       The program no longer judges an image by its bkg value alone:
       it reads the bkg and noise of all images and rejects the exotic ones.
    2. The program now writes a log.
20170807 alpha 5
    1. Change the program path from 'python' to 'tat_python'.
20180621 alpha 6
    1. Rename the code.
'''
import os
from astropy.io import fits as pyfits
import numpy as np
from fit_lib import hist_gaussian_fitting
import glob
import time
# Header keywords that every image must provide.
PARAS = ['CCDTEMP', 'EXPTIME', 'RA', 'DEC']
def check_header(name_image):
    darkh = pyfits.getheader(name_image)
    # If any of the required keywords is missing, reject this image.
    for para in PARAS:
        try:
            darkh[para]
        except KeyError:
            print "{0} in {1} is wrong.".format(para, name_image)
            return 1
    # If the CCD temperature is too high, reject the image as well.
    img_temp = darkh['CCDTEMP']
    if img_temp >= -29.5:
        print "Temperature {0} is not allowed in {1}".format(img_temp, name_image)
        return 1
    return 0
# Estimate the background level and noise of an image by fitting a Gaussian
# to its pixel histogram.
def bkg_info(name_image):
    data = pyfits.getdata(name_image)
    params, cov = hist_gaussian_fitting("default", data, shift=-7)
    mean_bkg = params[0]
    std_bkg = params[1]
    return 0, mean_bkg, std_bkg
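# A minimal sketch of what a histogram-based Gaussian background fit can look like
# (added for illustration only; fit_lib.hist_gaussian_fitting above is the real
# implementation and its signature and return values may differ). Assumes SciPy
# is available.
def _hist_gaussian_fit_sketch(data, nbins=100):
    from scipy.optimize import curve_fit
    def gauss(x, mu, sigma, amp):
        return amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    # Histogram the pixel values and fit a Gaussian to the peak of the histogram.
    counts, edges = np.histogram(data.ravel(), bins=nbins)
    centers = 0.5 * (edges[:-1] + edges[1:])
    p0 = [np.median(data), np.std(data), counts.max()]
    params, cov = curve_fit(gauss, centers, counts, p0=p0)
    return params, cov  # params = [mean_bkg, std_bkg, amplitude]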
#--------------------------------------------
# Main code
if __name__ == "__main__":
# Measure time
start_time = time.time()
#----------------------------------------
# Completeness check
# make a list with names of images in this directory
image_list = glob.glob('*.fit')
    # Make sure at least one image was found.
if len(image_list) == 0:
print "Error!\nNo image found"
exit()
#---------------------------------------
# Initialize
mean_bkgs = []
std_bkgs = []
#---------------------------------------
    # Header check
    bad_img_count = 0
    # Check the headers; for valid images, measure the background mean and std.
for name_image in image_list:
failure = check_header(name_image)
if failure:
bad_img_count += 1
temp = "mv {0} X_{0}_X".format(name_image)
os.system(temp)
mean_bkgs.append(0)
std_bkgs.append(0)
continue
failure, mean_bkg, std_bkg = bkg_info(name_image)
mean_bkgs.append(mean_bkg)
std_bkgs.append(std_bkg)
        print "{0} checked".format(name_image)
#----------------------------------------
    # Image quality check
    # Indices of images whose background was measured; the over-exposure
    # rejection itself is left as a placeholder here.
    no_loss_in_mean_bkgs = np.where(np.array(mean_bkgs) != 0)
print "Number of total image: {0}".format(len(image_list))
print "Number of success: {0}".format(len(image_list) - bad_img_count)
print "Number of fail: {0}".format(bad_img_count)
#---------------------------------------
# Measure time
elapsed_time = time.time() - start_time
print "Exiting Main Program, spending ", elapsed_time, "seconds."
|