Each record below is one Python source file from the dataset, rendered as a metadata block, the file content, and per-file statistics. The original column schema (⌀ = nullable) is:

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars/issues/forks_repo_path | string | length 3 – 972 |
| max_stars/issues/forks_repo_name | string | length 6 – 130 |
| max_stars/issues/forks_repo_head_hexsha | string | length 40 – 78 |
| max_stars/issues/forks_repo_licenses | list | 1 – 10 entries |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_issues_count | int64 | 1 – 116k, nullable |
| max_forks_count | int64 | 1 – 105k, nullable |
| *_event_min_datetime / *_event_max_datetime | string | length 24, nullable |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

hexsha: d91434d2eb3221cf812b6bc50686749baa7552a4 | size: 28 | ext: py | lang: Python
path: yehua/theme.py | repo: moremoban/yehua @ e90ac103ec28e1101fd845796c02083d52ddf43e | licenses: ["MIT"]
stars: 19 (2017-12-08T04:54:15.000Z – 2021-08-19T19:59:19.000Z) | issues: 55 (2018-05-07T04:14:47.000Z – 2021-04-19T17:38:09.000Z) | forks: 3 (2017-07-01T14:53:57.000Z – 2017-07-23T02:25:05.000Z, repo: chfw/yehua)

THEME = {"info": "#F47983"}
avg_line_length: 14 | max_line_length: 27 | alphanum_fraction: 0.535714

hexsha: 7b0a195737609d034dd6e2505bae558ffa494c59 | size: 792 | ext: py | lang: Python
path: sieve.py | repo: msghera/Study-of-Goldbach-Conjecture @ 697ef6e3399c16663f0a3a79494e71cc483d219b | licenses: ["MIT"]
stars: null | issues: null | forks: null

class sieve :
def __init__(self, __limit = 1000002):
self.__limit = __limit
self.prime = [2]
self.bs = [1]*__limit
self.bs[0]=0
self.bs[1]=0
for i in range(4, __limit, 2) : self.bs[i] = 0
for i in range(3, __limit, 2) :
if self.bs[i] == 1 :
self.prime.append(i)
for j in range(i*i, __limit, i) : self.bs[j] = 0
def get_limit (self) :
return self.__limit
def __len__ (self) :
return len(self.prime)
def get_prime(self, n):
try:
return self.prime[n-1]
except:
print('Range out of bound.')
    def is_prime(self, num):
        if num < self.__limit:
            # Direct lookup in the precomputed bitset.
            return self.bs[num] == 1
        else:
            # Fall back to trial division by the cached primes for numbers
            # beyond the sieve's range.
            for _prime in self.prime:
                if num % _prime == 0:
                    return False
            return True
if __name__ == '__main__':
s = sieve()
print(s.bs[:10])
avg_line_length: 20.307692 | max_line_length: 52 | alphanum_fraction: 0.604798
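The sieve.py record above implements a Sieve of Eratosthenes with a prime list, a lookup bitset, and a trial-division fallback. The snippet below is an editor-added usage sketch, not part of the dataset record; it assumes the class definition from the record has been executed (for example by importing sieve.py):

# Illustrative usage of the sieve class defined in the record above.
s = sieve()               # builds the bitset and prime list up to the default limit of 1,000,002
print(len(s))             # number of primes found below the limit
print(s.get_prime(1))     # 1-based index into the prime list -> 2
print(s.is_prime(97))     # True: direct bitset lookup
print(s.is_prime(100))    # False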
hexsha: 29cfd576ee96a210b549cdbad3c75dd0cd5c3294 | size: 4,941 | ext: py | lang: Python
path: indico/modules/events/abstracts/settings.py | repo: bkmgit/indico @ d77ee121e35880a416b9b05e6098ea912d870b5c | licenses: ["MIT"]
stars: 1 (2021-06-11T20:02:10.000Z – 2021-06-11T20:02:10.000Z) | issues: null | forks: null

# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.core.settings.converters import DatetimeConverter, EnumConverter
from indico.modules.events.settings import EventSettingsProxy
from indico.util.enum import RichEnum
from indico.util.i18n import _
class BOASortField(RichEnum):
id = 'id'
abstract_title = 'title'
board_number = 'board_number'
session_board_number = 'session_board_number'
session_title = 'session_title'
speaker = 'speaker'
schedule = 'schedule'
schedule_board_number = 'schedule_board_number'
session_schedule_board = 'session_schedule_board'
class BOACorrespondingAuthorType(RichEnum):
none = 'none'
submitter = 'submitter'
speakers = 'speakers'
class AllowEditingType(RichEnum):
submitter_all = 'submitter_all'
submitter_authors = 'submitter_authors'
submitter_primary = 'submitter_primary'
submitter = 'submitter'
class SubmissionRightsType(RichEnum):
speakers = 'speakers'
all = 'all'
class BOALinkFormat(RichEnum):
"""LaTeX book of abstracts link format setting.
value is a 2-tuple of strings:
first is the hyperref option to use
second sets additional tex commands
"""
frame = ('', '')
colorlinks = ('[colorlinks]', '')
unstyled = ('[hidelinks]', '')
BOASortField.__titles__ = {
BOASortField.id: _('ID'),
BOASortField.abstract_title: _('Abstract title'),
BOASortField.board_number: _('Board Number'),
BOASortField.session_board_number: _('Session title, Board Number'),
BOASortField.session_title: _('Session title'),
BOASortField.speaker: _('Presenter'),
BOASortField.schedule: _('Schedule'),
BOASortField.schedule_board_number: _('Schedule, Board Number'),
BOASortField.session_schedule_board: _('Session, Schedule, Board Number')
}
BOACorrespondingAuthorType.__titles__ = {
BOACorrespondingAuthorType.none: _('None'),
BOACorrespondingAuthorType.submitter: _('Submitter'),
BOACorrespondingAuthorType.speakers: _('Speakers')
}
BOALinkFormat.__titles__ = {
BOALinkFormat.frame: _('Border around links (screen only)'),
BOALinkFormat.colorlinks: _('Color links'),
BOALinkFormat.unstyled: _('Do not highlight links')
}
AllowEditingType.__titles__ = {
AllowEditingType.submitter_all: _('All involved people (submitter, authors, speakers)'),
AllowEditingType.submitter_authors: _('Abstract submitter and all authors (primary and co-authors)'),
AllowEditingType.submitter_primary: _('Abstract submitter and primary authors'),
AllowEditingType.submitter: _('Abstract submitter only')
}
SubmissionRightsType.__titles__ = {
SubmissionRightsType.speakers: _('Speakers'),
SubmissionRightsType.all: _('Speakers and authors')
}
abstracts_settings = EventSettingsProxy('abstracts', {
'description_settings': {
'is_active': True,
'is_required': True,
'max_length': None,
'max_words': None
},
'start_dt': None,
'end_dt': None,
'modification_end_dt': None,
'announcement': '',
'announcement_render_mode': RenderMode.markdown,
'allow_multiple_tracks': True,
'tracks_required': False,
'allow_attachments': False,
'copy_attachments': False,
'allow_speakers': True,
'speakers_required': True,
'allow_editing': AllowEditingType.submitter_all,
'contribution_submitters': SubmissionRightsType.all,
'contrib_type_required': False,
'submission_instructions': ''
}, acls={
'authorized_submitters'
}, converters={
'start_dt': DatetimeConverter,
'end_dt': DatetimeConverter,
'modification_end_dt': DatetimeConverter,
'allow_editing': EnumConverter(AllowEditingType),
})
abstracts_reviewing_settings = EventSettingsProxy('abstracts_reviewing', {
'scale_lower': 0,
'scale_upper': 5,
'allow_comments': True,
'allow_convener_judgment': False, # whether track conveners can make a judgment (e.g. accept/reject)
'allow_convener_track_change': False,
'allow_contributors_in_comments': False,
'reviewing_instructions': '',
'judgment_instructions': ''
})
boa_settings = EventSettingsProxy('abstracts_book', {
'extra_text': '',
'extra_text_end': '',
'sort_by': BOASortField.id,
'corresponding_author': BOACorrespondingAuthorType.submitter,
'show_abstract_ids': False,
'cache_path': None,
'cache_path_tex': None,
'min_lines_per_abstract': 0,
'link_format': BOALinkFormat.frame,
}, converters={
'sort_by': EnumConverter(BOASortField),
'corresponding_author': EnumConverter(BOACorrespondingAuthorType),
'announcement_render_mode': EnumConverter(RenderMode),
'link_format': EnumConverter(BOALinkFormat),
})
avg_line_length: 31.877419 | max_line_length: 105 | alphanum_fraction: 0.722931

hexsha: 7085e86a0848bd757cda739432dc4fb19bdfefab | size: 4,309 | ext: py | lang: Python
path: tests/linkedlists_tests/queue_test.py | repo: warmachine028/datastax @ 2898b517dee471a240a10e81bcfafee5dce615ca | licenses: ["MIT"]
stars: 5 (2021-12-25T17:08:39.000Z – 2022-03-18T16:22:57.000Z) | issues: 1 (2021-12-28T05:45:34.000Z – 2021-12-28T21:31:50.000Z) | forks: null

import unittest
from typing import Optional, Any
from datastax.errors import UnderFlowError, OverFlowError
from datastax.linkedlists import Queue, LinkedList
class TestQueue(unittest.TestCase):
def setUp(self) -> None:
self.limitedQueue = Queue(2) # With fixed size Queue
self.unlimitedQueue = Queue() # With dynamic Queue
def test_append_and_insert(self):
with self.assertRaises(NotImplementedError):
self.limitedQueue.append(10)
self.limitedQueue.insert(30)
def test_complete_fill_complete_empty(self):
# Completely Filled
self.limitedQueue.enqueue(10)
self.limitedQueue.enqueue(20)
# Should raise overflow error
with self.assertRaises(OverFlowError):
self.limitedQueue.enqueue(30)
# Completely Emptied
self.limitedQueue.dequeue()
self.limitedQueue.dequeue()
self.assertEqual([], self.items_in(self.limitedQueue))
def test_construction(self):
queue = Queue(None, [1, 2, 3, 4, 5]) # With Array Without capacity
self.assertEqual([1, 2, 3, 4, 5], self.items_in(queue))
queue = Queue(5, [1, 2, 3, 4, 5]) # With Array With capacity
self.assertEqual([1, 2, 3, 4, 5], self.items_in(queue))
queue = Queue(5) # Without Array With capacity
self.assertEqual([], self.items_in(queue))
queue = Queue() # Without Array Without capacity
self.assertEqual([], self.items_in(queue))
queue = Queue(3, [1, 2, 3, 4]) # With capacity less than Array size
self.assertEqual([1, 2, 3], self.items_in(queue))
queue = Queue(5, [1, 2, 3]) # With capacity more than Array size
self.assertEqual([1, 2, 3], self.items_in(queue))
queue.enqueue(10) # Then performing Enqueue Operation
queue.enqueue(20) # Again performing Enqueue Operation
queue.dequeue() # Performing Dequeue Operation
self.assertEqual([2, 3, 10, 20], self.items_in(queue))
queue = Queue(None, [None, 1, 2]) # With first array element as None
self.assertEqual([], self.items_in(queue))
queue = Queue(None, None) # With both arguments as None
self.assertEqual([], self.items_in(queue))
def test_dequeue_from_empty_queue(self):
with self.assertRaises(UnderFlowError):
self.limitedQueue.dequeue()
self.unlimitedQueue.dequeue()
def test_enqueue_in_empty_queue(self):
self.limitedQueue.enqueue(50)
self.assertEqual([50], self.items_in(self.limitedQueue))
self.unlimitedQueue.enqueue(50)
self.assertEqual([50], self.items_in(self.unlimitedQueue))
def test_enqueue_in_full_queue(self):
self.limitedQueue.enqueue(30)
self.limitedQueue.enqueue(40)
self.assertEqual([30, 40], self.items_in(self.limitedQueue))
with self.assertRaises(OverFlowError):
self.limitedQueue.enqueue(50)
self.unlimitedQueue.enqueue(30)
self.unlimitedQueue.enqueue(40)
self.unlimitedQueue.enqueue(50) # unlimited Queue, can't be full
self.assertEqual([30, 40, 50], self.items_in(self.unlimitedQueue))
def test_enqueueing_heterogeneous_items(self):
# inserting miscellaneous items
items = [
{1: 2, 2: 3, 3: 4}, # -> dictionary
{1, 2, 3, 4, 5, 6, 7}, # -> set
[1, 2, 3, 4, 5], # -> list
1234567890, # -> integer
"string", # -> string
'A', # -> char
# Inserting Uncommon items
LinkedList([1, 2]).head, # -> Node
LinkedList([1, 2]), # -> LinkedList
Queue(3, [1, 2, 3]), # -> self referential type
None # -> * can't be inserted as first item but otherwise okay...
# entire list will be discarded if Node as first element
]
for item in items:
self.unlimitedQueue.enqueue(item)
self.assertEqual(items, self.items_in(self.unlimitedQueue))
@staticmethod
def items_in(queue: Queue) -> list[Optional[Any]]:
result = []
head = queue.head
while head:
result.append(head.data)
head = head.next
return result
if __name__ == '__main__':
unittest.main()
avg_line_length: 38.81982 | max_line_length: 78 | alphanum_fraction: 0.618937

hexsha: 6d03039714054beacf19069e8243e91dfce01ccd | size: 3,371 | ext: py | lang: Python
path: Altitude_Bot.py | repo: Andre0512/Altitude_Bot @ 7461a4d5036650a4639d65c72a9e97a808372b79 | licenses: ["MIT"]
stars: null | issues: null | forks: null

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import yaml
from telegram import ReplyKeyboardMarkup, KeyboardButton, ParseMode
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from googlemaps import elevation, Client
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def get_yml(file):
result = {}
with open(os.path.join(os.path.dirname(__file__), file), 'rb') as ymlfile:
        result = yaml.safe_load(ymlfile)
return result
def get_altitude(latitude, longitude):
maps = Client(key=get_yml('./config.yml')['altitude']['mapstoken'])
req = elevation.elevation(maps, (latitude, longitude))[0]
altitude = req['elevation']
return altitude
def start(bot, update):
if get_language(update):
salutation = 'Hallo ' + update.message.from_user.first_name + " ✌🏻\n"
text = "Sende mir einen Standort und ich sage dir auf welcher Höhe über dem Meerespiegel dieser liegt 🙂"
keyboard_text = 'Aktuellen Standort senden 📍'
else:
salutation = 'Hello ' + update.message.from_user.first_name + " ✌🏻\n"
text = 'Send me a location and I will tell you at what altitude this is above sea level 🙂'
keyboard_text = 'Send current location 📍'
if update.message.text == '/start':
text = salutation + text
keyboard = ReplyKeyboardMarkup([[KeyboardButton(text=keyboard_text, request_location=True)]], resize_keyboard=True)
update.message.reply_text(text=text, reply_markup=keyboard)
def help(bot, update):
update.message.reply_text('Help!')
def get_language(update):
try:
language = update.message.from_user.language_code
except:
language = 'en'
if language.split('-')[0] == 'de':
return True
else:
return False
def location(bot, update):
location = update.message.location
altitude = get_altitude(location.latitude, location.longitude)
altitude = round(altitude, 2)
if get_language(update):
altitude = str(altitude).replace('.', ',')
reply_text = "Du bist auf einer Höhe von *" + altitude + "* Metern 😁"
else:
reply_text = "You are at an altitude of " + str(altitude) + " meters 😁"
update.message.reply_text(reply_text, parse_mode=ParseMode.MARKDOWN)
user = update.message.from_user
name = user.first_name + " " + user.last_name if user.last_name else user.first_name
log = ("\n" + name + " " + str(user.id) + " " + user.language_code +
" (" + str(location.latitude) + ", " + str(location.longitude) + ")")
file = open("log.txt", "a")
file.write(log)
file.close()
def error(bot, update, error):
    logger.warning('Update "%s" caused error "%s"' % (update, error))
def main():
updater = Updater(get_yml('./config.yml')['altitude']['bottoken'])
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(MessageHandler(Filters.location, location))
dp.add_handler(MessageHandler(Filters.text, start))
dp.add_error_handler(error)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
avg_line_length: 32.104762 | max_line_length: 119 | alphanum_fraction: 0.666864

hexsha: 1e68f0a3acd2dc33101adbf6c5a5ce1fe6af36ad | size: 1,165 | ext: py | lang: Python
path: conda_kapsel/plugins/test/test_network_util.py | repo: LaudateCorpus1/kapsel @ 736b95d4ef81121bfeb965b989f7c159cf470386 | licenses: ["BSD-3-Clause"]
stars: 43 (2016-07-13T19:05:06.000Z – 2021-02-18T08:17:52.000Z) | issues: 13 (2016-08-04T18:42:27.000Z – 2017-02-21T01:07:03.000Z, repo: conda-archive/kapsel) | forks: 10 (2016-07-27T00:40:52.000Z – 2021-08-23T05:42:23.000Z, repo: conda-archive/kapsel)

# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
import conda_kapsel.plugins.network_util as network_util
import socket
def test_can_connect_to_socket():
# create a listening socket just to get a port number
# that (probably) won't be in use after we close it
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
try:
assert network_util.can_connect_to_socket("127.0.0.1", port)
finally:
s.close()
def test_cannot_connect_to_socket():
# create a listening socket just to get a port number
# that (probably) won't be in use after we close it
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
assert not network_util.can_connect_to_socket("127.0.0.1", port)
avg_line_length: 32.361111 | max_line_length: 78 | alphanum_fraction: 0.590558

hexsha: e4132d2b8634c9b78f9710b551fab2722e0d307a | size: 5,672 | ext: py | lang: Python
path: datahub/search/omis/views.py | repo: reupen/data-hub-api @ d854188f4c45da0e89075add132a15bb1227ff79 | licenses: ["MIT"]
stars: null | issues: 16 (2020-04-01T15:25:35.000Z – 2020-04-14T14:07:30.000Z) | forks: null

from django.db.models import DecimalField, OuterRef, Subquery, Sum
from django.db.models.functions import Cast
from datahub.core.query_utils import (
get_choices_as_case_expression,
get_front_end_url_expression,
get_full_name_expression,
)
from datahub.metadata.query_utils import get_sector_name_subquery
from datahub.oauth.scopes import Scope
from datahub.omis.order.models import Order as DBOrder
from datahub.omis.order.query_utils import get_lead_order_assignee_name_subquery
from datahub.omis.payment.constants import RefundStatus
from datahub.omis.payment.models import Refund
from datahub.search.omis import OrderSearchApp
from datahub.search.omis.serializers import SearchOrderQuerySerializer
from datahub.search.views import register_v3_view, SearchAPIView, SearchExportAPIView
class SearchOrderAPIViewMixin:
"""Defines common settings."""
required_scopes = (Scope.internal_front_end,)
search_app = OrderSearchApp
serializer_class = SearchOrderQuerySerializer
FILTER_FIELDS = [
'primary_market',
'sector_descends',
'uk_region',
'completed_on_before',
'completed_on_after',
'created_on_before',
'created_on_after',
'delivery_date_before',
'delivery_date_after',
'assigned_to_adviser',
'assigned_to_team',
'status',
'reference',
'total_cost',
'subtotal_cost',
'contact_name',
'company_name',
'company',
]
REMAP_FIELDS = {
'primary_market': 'primary_market.id',
'uk_region': 'uk_region.id',
'assigned_to_adviser': 'assignees.id',
'assigned_to_team': 'assignees.dit_team.id',
'company': 'company.id',
'reference': 'reference.trigram',
}
COMPOSITE_FILTERS = {
'contact_name': [
'contact.name',
'contact.name.trigram',
],
'company_name': [
'company.name',
'company.name.trigram',
'company.trading_names', # to find 2-letter words
'company.trading_names.trigram',
],
'sector_descends': [
'sector.id',
'sector.ancestors.id',
],
}
@register_v3_view()
class SearchOrderAPIView(SearchOrderAPIViewMixin, SearchAPIView):
"""Filtered order search view."""
subtotal_cost_field = 'subtotal_cost'
def get_base_query(self, request, validated_data):
"""Enhance entity query with the total subtotal cost."""
base_query = super().get_base_query(request, validated_data)
base_query.aggs.bucket(self.subtotal_cost_field, 'sum', field=self.subtotal_cost_field)
return base_query
def enhance_response(self, results, response):
"""Enhance response with total subtotal cost."""
summary = {}
if self.subtotal_cost_field in results.aggregations:
total_subtotal_cost = results.aggregations[self.subtotal_cost_field]['value']
summary[f'total_{self.subtotal_cost_field}'] = total_subtotal_cost
response['summary'] = summary
return response
@register_v3_view(sub_path='export')
class SearchOrderExportAPIView(SearchOrderAPIViewMixin, SearchExportAPIView):
"""Order search export view."""
queryset = DBOrder.objects.annotate(
subtotal_in_pounds=Cast(
'subtotal_cost',
DecimalField(max_digits=19, decimal_places=2),
) / 100,
# This follows the example from
# https://docs.djangoproject.com/en/2.1/ref/models/expressions/#using-aggregates-within-a-subquery-expression
net_refund_in_pounds=Subquery(
Refund.objects.filter(
order=OuterRef('pk'),
status=RefundStatus.APPROVED,
).order_by(
).values(
'order',
).annotate(
total_refund=Cast(
Sum('net_amount'),
DecimalField(max_digits=19, decimal_places=2),
) / 100,
).values(
'total_refund',
),
output_field=DecimalField(max_digits=19, decimal_places=2),
),
status_name=get_choices_as_case_expression(DBOrder, 'status'),
link=get_front_end_url_expression('order', 'pk'),
sector_name=get_sector_name_subquery('sector'),
company_link=get_front_end_url_expression('company', 'company__pk'),
contact_name=get_full_name_expression('contact'),
contact_link=get_front_end_url_expression('contact', 'contact__pk'),
lead_adviser=get_lead_order_assignee_name_subquery(),
)
field_titles = {
'reference': 'Order reference',
'subtotal_in_pounds': 'Net price',
'net_refund_in_pounds': 'Net refund',
'status_name': 'Status',
'link': 'Link',
'sector_name': 'Sector',
'primary_market__name': 'Market',
'uk_region__name': 'UK region',
'company__name': 'Company',
'company__address_country__name': 'Company country',
'company__uk_region__name': 'Company UK region',
'company_link': 'Company link',
'contact_name': 'Contact',
'contact__job_title': 'Contact job title',
'contact_link': 'Contact link',
'lead_adviser': 'Lead adviser',
'created_by__dit_team__name': 'Created by team',
'created_on': 'Date created',
'delivery_date': 'Delivery date',
'quote__created_on': 'Date quote sent',
'quote__accepted_on': 'Date quote accepted',
'paid_on': 'Date payment received',
'completed_on': 'Date completed',
}
avg_line_length: 35.45 | max_line_length: 117 | alphanum_fraction: 0.645804

hexsha: 6a7865284b7165060968e883c88f1c6570c3909d | size: 5,772 | ext: py | lang: Python
path: src/go_bot/search_api/search_api.py | repo: ai4eu/ai4eu-chatbot @ 9504fc73206b0fdfcbd8bad9e9625bd2776887b7 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

# FOUNDATION OF RESEARCH AND TECHNOLOGY - HELLAS (FORTH-ICS)
#
# INFORMATION SYSTEMS LABORATORY (ISL)
#
# http://www.ics.forth.gr/isl
#
# LICENCE: TO BE ADDED
#
# Copyright 2021
# The AI4EU chatbot - Class making requests to the Search-API developed by Thales
# author: Papadakos Panagiotis
# e-mail: papadako@ics.forth.gr
# importing the requests library
import requests
#from .search_api_results import SearchAPIResults
class SearchAPI:
""""
Constructor for search API requests
Using JSON as a serialization format
"""
def __init__(self):
self._SEARCH_API_ENDPOINT = 'https://search.ai4eu.eu:8443/search/1.0/api/searching/querying'
self.json = None
self.headers = {'Content-type': 'application/json', 'apikey': 'ai4eu-7f906a41-ba45-4aae-9bc7-c1282ec6a23c'}
"""
Method that returns the results of a simple web query
Takes the query and the number of results
Returns the response as dict
"""
def web_query(self, query, results=1):
print('Search-API make web search query for "', query, '"')
# populate the json for the search API POST request
self.json = SearchAPI.__populate_web_query_data(query, results)
# Make the POST request
resp = requests.post(url=self._SEARCH_API_ENDPOINT, json=self.json, headers=self.headers, verify=False)
return resp.json()
"""
Method that returns the results of an ai_catalogue query
PP: TODO Check if we need to support other kind of slots
"""
def ai_catalogue_query(self, query, results=1,
research_area=None,
asset_type=None,
technical_categories=None,
business_categories=None):
print('Search-API make asset search query for "', query,
'" Research Area: ', research_area,
'Asset Type: ', asset_type,
'Technical Categories: ', technical_categories,
'Business Categories: ', business_categories)
# populate the json for the search API POST request
# Here we are also adding any values for the slots we are tracking
self.json = SearchAPI.__populate_ai_catalogue_query_data(query, results,
research_area=research_area,
asset_type=asset_type,
technical_categories=technical_categories,
business_categories=business_categories)
# Make the POST request
resp = requests.post(url=self._SEARCH_API_ENDPOINT, json=self.json, headers=self.headers, verify=False)
return resp.json()
@staticmethod
def __populate_web_query_data(query, results=1):
# json to be sent to api
json = {
'from': 0,
'size': results,
'content': query,
'options': {
'exclude': ['sentiment', 'text_suggester', 'lemma_title', 'lemma_content'],
'disable': ['qa', 'aggregator']
}
}
return json
"""
method that populates the json for the ai catalogue query
We need the query, the number of results and any value for the facets of researchArea, assetType,
technicalCategories or businessCategories
"""
@staticmethod
def __populate_ai_catalogue_query_data(query, results=1,
research_area=None,
asset_type=None,
technical_categories=None,
business_categories=None):
# Return only results is the ai-catalog
must = [{'wildcard': {'source_doc_id': 'https://www.ai4europe.eu/research/ai-catalog/*'}}]
# Check what other filters we want to add
# researchArea
if research_area is not None:
must.append({'match': {'content': research_area}})
# assetType
if asset_type is not None:
must.append({'match': {'content': asset_type}})
# technicalCategories
if technical_categories is not None:
must.append({'match': {'content': technical_categories}})
# businessCategories
if business_categories is not None:
must.append({'match': {'content': business_categories}})
# json to be sent to api
json = {
'from': 0,
'size': 3,
'content': query,
'options': {
'exclude': ['sentiment', 'text_suggester', 'lemma_title', 'lemma_content'],
'disable': ['qa', 'aggregator']
},
'add-clause': {
'type': 'must',
'clause': {
'bool': {
'must': must
}
}
}
}
return json
# Just trying out things
'''search_api = SearchAPI()
response = search_api.web_query('What is Yolo?', 3)
item = SearchAPIResults.get_item_from_results(response, 0)
print(item.get_summary())
item = SearchAPIResults.get_item_from_results(response, 2)
print(item.get_summary())
print(item.get_keywords())
print(item.get_content())
print(item.get_id())
print(item.get_url())
print(item.get_title())
print(item.get_score())
print(item.get_index())'''
#search_api.ai_catalogue_query('Earth Observation dataset?', asset_type='dataset',
# business_categories='agriculture',
# technical_categories='knowledge representation')
avg_line_length: 37.23871 | max_line_length: 115 | alphanum_fraction: 0.574498
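The search_api.py record above wraps POST requests to the AI4EU Search-API. Below is a minimal call sketch added by the editor, not part of the dataset record; it mirrors the commented-out example at the bottom of the file, assumes network access to the hard-coded Thales endpoint, and uses an arbitrary query string:

# Illustrative only: issues a live POST to the endpoint configured inside SearchAPI.
api = SearchAPI()
response = api.web_query('What is YOLO?', results=3)  # plain web search asking for 3 hits
print(response)  # raw JSON payload returned by the Search-API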
hexsha: fe8bb3b211820523586b6767eb03b0938b578ba4 | size: 9,619 | ext: py | lang: Python
path: data/map.py | repo: kikacaty/AgentFormer @ dbd0bfb3c4a7d1d0217a9aea892bb9d6c39a1ca0 | licenses: ["MIT"]
stars: 105 (2021-03-26T06:15:08.000Z – 2022-03-28T21:08:50.000Z) | issues: 17 (2021-04-30T09:29:38.000Z – 2022-03-31T12:29:35.000Z) | forks: 29 (2021-03-29T06:53:29.000Z – 2022-03-28T21:08:55.000Z)

"""
Code borrowed from Trajectron++: https://github.com/StanfordASL/Trajectron-plus-plus/blob/ef0165a93ee5ba8cdc14f9b999b3e00070cd8588/trajectron/environment/map.py
"""
import torch
import numpy as np
import cv2
import os
from .homography_warper import get_rotation_matrix2d, warp_affine_crop
class Map(object):
def __init__(self, data, homography, description=None):
self.data = data
self.homography = homography
self.description = description
def as_image(self):
raise NotImplementedError
def get_cropped_maps(self, world_pts, patch_size, rotation=None, device='cpu'):
raise NotImplementedError
def to_map_points(self, scene_pts):
raise NotImplementedError
class GeometricMap(Map):
"""
A Geometric Map is a int tensor of shape [layers, x, y]. The homography must transform a point in scene
coordinates to the respective point in map coordinates.
:param data: Numpy array of shape [layers, x, y]
:param homography: Numpy array of shape [3, 3]
"""
def __init__(self, data, homography, origin=None, description=None):
#assert isinstance(data.dtype, np.floating), "Geometric Maps must be float values."
super(GeometricMap, self).__init__(data, homography, description=description)
if origin is None:
self.origin = np.zeros(2)
else:
self.origin = origin
self._last_padding = None
self._last_padded_map = None
self._torch_map = None
def torch_map(self, device):
if self._torch_map is not None:
return self._torch_map
self._torch_map = torch.tensor(self.data, dtype=torch.uint8, device=device)
return self._torch_map
def as_image(self):
# We have to transpose x and y to rows and columns. Assumes origin is lower left for image
# Also we move the channels to the last dimension
return (np.transpose(self.data, (2, 1, 0))).astype(np.uint)
def get_padded_map(self, padding_x, padding_y, device):
if self._last_padding == (padding_x, padding_y):
return self._last_padded_map
else:
self._last_padding = (padding_x, padding_y)
self._last_padded_map = torch.full((self.data.shape[0],
self.data.shape[1] + 2 * padding_x,
self.data.shape[2] + 2 * padding_y),
False, dtype=torch.uint8)
self._last_padded_map[..., padding_x:-padding_x, padding_y:-padding_y] = self.torch_map(device)
return self._last_padded_map
@staticmethod
def batch_rotate(map_batched, centers, angles, out_height, out_width):
"""
As the input is a map and the warp_affine works on an image coordinate system we would have to
flip the y axis updown, negate the angles, and flip it back after transformation.
This, however, is the same as not flipping at and not negating the radian.
:param map_batched:
:param centers:
:param angles:
:param out_height:
:param out_width:
:return:
"""
M = get_rotation_matrix2d(centers, angles, torch.ones_like(angles))
rotated_map_batched = warp_affine_crop(map_batched, centers, M,
dsize=(out_height, out_width), padding_mode='zeros')
return rotated_map_batched
@classmethod
def get_cropped_maps_from_scene_map_batch(cls, maps, scene_pts, patch_size, rotation=None, device='cpu'):
"""
Returns rotated patches of each map around the transformed scene points.
___________________
| | |
| |ps[3] |
| | |
| | |
| o|__________|
| | ps[2] |
| | |
|_______|__________|
ps = patch_size
:param maps: List of GeometricMap objects [bs]
:param scene_pts: Scene points: [bs, 2]
:param patch_size: Extracted Patch size after rotation: [-x, -y, +x, +y]
:param rotation: Rotations in degrees: [bs]
:param device: Device on which the rotated tensors should be returned.
:return: Rotated and cropped tensor patches.
"""
batch_size = scene_pts.shape[0]
lat_size = 2 * np.max((patch_size[0], patch_size[2]))
long_size = 2 * np.max((patch_size[1], patch_size[3]))
assert lat_size % 2 == 0, "Patch width must be divisible by 2"
assert long_size % 2 == 0, "Patch length must be divisible by 2"
lat_size_half = lat_size // 2
long_size_half = long_size // 2
context_padding_x = int(np.ceil(np.sqrt(2) * long_size))
context_padding_y = int(np.ceil(np.sqrt(2) * long_size))
centers = torch.tensor([s_map.to_map_points(scene_pts[np.newaxis, i]) for i, s_map in enumerate(maps)],
dtype=torch.long, device=device).squeeze(dim=1) \
+ torch.tensor([context_padding_x, context_padding_y], device=device, dtype=torch.long)
padded_map = [s_map.get_padded_map(context_padding_x, context_padding_y, device=device) for s_map in maps]
padded_map_batched = torch.stack([padded_map[i][...,
centers[i, 0] - context_padding_x: centers[i, 0] + context_padding_x,
centers[i, 1] - context_padding_y: centers[i, 1] + context_padding_y]
for i in range(centers.shape[0])], dim=0)
center_patches = torch.tensor([[context_padding_y, context_padding_x]],
dtype=torch.int,
device=device).repeat(batch_size, 1)
if rotation is not None:
angles = torch.Tensor(rotation)
else:
angles = torch.zeros(batch_size)
rotated_map_batched = cls.batch_rotate(padded_map_batched/255.,
center_patches.float(),
angles,
long_size,
lat_size)
del padded_map_batched
return rotated_map_batched[...,
long_size_half - patch_size[1]:(long_size_half + patch_size[3]),
lat_size_half - patch_size[0]:(lat_size_half + patch_size[2])]
def get_cropped_maps(self, scene_pts, patch_size, rotation=None, device='cpu'):
"""
Returns rotated patches of the map around the transformed scene points.
___________________
| | |
| |ps[3] |
| | |
| | |
| o|__________|
| | ps[2] |
| | |
|_______|__________|
ps = patch_size
:param scene_pts: Scene points: [bs, 2]
:param patch_size: Extracted Patch size after rotation: [-lat, -long, +lat, +long]
:param rotation: Rotations in degrees: [bs]
:param device: Device on which the rotated tensors should be returned.
:return: Rotated and cropped tensor patches.
"""
return self.get_cropped_maps_from_scene_map_batch([self]*scene_pts.shape[0], scene_pts,
patch_size, rotation=rotation, device=device)
def to_map_points(self, scene_pts):
org_shape = None
if len(scene_pts.shape) != 2:
org_shape = scene_pts.shape
scene_pts = scene_pts.reshape((-1, 2))
scene_pts = scene_pts - self.origin[None, :]
N, dims = scene_pts.shape
points_with_one = np.ones((dims + 1, N))
points_with_one[:dims] = scene_pts.T
map_points = (self.homography @ points_with_one).T[..., :dims]
if org_shape is not None:
map_points = map_points.reshape(org_shape)
return map_points
def visualize_data(self, data):
pre_motion = np.stack(data['pre_motion_3D']) * data['traj_scale']
fut_motion = np.stack(data['fut_motion_3D']) * data['traj_scale']
heading = data['heading']
img = np.transpose(self.data, (1, 2, 0))
for i in range(pre_motion.shape[0]):
cur_pos = pre_motion[i, -1]
# draw agent
cur_pos = np.round(self.to_map_points(cur_pos)).astype(int)
img = cv2.circle(img, (cur_pos[1], cur_pos[0]), 3, (0, 255, 0), -1)
prev_pos = cur_pos
# draw fut traj
for t in range(fut_motion.shape[0]):
pos = fut_motion[i, t]
pos = np.round(self.to_map_points(pos)).astype(int)
img = cv2.line(img, (prev_pos[1], prev_pos[0]), (pos[1], pos[0]), (0, 255, 0), 2)
# draw heading
theta = heading[i]
v= np.array([5.0, 0.0])
v_new = v.copy()
v_new[0] = v[0] * np.cos(theta) - v[1] * np.sin(theta)
v_new[1] = v[0] * np.sin(theta) + v[1] * np.cos(theta)
vend = pre_motion[i, -1] + v_new
vend = np.round(self.to_map_points(vend)).astype(int)
img = cv2.line(img, (cur_pos[1], cur_pos[0]), (vend[1], vend[0]), (0, 255, 255), 2)
fname = f'out/agent_maps/{data["seq"]}_{data["frame"]}_vis.png'
os.makedirs(os.path.dirname(fname), exist_ok=True)
cv2.imwrite(fname, img)
avg_line_length: 42.751111 | max_line_length: 160 | alphanum_fraction: 0.573137
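The data/map.py record above defines GeometricMap, whose homography maps scene coordinates to map (pixel) coordinates. Below is an editor-added construction sketch, not taken from the AgentFormer repository: the layer array, identity homography, and sample point are made-up values chosen only to show the to_map_points() call, and it assumes the module's dependencies (numpy, torch, cv2) are installed.

import numpy as np

# Illustrative only: a 3-layer 100x100 map with an identity scene-to-map homography.
layers = np.zeros((3, 100, 100), dtype=np.uint8)
homography = np.eye(3)
geo_map = GeometricMap(layers, homography, origin=np.zeros(2))

scene_pts = np.array([[12.5, 40.0]])        # one scene point, shape [batch, 2]
map_pts = geo_map.to_map_points(scene_pts)  # identical values here, since the homography is identity
print(map_pts)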
hexsha: 25c668c2a17a50072fdec397ddbe4cae00eaff28 | size: 1,379 | ext: py | lang: Python
path: repositories/waterfall/waterfallRepo.py | repo: saintaardvark/glouton-satnogs-data-downloader @ dc8671340f558b1a21b41b9b04bab05fc15c7809 | licenses: ["MIT"]
stars: null | issues: null | forks: null

from queue import Queue
from threading import Thread
from commands.download.downloadCommandParams import DownloadCommandParams
from commands.download.waterfallDownloadCommand import WaterfallDownloadCommand
from workers.downloadWorker import DownloadWorker
from domain.interfaces.downloadable import Downloadable
class WaterfallRepo(Downloadable):
def __init__(self, working_dir, modules):
self.__working_dir = working_dir
self.__waterfall_commands = Queue()
self.__modules = modules
def register_command(self, observation, start_date, end_date):
cmd_parameters = DownloadCommandParams(
self.__working_dir, self.__create_dir_name('waterfall', start_date, end_date), self.__modules)
waterfallDownloadCommand = WaterfallDownloadCommand(
cmd_parameters, observation)
self.__waterfall_commands.put(waterfallDownloadCommand)
def create_worker(self):
return self.__create_thread(self.__waterfall_commands)
def __create_thread(self, queue):
worker = DownloadWorker(queue)
thread = Thread(target=worker.execute)
thread.daemon = True
thread.start()
return thread
def __create_dir_name(self, target, start_date, end_date):
return target + '__' + start_date.strftime('%m-%d-%YT%H-%M-%S') + '__' + end_date.strftime('%m-%d-%YT%H-%M-%S')
avg_line_length: 40.558824 | max_line_length: 119 | alphanum_fraction: 0.735315

hexsha: 819315bff1ede65bc2198a801161524d3796a5fd | size: 21,077 | ext: py | lang: Python
path: lib/clients/AwsClient.py | repo: SAP/service-fabrik-backup-restore @ 27c29444a4fdea4430da93a05c7f78b23546d603 | licenses: ["Apache-2.0"]
stars: 2 (2021-03-01T00:38:47.000Z – 2021-12-15T04:25:41.000Z) | issues: 57 (2017-12-04T16:46:41.000Z – 2022-03-24T11:43:31.000Z, repo: cloudfoundry-incubator/service-fabrik-backup-restore) | forks: 16 (2017-12-04T16:45:23.000Z – 2019-09-17T11:42:19.000Z, repo: cloudfoundry-incubator/service-fabrik-backup-restore)

import boto3
from botocore.config import Config
from .BaseClient import BaseClient
from ..models.Snapshot import Snapshot
from ..models.Volume import Volume
from ..models.Attachment import Attachment
class AwsClient(BaseClient):
def __init__(self, operation_name, configuration, directory_persistent, directory_work_list, poll_delay_time,
poll_maximum_time):
super(AwsClient, self).__init__(operation_name, configuration, directory_persistent, directory_work_list,
poll_delay_time, poll_maximum_time)
if configuration['credhub_url'] is None:
self.__setCredentials(
configuration['access_key_id'], configuration['secret_access_key'], configuration['region_name'])
else:
self.logger.info('fetching creds from credhub')
credentials = self._get_credentials_from_credhub(configuration)
self.__setCredentials(
credentials['access_key_id'], credentials['secret_access_key'], credentials['region_name'])
self.max_retries = (configuration.get('max_retries') if
type(configuration.get('max_retries'))
== int else 10)
# skipping some actions for blob operation
if operation_name != 'blob_operation':
self.ec2_config = Config(retries={'max_attempts': self.max_retries})
self.ec2 = self.create_ec2_resource()
self.ec2.client = self.create_ec2_client()
self.formatted_tags = self.format_tags()
# add config for s3
self.s3_config = Config(retries={'max_attempts': self.max_retries})
self.s3 = self.create_s3_resource()
self.s3.client = self.create_s3_client()
# +-> Check whether the given container exists
self.container = self.get_container()
if not self.container:
msg = 'Could not find or access the given container.'
self.last_operation(msg, 'failed')
raise Exception(msg)
# +-> Get the id of the persistent volume attached to this instance
if operation_name != 'blob_operation':
self.availability_zone = self._get_availability_zone_of_server(
configuration['instance_id'])
if not self.availability_zone:
msg = 'Could not retrieve the availability zone of the instance.'
self.last_operation(msg, 'failed')
raise Exception(msg)
def __setCredentials(self, access_key_id, secret_access_key, region_name):
self.__awsCredentials = {
'access_key_id': access_key_id,
'secret_access_key': secret_access_key,
'region_name': region_name
}
def format_tags(self):
return [{'Key': key, 'Value': value} for key, value in self.tags.items()]
def create_aws_session(self):
return boto3.Session(
aws_access_key_id=self.__awsCredentials['access_key_id'],
aws_secret_access_key=self.__awsCredentials['secret_access_key'],
region_name=self.__awsCredentials['region_name']
)
def create_ec2_resource(self):
return self.create_aws_session().resource('ec2', config=self.ec2_config)
def create_ec2_client(self):
try:
client = self.create_aws_session().client('ec2', config=self.ec2_config)
return client
except Exception as error:
raise Exception('Connection to AWS EC2 failed: {}'.format(error))
def create_s3_resource(self):
return self.create_aws_session().resource('s3', config=self.s3_config)
def create_s3_client(self):
return self.create_aws_session().client('s3', config=self.s3_config)
def _get_availability_zone_of_server(self, instance_id):
try:
instance = self.ec2.Instance(instance_id)
instance.load()
return instance.placement['AvailabilityZone']
except Exception as error:
self.logger.error(
'[EC2] ERROR: Unable to determine the availability zone of instance {}.\n{}'.format(instance_id, error))
return None
def get_container(self):
try:
container = self.s3.Bucket(self.CONTAINER)
# Test if the container is accessible
key = '{}/{}'.format(self.BLOB_PREFIX, 'AccessTestByServiceFabrikPythonLibrary')
container.put_object(Key=key)
container.delete_objects(Delete={
'Objects': [{
'Key': key
}]
})
return container
except Exception as error:
self.logger.error('[S3] ERROR: Unable to find or access container {}.\n{}'.format(
self.CONTAINER, error))
return None
def _get_snapshot(self, snapshot_id):
try:
snapshot = self.ec2.Snapshot(snapshot_id)
return Snapshot(snapshot.id, snapshot.volume_size, snapshot.start_time, snapshot.state)
except:
return None
def _get_volume(self, volume_id):
try:
volume = self.ec2.Volume(volume_id)
return Volume(volume.id, volume.state, volume.size)
except:
return None
def get_attached_volumes_for_instance(self, instance_id):
instance = self.ec2.Instance(instance_id)
try:
return [Volume(details['VolumeId'], 'none', self.ec2.Volume(details['VolumeId']).size, details['Device'])
for volumes in instance.volumes.all()
for details in volumes.attachments]
except:
return []
def has_nvme_persistent_volume(self):
device = self.shell(
'cat /proc/mounts | grep {}'.format(self.DIRECTORY_PERSISTENT))
nvme_dev_pattern = '/dev/nvme'
if nvme_dev_pattern in device:
return True
else:
return False
def get_persistent_volume_for_instance(self, instance_id):
if self.has_nvme_persistent_volume() == True:
device = self.shell('cat /proc/mounts | grep {}'.format(self.DIRECTORY_PERSISTENT)).split(' ')[0][:14]
vol_id = self.shell("nvme id-ctrl -v {} | egrep -wo 'vol.*'".format(device))
vol_id = "vol-" + vol_id.split("vol")[1]
vol_id = vol_id.rstrip("\n")
self.logger.info('Found volume id {} for device {}'.format(vol_id, device))
for volume in self.get_attached_volumes_for_instance(instance_id):
if volume.id == vol_id:
self._add_volume_device(volume.id, device)
return volume
return None
else:
device = self.shell(
'cat /proc/mounts | grep {}'.format(self.DIRECTORY_PERSISTENT)).split(' ')[0][:9]
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
# --> /dev/xvdk on machine will be /dev/sdk on AWS
device = device.replace('xv', 's')
for volume in self.get_attached_volumes_for_instance(instance_id):
if volume.device == device:
self._add_volume_device(volume.id, device)
return volume
return None
def _create_snapshot(self, volume_id, description='Service-Fabrik: Automated backup'):
log_prefix = '[SNAPSHOT] [CREATE]'
snapshot = None
self.logger.info('{} START for volume id {} with tags {}'.format(
log_prefix, volume_id, self.formatted_tags))
try:
snapshot = self.ec2.create_snapshot(
VolumeId=volume_id,
Description=description
)
self._wait('Waiting for snapshot {} to get ready...'.format(snapshot.id),
lambda snap: snap.state == 'completed',
snapshot.reload,
snapshot)
snapshot = Snapshot(
snapshot.id, snapshot.volume_size, snapshot.start_time, snapshot.state)
self._add_snapshot(snapshot.id)
self.ec2.create_tags(
Resources=[
snapshot.id
],
Tags=self.formatted_tags
)
self.logger.info('{} SUCCESS: snapshot-id={}, volume-id={} with tags {}'.format(
log_prefix, snapshot.id, volume_id, self.formatted_tags))
except Exception as error:
message = '{} ERROR: volume-id={} and tags={}\n{}'.format(
log_prefix, volume_id, self.formatted_tags, error)
self.logger.error(message)
if snapshot:
self.delete_snapshot(snapshot.id)
snapshot = None
raise Exception(message)
return snapshot
def _copy_snapshot(self, snapshot_id):
log_prefix = '[SNAPSHOT] [COPY]'
snapshot = None
ec2_snapshot = self.ec2.Snapshot(snapshot_id)
try:
snapshot = ec2_snapshot.copy(
DryRun=False,
SourceRegion=self.__awsCredentials['region_name'],
Description='Service-Fabrik: Encrypted Backup',
Encrypted=True
)
new_snapshot = self.ec2.Snapshot(snapshot['SnapshotId'])
self._wait('Waiting for snapshot {} to get ready...'.format(new_snapshot.id),
lambda snap: snap.state == 'completed',
new_snapshot.reload,
new_snapshot)
snapshot = Snapshot(
new_snapshot.id, new_snapshot.volume_size, new_snapshot.start_time, new_snapshot.state)
self.logger.info('{} SUCCESS: snapshot-id={}, unencrypted-snapshot_id={}'.format(
log_prefix, snapshot.id, snapshot_id))
self.output_json['snapshotId'] = snapshot.id
self.ec2.create_tags(
Resources=[
snapshot.id
],
Tags=self.formatted_tags
)
except Exception as error:
message = '{} ERROR: snapshot-id={}\n{}'.format(
log_prefix, snapshot_id, error)
self.logger.error(message)
if snapshot:
self.delete_snapshot(snapshot.id)
snapshot = None
raise Exception(message)
return snapshot
def _delete_snapshot(self, snapshot_id):
log_prefix = '[SNAPSHOT] [DELETE]'
try:
self.ec2.client.delete_snapshot(
SnapshotId=snapshot_id
)
self._wait('Waiting for snapshot {} to be deleted...'.format(snapshot_id),
lambda id: not self._get_snapshot(id),
None,
snapshot_id)
self._remove_snapshot(snapshot_id)
self.logger.info(
'{} SUCCESS: snapshot-id={}'.format(log_prefix, snapshot_id))
return True
except Exception as error:
message = '{} ERROR: snapshot-id={}\n{}'.format(
log_prefix, snapshot_id, error)
if error.response['ResponseMetadata']['HTTPStatusCode'] == 404:
self.logger.info(message)
self.logger.info('ignoring this error for delete operation..')
return True
self.logger.error(message)
raise Exception(message)
def _create_volume(self, size, snapshot_id=None, volume_type='standard'):
log_prefix = '[VOLUME] [CREATE]'
volume = None
try:
kwargs = {
'Size': size,
'AvailabilityZone': self.availability_zone,
'VolumeType': volume_type
}
if snapshot_id:
kwargs['SnapshotId'] = snapshot_id
volume = self.ec2.create_volume(**kwargs)
self._wait('Waiting for volume {} to get ready...'.format(volume.id),
lambda vol: vol.state == 'available',
volume.reload,
volume)
volume = Volume(volume.id, 'none', volume.size)
self._add_volume(volume.id)
self.ec2.create_tags(
Resources=[
volume.id
],
Tags=self.formatted_tags
)
self.logger.info('{} SUCCESS: volume-id={} volume-type={} with tags = {} '.format(
log_prefix, volume.id, volume_type, self.formatted_tags))
except Exception as error:
message = '{} ERROR: size={}\n{}'.format(log_prefix, size, error)
self.logger.error(message)
if volume:
self.delete_volume(volume.id)
volume = None
raise Exception(message)
return volume
def _delete_volume(self, volume_id):
log_prefix = '[VOLUME] [DELETE]'
try:
self.ec2.client.delete_volume(
VolumeId=volume_id
)
self._wait('Waiting for volume {} to be deleted...'.format(volume_id),
lambda id: not self._get_volume(id),
None,
volume_id)
self._remove_volume(volume_id)
self.logger.info(
'{} SUCCESS: volume-id={}'.format(log_prefix, volume_id))
return True
except Exception as error:
message = '{} ERROR: volume-id={}\n{}'.format(
log_prefix, volume_id, error)
if error.response['ResponseMetadata']['HTTPStatusCode'] == 404:
self.logger.info(message)
self.logger.info('ignoring this error for delete operation..')
return True
self.logger.error(message)
raise Exception(message)
def _refresh_devices_list(self,instance_id):
for volume in self.get_attached_volumes_for_instance(instance_id):
device = volume.device.replace('xv', 's')
self._add_volume_device(volume.id, device)
def _create_attachment(self, volume_id, instance_id):
log_prefix = '[ATTACHMENT] [CREATE]'
attachment = None
try:
self._refresh_devices_list(instance_id)
volume = self.ec2.Volume(volume_id)
device = self._get_free_device()
volume.attach_to_instance(
InstanceId=instance_id,
Device=device
)
self._wait('Waiting for attachment of volume {} to get ready...'.format(volume_id),
lambda vol: vol.attachments[0]['State'] == 'attached',
volume.reload,
volume)
self._add_volume_device(volume_id, device)
attachment = Attachment(0, volume_id, instance_id)
self._add_attachment(volume_id, instance_id)
self.logger.info(
'{} SUCCESS: volume-id={}, instance-id={}'.format(log_prefix, volume_id, instance_id))
except Exception as error:
message = '{} ERROR: volume-id={}, instance-id={}\n{}'.format(
log_prefix, volume_id, instance_id, error)
self.logger.error(message)
# The following lines are a workaround for a boto3 bug:
# The attachment process (see _create_attachment() method) may end with throwing an Exception, e.g.
# 'list index out of range', but the attachment has been successful. Therefore, we must
# check whether the volume is attached and if yes, trigger the detachment
volume = self._get_volume(volume_id)
if volume.status == 'in-use':
self.logger.warning('[VOLUME] [DELETE] Volume is in state {} although the attaching process failed, '
'triggering detachment'.format(volume.status))
attachment = True
if attachment:
self.delete_attachment(volume_id, instance_id)
attachment = None
raise Exception(message)
return attachment
def _delete_attachment(self, volume_id, instance_id):
log_prefix = '[ATTACHMENT] [DELETE]'
try:
volume = self.ec2.Volume(volume_id)
volume.detach_from_instance(
InstanceId=instance_id,
Force=True
)
self._wait('Waiting for attachment of volume {} to be removed...'.format(volume_id),
lambda vol: len(vol.attachments) == 0,
volume.reload,
volume)
self._remove_volume_device(volume_id)
self._remove_attachment(volume_id, instance_id)
self.logger.info(
'{} SUCCESS: volume-id={}, instance-id={}'.format(log_prefix, volume_id, instance_id))
return True
except Exception as error:
message = '{} ERROR: volume-id={}, instance-id={}\n{}'.format(
log_prefix, volume_id, instance_id, error)
if error.response['ResponseMetadata']['HTTPStatusCode'] == 404:
self.logger.info(message)
self.logger.info('ignoring this error for delete operation..')
return True
self.logger.error(message)
raise Exception(message)
def _find_volume_device(self, volume_id):
# Nothing to do for AWS as the device name is specified manually while attaching a volume and therefore known
pass
def get_nvme_mountpoint(self, volume_id):
device_list = self.shell("lsblk --noheadings --raw -p -o NAME,TYPE | awk '$2 == \"part\" {print $1}'")
device_list = device_list.split("\n")
for device in device_list:
vol_id = self.shell("nvme id-ctrl -v {} | egrep -wo 'vol.*'".format(device))
vol_id = "vol-" + vol_id.split("vol")[1]
vol_id = vol_id.rstrip("\n")
if vol_id == volume_id:
return device
return None
def get_mountpoint(self, volume_id, partition=None):
if self.has_nvme_persistent_volume() == True:
return self.get_nvme_mountpoint(volume_id)
device = self._get_device_of_volume(volume_id)
if not device:
return None
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
# --> /dev/sdk on AWS will be /dev/xvdk on machine
device = device.replace('s', 'xv')
if partition:
device += partition
return device
def _upload_to_blobstore(self, blob_to_upload_path, blob_target_name):
log_prefix = '[S3] [UPLOAD]'
if self.container:
self.logger.info(
'{} Started to upload the tarball to the object storage.'.format(log_prefix))
try:
self.container.upload_file(
blob_to_upload_path, blob_target_name)
self.logger.info('{} SUCCESS: blob_to_upload={}, blob_target_name={}, container={}'
.format(log_prefix, blob_to_upload_path, blob_target_name, self.CONTAINER))
return True
except Exception as error:
message = '{} ERROR: blob_to_upload={}, blob_target_name={}, container={}\n{}'.format(
log_prefix, blob_to_upload_path, blob_target_name, self.CONTAINER, error)
self.logger.error(message)
raise Exception(message)
def _download_from_blobstore(self, blob_to_download_name, blob_download_target_path):
log_prefix = '[S3] [DOWNLOAD]'
if self.container:
self.logger.info('{} Started to download the tarball to target{}.'
.format(log_prefix, blob_download_target_path))
try:
self.container.download_file(
blob_to_download_name, blob_download_target_path)
self.logger.info('{} SUCCESS: blob_to_download={}, blob_target_name={}, container={}'.format(
log_prefix, blob_to_download_name, self.CONTAINER, blob_download_target_path))
return True
except Exception as error:
message = '{} ERROR: blob_to_download={}, blob_target_name={}, container={}\n{}'.format(
log_prefix, blob_to_download_name, blob_download_target_path, self.CONTAINER, error)
self.logger.error(message)
raise Exception(message)
def _download_from_blobstore_and_pipe_to_process(self, process, blob_to_download_name, segment_size):
s3_object_body = self.s3.Object(
self.CONTAINER, blob_to_download_name).get()['Body']
chunk = s3_object_body.read(segment_size)
while chunk:
process.stdin.write(chunk)
chunk = s3_object_body.read(segment_size)
return True
avg_line_length: 41.819444 | max_line_length: 120 | alphanum_fraction: 0.578972

hexsha: b5e807f035b5833b59b7afc8706fb7d97db62198 | size: 4,270 | ext: py | lang: Python
path: users/views.py | repo: davospots/awadz @ 12ea9cfff80f8dc37f0016d53645861684812433 | licenses: ["MIT"]
stars: null | issues: null | forks: null

from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm,ReportUserForm
from awadz.models import Post, Notification
from django.contrib.auth.models import User
from django.http import JsonResponse
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created!\
You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
context = {'form': form}
template_name = 'users/register.html'
return render(request, template_name, context)
def validate_username(request):
username = request.GET.get('username',None)
data = {
'is_taken':User.objects.filter(username__iexact=username).exists()
}
print(data)
return JsonResponse(data)
# @login_required
def profile(request,username=None):
report_form = ReportUserForm()
user = get_object_or_404(User,username=username)
post_list = Post.objects.filter(author=user).order_by('-id')
post_count = post_list.count()
page = request.GET.get('page', 1)
paginator = Paginator(post_list, 4)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'report_form':report_form,
'posts':posts,
'user_id':user,
'post_count':post_count,
}
template_name = 'users/profile.html'
return render(request, template_name, context)
@login_required
def updateProfile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile',username=request.user.username)
else:
messages.error(request, f'Username already exists or in use!')
return redirect('profile-update')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
}
template_name = 'users/update_profile.html'
return render(request, template_name, context)
@login_required
def userFollowUnfollow(request,pk=None):
current_user = request.user
other_user = User.objects.get(pk=pk)
if other_user not in current_user.profile.follows.all():
current_user.profile.follows.add(other_user)
other_user.profile.followers.add(current_user)
notify = Notification.objects.create(sender=current_user,receiver=other_user,action="started following you.")
else:
current_user.profile.follows.remove(other_user)
other_user.profile.followers.remove(current_user)
return redirect('profile',username=other_user.username)
@login_required
def change_password(request):
if request.method == 'POST':
form = PasswordChangeForm(data=request.POST, user=request.user)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
return redirect('profile',username=request.user.username)
else:
return redirect('change-password')
else:
form = PasswordChangeForm(user=request.user)
args = {'form':form}
return render(request, 'users/change_password.html',args)
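# Hedged addition by the editor (not part of the original app): a sketch of the
# URL configuration these views appear to assume, reconstructed from the
# redirect()/url names used above ('login', 'profile', 'profile-update',
# 'change-password'); every path string and the remaining names are illustrative.
# from django.urls import path
# from django.contrib.auth import views as auth_views
# from users import views
#
# urlpatterns = [
#     path('register/', views.register, name='register'),
#     path('login/', auth_views.LoginView.as_view(), name='login'),
#     path('profile/<str:username>/', views.profile, name='profile'),
#     path('profile/update/', views.updateProfile, name='profile-update'),
#     path('password/change/', views.change_password, name='change-password'),
#     path('follow/<int:pk>/', views.userFollowUnfollow, name='follow'),
#     path('validate-username/', views.validate_username, name='validate-username'),
# ]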
| 34.715447
| 117
| 0.666979
|
88aed8c3723d286f9de7efd8fe6dff61cfdbb123
| 14,543
|
py
|
Python
|
lib/public/HtmlReport.py
|
xiaoxiaolulu/MeteorTears
|
5145c126c6f330c8633ed5e2eedd02a946f7b54d
|
[
"MIT"
] | 19
|
2019-05-09T06:19:47.000Z
|
2020-10-21T08:31:09.000Z
|
lib/public/HtmlReport.py
|
xiaoxiaolulu/MeteorTears
|
5145c126c6f330c8633ed5e2eedd02a946f7b54d
|
[
"MIT"
] | null | null | null |
lib/public/HtmlReport.py
|
xiaoxiaolulu/MeteorTears
|
5145c126c6f330c8633ed5e2eedd02a946f7b54d
|
[
"MIT"
] | 15
|
2019-05-20T05:47:01.000Z
|
2020-10-19T10:18:57.000Z
|
import os
import sys
from io import StringIO as StringIO
import time
import json
import unittest
import platform
import base64
import traceback
from functools import wraps
__all__ = ['Report']
HTML_IMG_TEMPLATE = """
<a href="data:image/png;base64, {}">
<img src="data:image/png;base64, {}" width="800px" height="500px"/>
</a>
<br></br>
"""
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
SYSSTR = platform.system()
FIELDS = {
"testPass": 0,
"testResult": [
],
"testName": "",
"testAll": 0,
"testFail": 0,
"beginTime": "",
"totalTime": "",
"testSkip": 0
}
class PATH:
""" all file PATH meta """
config_tmp_path = './lib/templates/report'
class MakeResultJson:
""" make html table tags """
def __init__(self, datas: tuple):
"""
Initialize the object.
:param datas: the complete tuple of result data for one test case
"""
self.datas = datas
print(datas)
self.result_schema = {}
def __setitem__(self, key, value):
"""
:param key: self[key]
:param value: value
:return:
"""
self.result_schema[key] = value
def __repr__(self) -> str:
"""
Return the object's HTML structure as a JSON string.
:rtype: str
:return: the repr of self, a fully assembled table-row (tr) record
"""
keys = (
'className',
'methodName',
'description',
'spendTime',
'status',
'log',
)
obj_type = (int, tuple, bool, str, dict, list, bytes, float)
for key, data in zip(keys, self.datas):
try:
if isinstance(data, obj_type):
self.result_schema.setdefault(key, data)
except TypeError:
continue
return json.dumps(self.result_schema)
TestResult = unittest.TestResult
class ReportTestResult(TestResult):
""" override"""
def __init__(self, suite, stream=sys.stdout, retry=1, save_last_try=False):
super(ReportTestResult, self).__init__()
self.begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.start_time = 0
self.stream = stream
self.end_time = 0
self.failure_count = 0
self.error_count = 0
self.success_count = 0
self.skipped = 0
self.verbosity = 1
self.success_case_info = []
self.skipped_case_info = []
self.failures_case_info = []
self.errors_case_info = []
self.all_case_counter = 0
self.suite = suite
self.status = ''
self.result_list = []
self.fail_result = []
self.retry = retry
self.save_last_try = save_last_try
self.case_status = 0
self.trys = 0
self.trys = 1
self.case_log = ''
self.default_report_name = '自动化测试报告'
self.FIELDS = None
self.sys_stdout = None
self.sys_stderr = None
self.outputBuffer = None
@property
def success_counter(self) -> int:
""" set success counter """
return self.success_count
@success_counter.setter
def success_counter(self, value) -> None:
"""
Setter for success_counter; updates the number of passed cases.
:param value: the int count of successful cases passed in
:return:
"""
self.success_count = value
def startTest(self, test) -> None:
"""
Called just before a test case starts running.
:return:
"""
unittest.TestResult.startTest(self, test)
self.outputBuffer = StringIO()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
self.start_time = time.time()
def stopTest(self, test) -> None:
"""
Called after a test case has finished executing.
:return:
"""
# FIXME: the retry logic is flawed, but its impact is minor for now
if self.retry and self.retry >= 1:
self.trys += 1
if self.case_status == 1:
if self.trys <= self.retry:
# if self.save_last_try:
# t = self.fail_result.pop(-1)
# if t[0] == 1:
# self.failure_count -= 1
# else:
# self.error_count -= 1
import copy
test = copy.copy(test)
sys.stderr.write("Retesting... ")
sys.stderr.write(str(test))
sys.stderr.write('..%d \n' % self.trys)
doc = getattr(test, '_testMethodDoc', u"") or u''
if doc.find('_retry') != -1:
doc = doc[:doc.find('_retry')]
desc = "%s__retry:%s" % (doc, '重跑用例')
test._testMethodDoc = desc
test(self)
else:
self.case_status = 0
self.trys = 0
self.end_time = '{0:.3} s'.format((time.time() - self.start_time))
self.result_list.append(self.get_all_result_info_tuple(test))
self.complete_output()
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.sys_stdout:
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
self.sys_stdout = None
self.sys_stderr = None
return self.outputBuffer.getvalue()
def stopTestRun(self, title=None) -> dict:
"""
Called once after all tests have finished executing.
:param title:
:return:
"""
FIELDS['testPass'] = self.success_counter
for item in self.result_list:
item = json.loads(str(MakeResultJson(item)))
FIELDS.get('testResult').append(item)
FIELDS['testAll'] = len(self.result_list)
FIELDS['testName'] = title if title else self.default_report_name
FIELDS['testFail'] = self.failure_count
FIELDS['beginTime'] = self.begin_time
end_time = int(time.time())
start_time = int(
time.mktime(
time.strptime(
self.begin_time,
'%Y-%m-%d %H:%M:%S')))
FIELDS['totalTime'] = str(end_time - start_time) + 's'
FIELDS['testError'] = self.error_count
FIELDS['testSkip'] = self.skipped
self.FIELDS = FIELDS
return FIELDS
def get_all_result_info_tuple(self, test) -> tuple:
"""
Collect the test's information and return it assembled as a complete tuple.
:param test:
:return:
"""
return tuple([*self.get_testcase_property(test),
self.end_time, self.status, self.case_log])
@staticmethod
def error_or_failure_text(err) -> str:
"""
Format the sys.exc_info() tuple into a list of traceback strings.
:param err:
:return:
"""
return traceback.format_exception(*err)
def addSuccess(self, test) -> None:
"""
pass
:param test:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
if self.verbosity > 1:
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
self.success_counter += 1
self.status = '成功'
self.case_status = 0
self.case_log = output.split('\n')
self._mirrorOutput = True # print(class_name, method_name, method_doc)
def addError(self, test, err):
"""
add Some Error report and infos
:param test:
:param err:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
logs.extend(self.error_or_failure_text(err))
self.failure_count += 1
self.case_status = 1
self.add_test_type('失败', logs)
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
self._mirrorOutput = True
def addFailure(self, test, err):
"""
add Some Failures report and infos
:param test:
:param err:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
logs.extend(self.error_or_failure_text(err))
self.failure_count += 1
self.case_status = 1
TestResult.addFailure(self, test, err)
_, _exc_str = self.failures[-1]
self.fail_result.append((1, test))
# self.result_list.append((1, test, output, _exc_str))
self.add_test_type('失败', logs)
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
self._mirrorOutput = True
def addSkip(self, test, reason) -> None:
"""
Record the information of every skipped case.
:param test:
:param reason:
:return: None
"""
logs = [reason]
self.complete_output()
self.skipped += 1
self.case_status = 0
self.add_test_type('跳过', logs)
if self.verbosity > 1:
sys.stderr.write('S ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('S')
self._mirrorOutput = True
def add_test_type(self, status: str, case_log: list) -> None:
"""
Set the test status and the case log.
:param status:
:param case_log:
:return:
"""
self.status = status
self.case_log = case_log
@staticmethod
def get_testcase_property(test) -> tuple:
"""
Take a test and return its class_name, method_name and method_doc attributes.
:param test:
:return: (class_name, method_name, method_doc) -> tuple
"""
class_name = test.__class__.__qualname__
method_name = test.__dict__['_testMethodName']
method_doc = test.__dict__['_testMethodDoc']
return class_name, method_name, method_doc
class Report(ReportTestResult, PATH):
img_path = 'img/' if platform.system() != 'Windows' else 'img\\'
def __init__(self, suites, retry=0, save_last_try=True):
super(Report, self).__init__(suites)
self.suites = suites
self.log_path = None
self.title = '自动化测试报告'
self.filename = 'report.html'
self.retry = retry
self.save_last_try = save_last_try
def report(self, description, filename: str = None, log_path='.'):
"""
Generate the test report and write it under the current run path.
:param log_path: directory in which the report file is stored
:param filename: name of the generated report file
:param description: title/description shown in the report
:return:
"""
if filename:
self.filename = filename if filename.endswith('.html') else filename + '.html'
if description:
self.title = description
self.log_path = os.path.abspath(log_path)
self.suites.run(result=self)
self.stopTestRun(self.title)
self.output_report()
text = '\n测试已全部完成, 可前往{}查询测试报告'.format(self.log_path)
print(text)
def output_report(self):
"""
Write the test report to the target path.
:return:
"""
template_path = self.config_tmp_path
override_path = os.path.abspath(self.log_path) if \
os.path.abspath(self.log_path).endswith('/') else \
os.path.abspath(self.log_path) + '/'
with open(template_path, 'rb') as file:
body = file.readlines()
with open(override_path + self.filename, 'wb') as write_file:
for item in body:
if item.strip().startswith(b'var resultData'):
head = ' var resultData = '
item = item.decode().split(head)
item[1] = head + \
json.dumps(self.FIELDS, ensure_ascii=False, indent=4)
item = ''.join(item).encode()
item = bytes(item) + b';\n'
write_file.write(item)
@staticmethod
def img2base(img_path: str, file_name: str) -> str:
"""
Locate the image by file name and return its contents encoded as base64.
:param img_path: absolute path of the image directory (built from the default path)
:param file_name: the file name passed in through the decorator
:return:
"""
pattern = '/' if platform.system() != 'Windows' else '\\'
with open(img_path + pattern + file_name, 'rb') as file:
data = file.read()
return base64.b64encode(data).decode()
def add_test_img(*pargs):
"""
Accept one or more image names and embed them in the test report.
:param pargs:
:return:
"""
def _wrap(func):
@wraps(func)
def __wrap(*args, **kwargs):
img_path = os.path.abspath(
'{}'.format(Report.img_path))
try:
result = func(*args, **kwargs)
except Exception:
if 'save_img' in dir(args[0]):
save_img = getattr(args[0], 'save_img')
save_img(func.__name__)
data = Report.img2base(img_path, pargs[0] + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
sys.exit(0)
print('<br></br>')
if len(pargs) > 1:
for parg in pargs:
print(parg + ':')
data = Report.img2base(img_path, parg + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
return result
if not os.path.exists(img_path + pargs[0] + '.png'):
return result
data = Report.img2base(img_path, pargs[0] + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
return result
return __wrap
return _wrap
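# Hedged usage sketch (editor's addition, not part of the original module):
# builds a unittest suite and renders it through Report; the test directory,
# file pattern and output location below are illustrative assumptions.
if __name__ == '__main__':
    # Discover test cases under ./tests and run them through the HTML reporter.
    suite = unittest.defaultTestLoader.discover('tests', pattern='test_*.py')
    runner = Report(suite, retry=1, save_last_try=True)
    # Writes report.html into the current working directory.
    runner.report(description='自动化测试报告', filename='report.html', log_path='.')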
| 30.109731
| 90
| 0.528089
|
0f611edad3d32dbff154c0fa9d0556d689ace9e9
| 1,191
|
py
|
Python
|
data/train/python/0f611edad3d32dbff154c0fa9d0556d689ace9e9tictac.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/0f611edad3d32dbff154c0fa9d0556d689ace9e9tictac.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/0f611edad3d32dbff154c0fa9d0556d689ace9e9tictac.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
#!/usr/bin/python
# Copyright (c) 2014,All rights reserved.
# This shows an example of using the publish.single helper function.
import sys
import os
import time
#MQTT Initialize.=======================
try:
import paho.mqtt.publish as publish
except ImportError:
# If you have the module installed, just use "import paho.mqtt.publish"
import os
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import paho.mqtt.publish as publish
#========================================
strChannel = "/inode/info"
print "Pulish to channel:", strChannel
#Using Mosquitto MQTT Borker.
#Local Server.
strBroker = "localhost"
#Lan Server.
#strBroker = "192.168.105.8"
#public server by SMI.
#strBroker = "112.124.67.178"
#test server by eclipse funds.
#strBroker = "m2m.eclipse.org"
ifcontinue = 1
while ifcontinue == 1:
publish.single(strChannel, "Tic...", hostname = strBroker)
time.sleep(0.1)
#publish.single(strChannel, "upload to server...", hostname = strBroker)
#time.sleep(1)
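# Hedged companion sketch (editor's addition, not in the original script): a
# minimal blocking subscriber for the same channel via paho's subscribe helper,
# assuming the same broker settings chosen above.
# import paho.mqtt.subscribe as subscribe
# msg = subscribe.simple(strChannel, hostname = strBroker)
# print msg.topic, msg.payload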
| 24.8125
| 137
| 0.675903
|
b9c92914bc3e834f32c2d4f3daac2f736c473f00
| 8,153
|
py
|
Python
|
download_rabbit.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
download_rabbit.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
download_rabbit.py
|
KellyHwong/Amiya-Is-Not-Donkey
|
398fea3282436d2222c409e3fafed7c786db3ea3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2017-09-27 23:02:19
# @Last Modified by: LC
# @Last Modified time: 2017-09-30 10:54:36
####################################################################################################################
# Download images from google with specified keywords for searching
# search query is created by "main_keyword + supplemented_keyword"
# if there are multiple keywords, each main_keyword will join with each supplemented_keyword
# Use selenium and urllib; each search query will download any number of images that google provides
# allow single process or multiple processes for downloading
# Pay attention that since selenium is used, a browser driver is required (the code below uses Chrome via chromedriver)
####################################################################################################################
import os
import json
import time
import logging
import urllib.request
import urllib.error
from urllib.parse import urlparse, quote
from multiprocessing import Pool
from user_agent import generate_user_agent
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def get_image_links(main_keyword, supplemented_keywords, link_file_path, num_requested=1000):
"""get image links with selenium
Args:
main_keyword (str): main keyword
supplemented_keywords (list[str]): list of supplemented keywords
link_file_path (str): path of the file to store the links
num_requested (int, optional): maximum number of images to download
Returns:
None
"""
print(main_keyword)
number_of_scrolls = int(num_requested / 400) + 1
# number_of_scrolls * 400 images will be opened in the browser
img_urls = set()
driver = webdriver.Chrome()
for i in range(len(supplemented_keywords)):
# search_query = quote(main_keyword + ' ' + supplemented_keywords[i])
search_query = quote(main_keyword)
url = "https://www.google.com/search?q="+search_query + \
"&source=lnms&tbm=isch&hl=en-US" # 加上 en-US
driver.get(url)
for _ in range(number_of_scrolls):
for __ in range(10):
# multiple scrolls needed to show all 400 images
driver.execute_script("window.scrollBy(0, 1000000)")
time.sleep(2)
# to load next 400 images
time.sleep(5)
try:
driver.find_element_by_xpath(
"//input[@value='Show more results']").click()
except Exception as e:
print(
"Process-{0} reach the end of page or get the maximum number of requested images".format(main_keyword))
break
# imgs = driver.find_elements_by_xpath('//div[@class="rg_meta"]') # not working anymore
imgs = driver.find_elements_by_xpath(
'//div[contains(@class,"rg_meta")]')
for img in imgs:
img_url = json.loads(img.get_attribute('innerHTML'))["ou"]
# img_type = json.loads(img.get_attribute('innerHTML'))["ity"]
img_urls.add(img_url)
print('Process-{0} add keyword {1} , got {2} image urls so far'.format(
main_keyword, supplemented_keywords[i], len(img_urls)))
print(
'Process-{0} totally get {1} images'.format(main_keyword, len(img_urls)))
driver.quit()
with open(link_file_path, 'w') as wf:
for url in img_urls:
wf.write(url + '\n')
print('Store all the links in file {0}'.format(link_file_path))
def download_images(link_file_path, download_dir, log_dir):
"""download images whose links are in the link file
Args:
link_file_path (str): path of file containing links of images
download_dir (str): directory to store the downloaded images
Returns:
None
"""
print('Start downloading with link file {0}..........'.format(
link_file_path))
if not os.path.exists(log_dir):
os.makedirs(log_dir)
main_keyword = link_file_path.split('/')[-1]
log_file = log_dir + 'download_selenium_{0}.log'.format(main_keyword)
logging.basicConfig(level=logging.DEBUG, filename=log_file, filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
img_dir = download_dir + main_keyword + '/'
count = 0
headers = {}
if not os.path.exists(img_dir):
os.makedirs(img_dir)
# start to download images
with open(link_file_path, 'r') as rf:
for link in rf:
try:
o = urlparse(link)
ref = o.scheme + '://' + o.hostname
#ref = 'https://www.google.com'
ua = generate_user_agent()
headers['User-Agent'] = ua
headers['referer'] = ref
print('\n{0}\n{1}\n{2}'.format(link.strip(), ref, ua))
req = urllib.request.Request(link.strip(), headers=headers)
response = urllib.request.urlopen(req)
data = response.read()
file_path = img_dir + '{0}.jpg'.format(count)
with open(file_path, 'wb') as wf:
wf.write(data)
print(
'Process-{0} download image {1}/{2}.jpg'.format(main_keyword, main_keyword, count))
count += 1
if count % 10 == 0:
print('Process-{0} is sleeping'.format(main_keyword))
time.sleep(5)
except urllib.error.URLError as e:
print('URLError')
logging.error(
'URLError while downloading image {0}reason:{1}'.format(link, e.reason))
continue
except urllib.error.HTTPError as e:
print('HTTPError')
logging.error('HTTPError while downloading image {0}http code {1}, reason:{2}'.format(
link, e.code, e.reason))
continue
except Exception as e:
print('Unexpected Error')
logging.error('Unexpected error while downloading image {0} error type:{1}, args:{2}'.format(
link, type(e), e.args))
continue
if __name__ == "__main__":
# main_keywords = ["rabbit", "donkey"]
main_keywords = ["donkey", "rabbit"]
supplemented_keywords = ["placeholder"]
num_requested = 8000
# test for chinese
# main_keywords = ['高兴', '悲伤', '惊讶']
# supplemented_keywords = ['人脸']
# test for japanese
# main_keywords = ['喜びます', 'きょうがいする', '悲しみ']
# supplemented_keywords = ['顔つき']
download_dir = './data/'
link_files_dir = './data/link_files/'
log_dir = './logs/'
for d in [download_dir, link_files_dir, log_dir]:
if not os.path.exists(d):
os.makedirs(d)
###################################
# get image links and store in file
###################################
# single process
# for keyword in main_keywords:
# link_file_path = link_files_dir + keyword
# get_image_links(keyword, supplemented_keywords, link_file_path)
# thread_num = 2
"""
thread_num = 1
p = Pool(thread_num)  # only two queries here
for keyword in main_keywords:
p.apply_async(get_image_links, args=(
keyword, supplemented_keywords, link_files_dir + keyword, num_requested))
p.close()
p.join()
print('Finished getting all image links')
"""
###################################
# download images with link file
###################################
# single process
# for keyword in main_keywords:
# link_file_path = link_files_dir + keyword
# download_images(link_file_path, download_dir)
# multiple processes
# default number of process is the number of cores of your CPU, change it by yourself
thread_num = 2
p = Pool(thread_num)
for keyword in main_keywords:
p.apply_async(download_images, args=(
link_files_dir + keyword, download_dir, log_dir))
p.close()
p.join()
print('Finished downloading all images')
| 37.74537
| 123
| 0.581749
|
a95a40c0884792810faf5f07ded70536fd16f829
| 2,168
|
py
|
Python
|
osp/citations/hlom_record.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 220
|
2016-01-22T21:19:02.000Z
|
2022-01-25T04:33:55.000Z
|
osp/citations/hlom_record.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14
|
2016-01-23T14:34:39.000Z
|
2016-09-19T19:58:37.000Z
|
osp/citations/hlom_record.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14
|
2016-02-03T13:47:48.000Z
|
2019-03-27T13:09:05.000Z
|
from osp.citations.utils import clean_field, tokenize_field
class HLOM_Record:
def __init__(self, record):
"""
Set the MARC record.
Args:
record (pymarc.Record): The raw MARC record.
"""
self.record = record
@property
def control_number(self):
"""
Get the control number.
Returns: str
"""
return clean_field(self.record['001'].format_field())
@property
def title(self):
"""
Get the title.
Returns: str
"""
return clean_field(self.record.title())
@property
def authors(self):
"""
Get the author array.
Returns: list
"""
author = clean_field(self.record.author())
return [author] if author else []
@property
def surname(self):
"""
Extract a surname.
Returns: str
"""
author = clean_field(self.record.author())
return author.split(',')[0] if author else None
@property
def publisher(self):
"""
Get the publisher.
Returns: str
"""
return clean_field(self.record.publisher())
@property
def date(self):
"""
Get the date.
Returns: str
"""
return clean_field(self.record.pubyear())
@property
def is_queryable(self):
"""
Does the record contain a query-able title and author?
Returns: bool
"""
title = self.title
surname = self.surname
return bool(
title and
len(tokenize_field(title)) and
surname and
len(tokenize_field(surname))
)
@property
def text(self):
"""
Assemble text fields.
Returns: dict
"""
return dict(
corpus = 'hlom',
identifier = self.control_number,
title = self.title,
surname = self.surname,
authors = self.authors,
publisher = self.publisher,
date = self.date,
)
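# Hedged usage sketch (editor's addition, not part of the original module):
# reads MARC records with pymarc and keeps the query-able ones; the input
# file name is an illustrative assumption.
# from pymarc import MARCReader
# with open('hlom_records.mrc', 'rb') as fh:
#     for marc in MARCReader(fh):
#         record = HLOM_Record(marc)
#         if record.is_queryable:
#             print(record.text)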
| 17.483871
| 62
| 0.500923
|
1893f9ee94474e37d4f03db40d227c0eaf0c4146
| 4,172
|
py
|
Python
|
image_classification/CvT/utils.py
|
haribhutanadhu/PaddleViT
|
05319fb775606874d8d3024964f1823f1ac56134
|
[
"Apache-2.0"
] | 993
|
2021-08-30T01:58:57.000Z
|
2022-03-31T14:03:07.000Z
|
image_classification/CvT/utils.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | 120
|
2021-09-03T13:05:32.000Z
|
2022-03-29T02:08:22.000Z
|
image_classification/CvT/utils.py
|
gdj8510/PaddleViT
|
6a2c063ddbd7e1d0f271eb4699516493e3fd327f
|
[
"Apache-2.0"
] | 253
|
2021-08-30T08:50:27.000Z
|
2022-03-26T09:21:08.000Z
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils for ViT
Contains AverageMeter for monitoring, get_exclude_from_weight_decay_fn and
WarmupCosineScheduler for training
"""
import math
from paddle.optimizer.lr import LRScheduler
class AverageMeter():
""" Meter for monitoring losses"""
def __init__(self):
self.avg = 0
self.sum = 0
self.cnt = 0
self.reset()
def reset(self):
"""reset all values to zeros"""
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
"""update avg by val and n, where val is the avg of n values"""
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def get_exclude_from_weight_decay_fn(exclude_list=[]):
""" Set params with no weight decay during the training
For certain params, e.g., positional encoding in ViT, weight decay
may not needed during the learning, this method is used to find
these params.
Args:
exclude_list: a list of params names which need to exclude
from weight decay.
Returns:
exclude_from_weight_decay_fn: a function returns True if param
will be excluded from weight decay
"""
if len(exclude_list) == 0:
exclude_from_weight_decay_fn = None
else:
def exclude_fn(param):
for name in exclude_list:
if param.endswith(name):
return False
return True
exclude_from_weight_decay_fn = exclude_fn
return exclude_from_weight_decay_fn
class WarmupCosineScheduler(LRScheduler):
"""Warmup Cosine Scheduler
First apply linear warmup, then apply cosine decay schedule.
Linearly increase learning rate from "warmup_start_lr" to "start_lr" over "warmup_epochs"
then decrease the learning rate from "start_lr" to "end_lr" following a cosine
schedule over the remaining "total_epochs - warmup_epochs"
Attributes:
learning_rate: the starting learning rate (without warmup), not used here!
warmup_start_lr: warmup starting learning rate
start_lr: the starting learning rate (without warmup)
end_lr: the ending learning rate after the whole schedule
warmup_epochs: # of epochs for warmup
total_epochs: # of total epochs (including warmup)
"""
def __init__(self,
learning_rate,
warmup_start_lr,
start_lr,
end_lr,
warmup_epochs,
total_epochs,
cycles=0.5,
last_epoch=-1,
verbose=False):
"""init WarmupCosineScheduler """
self.warmup_epochs = warmup_epochs
self.total_epochs = total_epochs
self.warmup_start_lr = warmup_start_lr
self.start_lr = start_lr
self.end_lr = end_lr
self.cycles = cycles
super(WarmupCosineScheduler, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
""" return lr value """
if self.last_epoch < self.warmup_epochs:
val = (self.start_lr - self.warmup_start_lr) * float(
self.last_epoch)/float(self.warmup_epochs) + self.warmup_start_lr
return val
progress = float(self.last_epoch - self.warmup_epochs) / float(
max(1, self.total_epochs - self.warmup_epochs))
val = max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
val = max(0.0, val * (self.start_lr - self.end_lr) + self.end_lr)
return val
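# Hedged usage sketch (editor's addition, not part of the original file):
# exercises AverageMeter and WarmupCosineScheduler on their own; the
# hyperparameter values below are illustrative assumptions.
if __name__ == '__main__':
    meter = AverageMeter()
    meter.update(0.50, n=4)  # mean loss over a batch of 4 samples
    meter.update(0.20, n=2)  # mean loss over a batch of 2 samples
    print(meter.avg)         # running average over all 6 samples: 0.4
    scheduler = WarmupCosineScheduler(learning_rate=0.1,
                                      warmup_start_lr=1e-6,
                                      start_lr=0.1,
                                      end_lr=1e-5,
                                      warmup_epochs=5,
                                      total_epochs=50)
    for epoch in range(50):
        print(epoch, scheduler.get_lr())  # lr is normally consumed by the optimizer
        scheduler.step()                  # advance the schedule by one epoch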
| 34.766667
| 93
| 0.639262
|
6b1c153bb2fb4a49f8f4471a3b6137efcfe26e04
| 1,769
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/subsequence-of-size-k-with-the-largest-even-sum.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/subsequence-of-size-k-with-the-largest-even-sum.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/subsequence-of-size-k-with-the-largest-even-sum.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n) on average
# Space: O(1)
import random
# quick select solution
class Solution(object):
def largestEvenSum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
nth_element(nums, k-1, compare=lambda a, b: a > b)
total = sum(nums[i] for i in range(k))
if total%2 == 0:
return total
min_k = [float("inf")]*2
for i in range(k):
min_k[nums[i]%2] = min(min_k[nums[i]%2], nums[i])
result = -1
for i in range(k, len(nums)):
result = max(result, total-min_k[not (nums[i]%2)]+nums[i])
return result
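# Hedged usage sketch (editor's addition, not part of the original solution);
# the input/output pairs below follow the problem statement as recalled and
# should be treated as assumptions.
if __name__ == "__main__":
    print(Solution().largestEvenSum([4, 1, 5, 3, 1], 3))  # expected: 12
    print(Solution().largestEvenSum([4, 6, 2], 3))        # expected: 12
    print(Solution().largestEvenSum([1, 3, 5], 1))        # expected: -1 (no even sum possible)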
| 34.019231
| 100
| 0.454494
|
e52654c14d0183704784f82d6f45bae996ace432
| 46,160
|
py
|
Python
|
Lib/test/test_coroutines.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_coroutines.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_coroutines.py
|
Hadron/python
|
73137f499ed658169f49273eee46845e3b53e800
|
[
"PSF-2.0"
] | null | null | null |
import contextlib
import copy
import inspect
import pickle
import sys
import types
import unittest
import warnings
from test import support
class AsyncYieldFrom:
def __init__(self, obj):
self.obj = obj
def __await__(self):
yield from self.obj
class AsyncYield:
def __init__(self, value):
self.value = value
def __await__(self):
yield self.value
def run_async(coro):
assert coro.__class__ in {types.GeneratorType, types.CoroutineType}
buffer = []
result = None
while True:
try:
buffer.append(coro.send(None))
except StopIteration as ex:
result = ex.args[0] if ex.args else None
break
return buffer, result
def run_async__await__(coro):
assert coro.__class__ is types.CoroutineType
aw = coro.__await__()
buffer = []
result = None
i = 0
while True:
try:
if i % 2:
buffer.append(next(aw))
else:
buffer.append(aw.send(None))
i += 1
except StopIteration as ex:
result = ex.args[0] if ex.args else None
break
return buffer, result
@contextlib.contextmanager
def silence_coro_gc():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield
support.gc_collect()
class AsyncBadSyntaxTest(unittest.TestCase):
def test_badsyntax_1(self):
with self.assertRaisesRegex(SyntaxError, "'await' outside"):
import test.badsyntax_async1
def test_badsyntax_2(self):
with self.assertRaisesRegex(SyntaxError, "'await' outside"):
import test.badsyntax_async2
def test_badsyntax_3(self):
with self.assertRaisesRegex(SyntaxError, 'invalid syntax'):
import test.badsyntax_async3
def test_badsyntax_4(self):
with self.assertRaisesRegex(SyntaxError, 'invalid syntax'):
import test.badsyntax_async4
def test_badsyntax_5(self):
with self.assertRaisesRegex(SyntaxError, 'invalid syntax'):
import test.badsyntax_async5
def test_badsyntax_6(self):
with self.assertRaisesRegex(
SyntaxError, "'yield' inside async function"):
import test.badsyntax_async6
def test_badsyntax_7(self):
with self.assertRaisesRegex(
SyntaxError, "'yield from' inside async function"):
import test.badsyntax_async7
def test_badsyntax_8(self):
with self.assertRaisesRegex(SyntaxError, 'invalid syntax'):
import test.badsyntax_async8
def test_badsyntax_9(self):
ns = {}
for comp in {'(await a for a in b)',
'[await a for a in b]',
'{await a for a in b}',
'{await a: c for a in b}'}:
with self.assertRaisesRegex(SyntaxError, 'await.*in comprehen'):
exec('async def f():\n\t{}'.format(comp), ns, ns)
def test_badsyntax_10(self):
# Tests for issue 24619
samples = [
"""async def foo():
def bar(): pass
await = 1
""",
"""async def foo():
def bar(): pass
await = 1
""",
"""async def foo():
def bar(): pass
if 1:
await = 1
""",
"""def foo():
async def bar(): pass
if 1:
await a
""",
"""def foo():
async def bar(): pass
await a
""",
"""def foo():
def baz(): pass
async def bar(): pass
await a
""",
"""def foo():
def baz(): pass
# 456
async def bar(): pass
# 123
await a
""",
"""async def foo():
def baz(): pass
# 456
async def bar(): pass
# 123
await = 2
""",
"""def foo():
def baz(): pass
async def bar(): pass
await a
""",
"""async def foo():
def baz(): pass
async def bar(): pass
await = 2
""",
"""async def foo():
def async(): pass
""",
"""async def foo():
def await(): pass
""",
"""async def foo():
def bar():
await
""",
"""async def foo():
return lambda async: await
""",
"""async def foo():
return lambda a: await
""",
"""await a()""",
"""async def foo(a=await b):
pass
""",
"""async def foo(a:await b):
pass
""",
"""def baz():
async def foo(a=await b):
pass
""",
"""async def foo(async):
pass
""",
"""async def foo():
def bar():
def baz():
async = 1
""",
"""async def foo():
def bar():
def baz():
pass
async = 1
""",
"""def foo():
async def bar():
async def baz():
pass
def baz():
42
async = 1
""",
"""async def foo():
def bar():
def baz():
pass\nawait foo()
""",
"""def foo():
def bar():
async def baz():
pass\nawait foo()
""",
"""async def foo(await):
pass
""",
"""def foo():
async def bar(): pass
await a
""",
"""def foo():
async def bar():
pass\nawait a
"""]
for code in samples:
with self.subTest(code=code), self.assertRaises(SyntaxError):
compile(code, "<test>", "exec")
def test_goodsyntax_1(self):
# Tests for issue 24619
def foo(await):
async def foo(): pass
async def foo():
pass
return await + 1
self.assertEqual(foo(10), 11)
def foo(await):
async def foo(): pass
async def foo(): pass
return await + 2
self.assertEqual(foo(20), 22)
def foo(await):
async def foo(): pass
async def foo(): pass
return await + 2
self.assertEqual(foo(20), 22)
def foo(await):
"""spam"""
async def foo(): \
pass
# 123
async def foo(): pass
# 456
return await + 2
self.assertEqual(foo(20), 22)
def foo(await):
def foo(): pass
def foo(): pass
async def bar(): return await_
await_ = await
try:
bar().send(None)
except StopIteration as ex:
return ex.args[0]
self.assertEqual(foo(42), 42)
async def f():
async def g(): pass
await z
await = 1
self.assertTrue(inspect.iscoroutinefunction(f))
class TokenizerRegrTest(unittest.TestCase):
def test_oneline_defs(self):
buf = []
for i in range(500):
buf.append('def i{i}(): return {i}'.format(i=i))
buf = '\n'.join(buf)
# Test that 500 consecutive one-line defs are OK
ns = {}
exec(buf, ns, ns)
self.assertEqual(ns['i499'](), 499)
# Test that 500 consecutive one-line defs *and*
# one 'async def' following them is OK
buf += '\nasync def foo():\n return'
ns = {}
exec(buf, ns, ns)
self.assertEqual(ns['i499'](), 499)
self.assertTrue(inspect.iscoroutinefunction(ns['foo']))
class CoroutineTest(unittest.TestCase):
def test_gen_1(self):
def gen(): yield
self.assertFalse(hasattr(gen, '__await__'))
def test_func_1(self):
async def foo():
return 10
f = foo()
self.assertIsInstance(f, types.CoroutineType)
self.assertTrue(bool(foo.__code__.co_flags & inspect.CO_COROUTINE))
self.assertFalse(bool(foo.__code__.co_flags & inspect.CO_GENERATOR))
self.assertTrue(bool(f.cr_code.co_flags & inspect.CO_COROUTINE))
self.assertFalse(bool(f.cr_code.co_flags & inspect.CO_GENERATOR))
self.assertEqual(run_async(f), ([], 10))
self.assertEqual(run_async__await__(foo()), ([], 10))
def bar(): pass
self.assertFalse(bool(bar.__code__.co_flags & inspect.CO_COROUTINE))
def test_func_2(self):
async def foo():
raise StopIteration
with self.assertRaisesRegex(
RuntimeError, "coroutine raised StopIteration"):
run_async(foo())
def test_func_3(self):
async def foo():
raise StopIteration
with silence_coro_gc():
self.assertRegex(repr(foo()), '^<coroutine object.* at 0x.*>$')
def test_func_4(self):
async def foo():
raise StopIteration
check = lambda: self.assertRaisesRegex(
TypeError, "'coroutine' object is not iterable")
with check():
list(foo())
with check():
tuple(foo())
with check():
sum(foo())
with check():
iter(foo())
with silence_coro_gc(), check():
for i in foo():
pass
with silence_coro_gc(), check():
[i for i in foo()]
def test_func_5(self):
@types.coroutine
def bar():
yield 1
async def foo():
await bar()
check = lambda: self.assertRaisesRegex(
TypeError, "'coroutine' object is not iterable")
with check():
for el in foo(): pass
# the following should pass without an error
for el in bar():
self.assertEqual(el, 1)
self.assertEqual([el for el in bar()], [1])
self.assertEqual(tuple(bar()), (1,))
self.assertEqual(next(iter(bar())), 1)
def test_func_6(self):
@types.coroutine
def bar():
yield 1
yield 2
async def foo():
await bar()
f = foo()
self.assertEqual(f.send(None), 1)
self.assertEqual(f.send(None), 2)
with self.assertRaises(StopIteration):
f.send(None)
def test_func_7(self):
async def bar():
return 10
def foo():
yield from bar()
with silence_coro_gc(), self.assertRaisesRegex(
TypeError,
"cannot 'yield from' a coroutine object in a non-coroutine generator"):
list(foo())
def test_func_8(self):
@types.coroutine
def bar():
return (yield from foo())
async def foo():
return 'spam'
self.assertEqual(run_async(bar()), ([], 'spam') )
def test_func_9(self):
async def foo(): pass
with self.assertWarnsRegex(
RuntimeWarning, "coroutine '.*test_func_9.*foo' was never awaited"):
foo()
support.gc_collect()
def test_func_10(self):
N = 0
@types.coroutine
def gen():
nonlocal N
try:
a = yield
yield (a ** 2)
except ZeroDivisionError:
N += 100
raise
finally:
N += 1
async def foo():
await gen()
coro = foo()
aw = coro.__await__()
self.assertIs(aw, iter(aw))
next(aw)
self.assertEqual(aw.send(10), 100)
self.assertEqual(N, 0)
aw.close()
self.assertEqual(N, 1)
coro = foo()
aw = coro.__await__()
next(aw)
with self.assertRaises(ZeroDivisionError):
aw.throw(ZeroDivisionError, None, None)
self.assertEqual(N, 102)
def test_func_11(self):
async def func(): pass
coro = func()
# Test that PyCoro_Type and _PyCoroWrapper_Type types were properly
# initialized
self.assertIn('__await__', dir(coro))
self.assertIn('__iter__', dir(coro.__await__()))
self.assertIn('coroutine_wrapper', repr(coro.__await__()))
coro.close() # avoid RuntimeWarning
def test_func_12(self):
async def g():
i = me.send(None)
await foo
me = g()
with self.assertRaisesRegex(ValueError,
"coroutine already executing"):
me.send(None)
def test_func_13(self):
async def g():
pass
with self.assertRaisesRegex(
TypeError,
"can't send non-None value to a just-started coroutine"):
g().send('spam')
def test_func_14(self):
@types.coroutine
def gen():
yield
async def coro():
try:
await gen()
except GeneratorExit:
await gen()
c = coro()
c.send(None)
with self.assertRaisesRegex(RuntimeError,
"coroutine ignored GeneratorExit"):
c.close()
def test_func_15(self):
# See http://bugs.python.org/issue25887 for details
async def spammer():
return 'spam'
async def reader(coro):
return await coro
spammer_coro = spammer()
with self.assertRaisesRegex(StopIteration, 'spam'):
reader(spammer_coro).send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader(spammer_coro).send(None)
def test_func_16(self):
# See http://bugs.python.org/issue25887 for details
@types.coroutine
def nop():
yield
async def send():
await nop()
return 'spam'
async def read(coro):
await nop()
return await coro
spammer = send()
reader = read(spammer)
reader.send(None)
reader.send(None)
with self.assertRaisesRegex(Exception, 'ham'):
reader.throw(Exception('ham'))
reader = read(spammer)
reader.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader.throw(Exception('wat'))
def test_func_17(self):
# See http://bugs.python.org/issue25887 for details
async def coroutine():
return 'spam'
coro = coroutine()
with self.assertRaisesRegex(StopIteration, 'spam'):
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
coro.throw(Exception('wat'))
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
coro.close()
coro.close()
def test_func_18(self):
# See http://bugs.python.org/issue25887 for details
async def coroutine():
return 'spam'
coro = coroutine()
await_iter = coro.__await__()
it = iter(await_iter)
with self.assertRaisesRegex(StopIteration, 'spam'):
it.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
# Although the iterator protocol requires iterators to
# raise another StopIteration here, we don't want to do
# that. In this particular case, the iterator will raise
# a RuntimeError, so that 'yield from' and 'await'
# expressions will trigger the error, instead of silently
# ignoring the call.
next(it)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.throw(Exception('wat'))
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.throw(Exception('wat'))
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
it.close()
it.close()
def test_func_19(self):
CHK = 0
@types.coroutine
def foo():
nonlocal CHK
yield
try:
yield
except GeneratorExit:
CHK += 1
async def coroutine():
await foo()
coro = coroutine()
coro.send(None)
coro.send(None)
self.assertEqual(CHK, 0)
coro.close()
self.assertEqual(CHK, 1)
for _ in range(3):
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
coro.close()
self.assertEqual(CHK, 1)
def test_cr_await(self):
@types.coroutine
def a():
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_RUNNING)
self.assertIsNone(coro_b.cr_await)
yield
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_RUNNING)
self.assertIsNone(coro_b.cr_await)
async def c():
await a()
async def b():
self.assertIsNone(coro_b.cr_await)
await c()
self.assertIsNone(coro_b.cr_await)
coro_b = b()
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_CREATED)
self.assertIsNone(coro_b.cr_await)
coro_b.send(None)
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_SUSPENDED)
self.assertEqual(coro_b.cr_await.cr_await.gi_code.co_name, 'a')
with self.assertRaises(StopIteration):
coro_b.send(None) # complete coroutine
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_CLOSED)
self.assertIsNone(coro_b.cr_await)
def test_corotype_1(self):
ct = types.CoroutineType
self.assertIn('into coroutine', ct.send.__doc__)
self.assertIn('inside coroutine', ct.close.__doc__)
self.assertIn('in coroutine', ct.throw.__doc__)
self.assertIn('of the coroutine', ct.__dict__['__name__'].__doc__)
self.assertIn('of the coroutine', ct.__dict__['__qualname__'].__doc__)
self.assertEqual(ct.__name__, 'coroutine')
async def f(): pass
c = f()
self.assertIn('coroutine object', repr(c))
c.close()
def test_await_1(self):
async def foo():
await 1
with self.assertRaisesRegex(TypeError, "object int can.t.*await"):
run_async(foo())
def test_await_2(self):
async def foo():
await []
with self.assertRaisesRegex(TypeError, "object list can.t.*await"):
run_async(foo())
def test_await_3(self):
async def foo():
await AsyncYieldFrom([1, 2, 3])
self.assertEqual(run_async(foo()), ([1, 2, 3], None))
self.assertEqual(run_async__await__(foo()), ([1, 2, 3], None))
def test_await_4(self):
async def bar():
return 42
async def foo():
return await bar()
self.assertEqual(run_async(foo()), ([], 42))
def test_await_5(self):
class Awaitable:
def __await__(self):
return
async def foo():
return (await Awaitable())
with self.assertRaisesRegex(
TypeError, "__await__.*returned non-iterator of type"):
run_async(foo())
def test_await_6(self):
class Awaitable:
def __await__(self):
return iter([52])
async def foo():
return (await Awaitable())
self.assertEqual(run_async(foo()), ([52], None))
def test_await_7(self):
class Awaitable:
def __await__(self):
yield 42
return 100
async def foo():
return (await Awaitable())
self.assertEqual(run_async(foo()), ([42], 100))
def test_await_8(self):
class Awaitable:
pass
async def foo(): return await Awaitable()
with self.assertRaisesRegex(
TypeError, "object Awaitable can't be used in 'await' expression"):
run_async(foo())
def test_await_9(self):
def wrap():
return bar
async def bar():
return 42
async def foo():
b = bar()
db = {'b': lambda: wrap}
class DB:
b = wrap
return (await bar() + await wrap()() + await db['b']()()() +
await bar() * 1000 + await DB.b()())
async def foo2():
return -await bar()
self.assertEqual(run_async(foo()), ([], 42168))
self.assertEqual(run_async(foo2()), ([], -42))
def test_await_10(self):
async def baz():
return 42
async def bar():
return baz()
async def foo():
return await (await bar())
self.assertEqual(run_async(foo()), ([], 42))
def test_await_11(self):
def ident(val):
return val
async def bar():
return 'spam'
async def foo():
return ident(val=await bar())
async def foo2():
return await bar(), 'ham'
self.assertEqual(run_async(foo2()), ([], ('spam', 'ham')))
def test_await_12(self):
async def coro():
return 'spam'
class Awaitable:
def __await__(self):
return coro()
async def foo():
return await Awaitable()
with self.assertRaisesRegex(
TypeError, "__await__\(\) returned a coroutine"):
run_async(foo())
def test_await_13(self):
class Awaitable:
def __await__(self):
return self
async def foo():
return await Awaitable()
with self.assertRaisesRegex(
TypeError, "__await__.*returned non-iterator of type"):
run_async(foo())
def test_await_14(self):
class Wrapper:
# Forces the interpreter to use CoroutineType.__await__
def __init__(self, coro):
assert coro.__class__ is types.CoroutineType
self.coro = coro
def __await__(self):
return self.coro.__await__()
class FutureLike:
def __await__(self):
return (yield)
class Marker(Exception):
pass
async def coro1():
try:
return await FutureLike()
except ZeroDivisionError:
raise Marker
async def coro2():
return await Wrapper(coro1())
c = coro2()
c.send(None)
with self.assertRaisesRegex(StopIteration, 'spam'):
c.send('spam')
c = coro2()
c.send(None)
with self.assertRaises(Marker):
c.throw(ZeroDivisionError)
def test_await_15(self):
@types.coroutine
def nop():
yield
async def coroutine():
await nop()
async def waiter(coro):
await coro
coro = coroutine()
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
"coroutine is being awaited already"):
waiter(coro).send(None)
def test_with_1(self):
class Manager:
def __init__(self, name):
self.name = name
async def __aenter__(self):
await AsyncYieldFrom(['enter-1-' + self.name,
'enter-2-' + self.name])
return self
async def __aexit__(self, *args):
await AsyncYieldFrom(['exit-1-' + self.name,
'exit-2-' + self.name])
if self.name == 'B':
return True
async def foo():
async with Manager("A") as a, Manager("B") as b:
await AsyncYieldFrom([('managers', a.name, b.name)])
1/0
f = foo()
result, _ = run_async(f)
self.assertEqual(
result, ['enter-1-A', 'enter-2-A', 'enter-1-B', 'enter-2-B',
('managers', 'A', 'B'),
'exit-1-B', 'exit-2-B', 'exit-1-A', 'exit-2-A']
)
async def foo():
async with Manager("A") as a, Manager("C") as c:
await AsyncYieldFrom([('managers', a.name, c.name)])
1/0
with self.assertRaises(ZeroDivisionError):
run_async(foo())
def test_with_2(self):
class CM:
def __aenter__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aexit__'):
run_async(foo())
def test_with_3(self):
class CM:
def __aexit__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aenter__'):
run_async(foo())
def test_with_4(self):
class CM:
def __enter__(self):
pass
def __exit__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aexit__'):
run_async(foo())
def test_with_5(self):
# While this test doesn't make a lot of sense,
# it's a regression test for an early bug with opcodes
# generation
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *exc):
pass
async def func():
async with CM():
assert (1, ) == 1
with self.assertRaises(AssertionError):
run_async(func())
def test_with_6(self):
class CM:
def __aenter__(self):
return 123
def __aexit__(self, *e):
return 456
async def foo():
async with CM():
pass
with self.assertRaisesRegex(
TypeError, "object int can't be used in 'await' expression"):
# it's important that __aexit__ wasn't called
run_async(foo())
def test_with_7(self):
class CM:
async def __aenter__(self):
return self
def __aexit__(self, *e):
return 444
async def foo():
async with CM():
1/0
try:
run_async(foo())
except TypeError as exc:
self.assertRegex(
exc.args[0], "object int can't be used in 'await' expression")
self.assertTrue(exc.__context__ is not None)
self.assertTrue(isinstance(exc.__context__, ZeroDivisionError))
else:
self.fail('invalid asynchronous context manager did not fail')
def test_with_8(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
def __aexit__(self, *e):
return 456
async def foo():
nonlocal CNT
async with CM():
CNT += 1
with self.assertRaisesRegex(
TypeError, "object int can't be used in 'await' expression"):
run_async(foo())
self.assertEqual(CNT, 1)
def test_with_9(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
CNT += 1
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 1)
def test_with_10(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
async with CM():
raise RuntimeError
try:
run_async(foo())
except ZeroDivisionError as exc:
self.assertTrue(exc.__context__ is not None)
self.assertTrue(isinstance(exc.__context__, ZeroDivisionError))
self.assertTrue(isinstance(exc.__context__.__context__,
RuntimeError))
else:
self.fail('exception from __aexit__ did not propagate')
def test_with_11(self):
CNT = 0
class CM:
async def __aenter__(self):
raise NotImplementedError
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
raise RuntimeError
try:
run_async(foo())
except NotImplementedError as exc:
self.assertTrue(exc.__context__ is None)
else:
self.fail('exception from __aenter__ did not propagate')
def test_with_12(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
return True
async def foo():
nonlocal CNT
async with CM() as cm:
self.assertIs(cm.__class__, CM)
raise RuntimeError
run_async(foo())
def test_with_13(self):
CNT = 0
class CM:
async def __aenter__(self):
1/0
async def __aexit__(self, *e):
return True
async def foo():
nonlocal CNT
CNT += 1
async with CM():
CNT += 1000
CNT += 10000
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 1)
def test_for_1(self):
aiter_calls = 0
class AsyncIter:
def __init__(self):
self.i = 0
async def __aiter__(self):
nonlocal aiter_calls
aiter_calls += 1
return self
async def __anext__(self):
self.i += 1
if not (self.i % 10):
await AsyncYield(self.i * 10)
if self.i > 100:
raise StopAsyncIteration
return self.i, self.i
buffer = []
async def test1():
with self.assertWarnsRegex(PendingDeprecationWarning, "legacy"):
async for i1, i2 in AsyncIter():
buffer.append(i1 + i2)
yielded, _ = run_async(test1())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 1)
self.assertEqual(yielded, [i * 100 for i in range(1, 11)])
self.assertEqual(buffer, [i*2 for i in range(1, 101)])
buffer = []
async def test2():
nonlocal buffer
with self.assertWarnsRegex(PendingDeprecationWarning, "legacy"):
async for i in AsyncIter():
buffer.append(i[0])
if i[0] == 20:
break
else:
buffer.append('what?')
buffer.append('end')
yielded, _ = run_async(test2())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 2)
self.assertEqual(yielded, [100, 200])
self.assertEqual(buffer, [i for i in range(1, 21)] + ['end'])
buffer = []
async def test3():
nonlocal buffer
with self.assertWarnsRegex(PendingDeprecationWarning, "legacy"):
async for i in AsyncIter():
if i[0] > 20:
continue
buffer.append(i[0])
else:
buffer.append('what?')
buffer.append('end')
yielded, _ = run_async(test3())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 3)
self.assertEqual(yielded, [i * 100 for i in range(1, 11)])
self.assertEqual(buffer, [i for i in range(1, 21)] +
['what?', 'end'])
def test_for_2(self):
tup = (1, 2, 3)
refs_before = sys.getrefcount(tup)
async def foo():
async for i in tup:
print('never going to happen')
with self.assertRaisesRegex(
TypeError, "async for' requires an object.*__aiter__.*tuple"):
run_async(foo())
self.assertEqual(sys.getrefcount(tup), refs_before)
def test_for_3(self):
class I:
def __aiter__(self):
return self
aiter = I()
refs_before = sys.getrefcount(aiter)
async def foo():
async for i in aiter:
print('never going to happen')
with self.assertRaisesRegex(
TypeError,
"async for' received an invalid object.*__aiter.*\: I"):
run_async(foo())
self.assertEqual(sys.getrefcount(aiter), refs_before)
def test_for_4(self):
class I:
def __aiter__(self):
return self
def __anext__(self):
return ()
aiter = I()
refs_before = sys.getrefcount(aiter)
async def foo():
async for i in aiter:
print('never going to happen')
with self.assertRaisesRegex(
TypeError,
"async for' received an invalid object.*__anext__.*tuple"):
run_async(foo())
self.assertEqual(sys.getrefcount(aiter), refs_before)
def test_for_5(self):
class I:
async def __aiter__(self):
return self
def __anext__(self):
return 123
async def foo():
with self.assertWarnsRegex(PendingDeprecationWarning, "legacy"):
async for i in I():
print('never going to happen')
with self.assertRaisesRegex(
TypeError,
"async for' received an invalid object.*__anext.*int"):
run_async(foo())
def test_for_6(self):
I = 0
class Manager:
async def __aenter__(self):
nonlocal I
I += 10000
async def __aexit__(self, *args):
nonlocal I
I += 100000
class Iterable:
def __init__(self):
self.i = 0
def __aiter__(self):
return self
async def __anext__(self):
if self.i > 10:
raise StopAsyncIteration
self.i += 1
return self.i
##############
manager = Manager()
iterable = Iterable()
mrefs_before = sys.getrefcount(manager)
irefs_before = sys.getrefcount(iterable)
async def main():
nonlocal I
async with manager:
async for i in iterable:
I += 1
I += 1000
with warnings.catch_warnings():
warnings.simplefilter("error")
# Test that __aiter__ that returns an asynchronous iterator
# directly does not throw any warnings.
run_async(main())
self.assertEqual(I, 111011)
self.assertEqual(sys.getrefcount(manager), mrefs_before)
self.assertEqual(sys.getrefcount(iterable), irefs_before)
##############
async def main():
nonlocal I
async with Manager():
async for i in Iterable():
I += 1
I += 1000
async with Manager():
async for i in Iterable():
I += 1
I += 1000
run_async(main())
self.assertEqual(I, 333033)
##############
async def main():
nonlocal I
async with Manager():
I += 100
async for i in Iterable():
I += 1
else:
I += 10000000
I += 1000
async with Manager():
I += 100
async for i in Iterable():
I += 1
else:
I += 10000000
I += 1000
run_async(main())
self.assertEqual(I, 20555255)
def test_for_7(self):
CNT = 0
class AI:
async def __aiter__(self):
1/0
async def foo():
nonlocal CNT
with self.assertWarnsRegex(PendingDeprecationWarning, "legacy"):
async for i in AI():
CNT += 1
CNT += 10
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 0)
def test_for_8(self):
CNT = 0
class AI:
def __aiter__(self):
1/0
async def foo():
nonlocal CNT
async for i in AI():
CNT += 1
CNT += 10
with self.assertRaises(ZeroDivisionError):
with warnings.catch_warnings():
warnings.simplefilter("error")
# Test that if __aiter__ raises an exception it propagates
# without any kind of warning.
run_async(foo())
self.assertEqual(CNT, 0)
def test_for_9(self):
# Test that PendingDeprecationWarning can safely be converted into
# an exception (__aiter__ should not have a chance to raise
# a ZeroDivisionError.)
class AI:
async def __aiter__(self):
1/0
async def foo():
async for i in AI():
pass
with self.assertRaises(PendingDeprecationWarning):
with warnings.catch_warnings():
warnings.simplefilter("error")
run_async(foo())
def test_for_10(self):
# Test that PendingDeprecationWarning can safely be converted into
# an exception.
class AI:
async def __aiter__(self):
pass
async def foo():
async for i in AI():
pass
with self.assertRaises(PendingDeprecationWarning):
with warnings.catch_warnings():
warnings.simplefilter("error")
run_async(foo())
def test_copy(self):
async def func(): pass
coro = func()
with self.assertRaises(TypeError):
copy.copy(coro)
aw = coro.__await__()
try:
with self.assertRaises(TypeError):
copy.copy(aw)
finally:
aw.close()
def test_pickle(self):
async def func(): pass
coro = func()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(coro, proto)
aw = coro.__await__()
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(aw, proto)
finally:
aw.close()
def test_fatal_coro_warning(self):
# Issue 27811
async def func(): pass
with warnings.catch_warnings(), support.captured_stderr() as stderr:
warnings.filterwarnings("error")
func()
support.gc_collect()
self.assertIn("was never awaited", stderr.getvalue())
class CoroAsyncIOCompatTest(unittest.TestCase):
def test_asyncio_1(self):
# asyncio cannot be imported when Python is compiled without thread
# support
asyncio = support.import_module('asyncio')
class MyException(Exception):
pass
buffer = []
class CM:
async def __aenter__(self):
buffer.append(1)
await asyncio.sleep(0.01)
buffer.append(2)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await asyncio.sleep(0.01)
buffer.append(exc_type.__name__)
async def f():
async with CM() as c:
await asyncio.sleep(0.01)
raise MyException
buffer.append('unreachable')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(f())
except MyException:
pass
finally:
loop.close()
asyncio.set_event_loop(None)
self.assertEqual(buffer, [1, 2, 'MyException'])
class SysSetCoroWrapperTest(unittest.TestCase):
def test_set_wrapper_1(self):
async def foo():
return 'spam'
wrapped = None
def wrap(gen):
nonlocal wrapped
wrapped = gen
return gen
self.assertIsNone(sys.get_coroutine_wrapper())
sys.set_coroutine_wrapper(wrap)
self.assertIs(sys.get_coroutine_wrapper(), wrap)
try:
f = foo()
self.assertTrue(wrapped)
self.assertEqual(run_async(f), ([], 'spam'))
finally:
sys.set_coroutine_wrapper(None)
self.assertIsNone(sys.get_coroutine_wrapper())
wrapped = None
with silence_coro_gc():
foo()
self.assertFalse(wrapped)
def test_set_wrapper_2(self):
self.assertIsNone(sys.get_coroutine_wrapper())
with self.assertRaisesRegex(TypeError, "callable expected, got int"):
sys.set_coroutine_wrapper(1)
self.assertIsNone(sys.get_coroutine_wrapper())
def test_set_wrapper_3(self):
async def foo():
return 'spam'
def wrapper(coro):
async def wrap(coro):
return await coro
return wrap(coro)
sys.set_coroutine_wrapper(wrapper)
try:
with silence_coro_gc(), self.assertRaisesRegex(
RuntimeError,
"coroutine wrapper.*\.wrapper at 0x.*attempted to "
"recursively wrap .* wrap .*"):
foo()
finally:
sys.set_coroutine_wrapper(None)
def test_set_wrapper_4(self):
@types.coroutine
def foo():
return 'spam'
wrapped = None
def wrap(gen):
nonlocal wrapped
wrapped = gen
return gen
sys.set_coroutine_wrapper(wrap)
try:
foo()
self.assertIs(
wrapped, None,
"generator-based coroutine was wrapped via "
"sys.set_coroutine_wrapper")
finally:
sys.set_coroutine_wrapper(None)
class CAPITest(unittest.TestCase):
def test_tp_await_1(self):
from _testcapi import awaitType as at
async def foo():
future = at(iter([1]))
return (await future)
self.assertEqual(foo().send(None), 1)
def test_tp_await_2(self):
# Test tp_await to __await__ mapping
from _testcapi import awaitType as at
future = at(iter([1]))
self.assertEqual(next(future.__await__()), 1)
def test_tp_await_3(self):
from _testcapi import awaitType as at
async def foo():
future = at(1)
return (await future)
with self.assertRaisesRegex(
TypeError, "__await__.*returned non-iterator of type 'int'"):
self.assertEqual(foo().send(None), 1)
if __name__=="__main__":
unittest.main()
| 26.682081
| 85
| 0.50091
|
83160f8dcc593905512c5a61f7684051a25f6b5a
| 20,259
|
py
|
Python
|
ServidorPython/python32_web/Lib/distutils/sysconfig.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 3
|
2019-04-30T23:38:57.000Z
|
2020-04-06T10:05:08.000Z
|
ServidorPython/python32_web/Lib/distutils/sysconfig.py
|
mak213k/Servidor_automatizado_python
|
4403ef8027a2f814220baacc95856cf5fbf01d21
|
[
"MIT"
] | 6
|
2020-09-05T02:31:22.000Z
|
2022-02-26T12:14:53.000Z
|
Lib/distutils/sysconfig.py
|
koyota79/weekly-report
|
a9f518965d9f51e81c13711a8b0bfda336eae2a0
|
[
"bzip2-1.0.6"
] | 1
|
2019-09-30T23:47:05.000Z
|
2019-09-30T23:47:05.000Z
|
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
import _imp
import os
import re
import sys
from .errors import DistutilsPlatformError
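# Illustrative usage sketch (the values shown are examples only; actual results
# depend on the interpreter and platform this module is imported under):
#
#     >>> from distutils import sysconfig
#     >>> sysconfig.get_python_version()          # e.g. '3.6'
#     >>> sysconfig.get_python_inc()              # directory containing Python.h
#     >>> sysconfig.get_config_var('EXT_SUFFIX')  # e.g. '.cpython-36m-x86_64-linux-gnu.so'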
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
project_base = os.path.dirname(os.path.abspath(sys.executable))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _is_python_source_dir(d):
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if os.name == 'nt':
def _fix_pcbuild(d):
if d and os.path.normcase(d).startswith(
os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
return PREFIX
return d
project_base = _fix_pcbuild(project_base)
_sys_home = _fix_pcbuild(_sys_home)
def _python_build():
if _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(project_base)
python_build = _python_build()
# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
if not python_build:
build_flags = sys.abiflags
except AttributeError:
# It's not a configure-based build, so the sys module doesn't have
# this attribute, which is fine.
pass
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return '%d.%d' % sys.version_info[:2]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
if os.name == "posix":
if python_build:
# Assume the executable is in the build directory. The
# pyconfig.h file should be in the same directory. Since
# the build directory may not be the source directory, we
# must use "srcdir" from the makefile to find the "Include"
# directory.
if plat_specific:
return _sys_home or project_base
else:
incdir = os.path.join(get_config_var('srcdir'), 'Include')
return os.path.normpath(incdir)
python_dir = 'python' + get_python_version() + build_flags
return os.path.join(prefix, "include", python_dir)
elif os.name == "nt":
if python_build:
# Include both the include and PC dir to ensure we can find
# pyconfig.h
return (os.path.join(prefix, "include") + os.path.pathsep +
os.path.join(prefix, "PC"))
return os.path.join(prefix, "include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
if standard_lib:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
else:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = shlib_suffix
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or project_base, "PC")
else:
inc_dir = _sys_home or project_base
else:
inc_dir = get_python_inc(plat_specific=1)
return os.path.join(inc_dir, 'pyconfig.h')
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(_sys_home or project_base, "Makefile")
lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
config_file = 'config-{}{}'.format(get_python_version(), build_flags)
if hasattr(sys.implementation, '_multiarch'):
config_file += '-%s' % sys.implementation._multiarch
return os.path.join(lib_dir, config_file, 'Makefile')
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
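# Illustrative example of parse_config_h (SIZEOF_LONG is just one typical
# pyconfig.h define; the actual contents vary by build):
#
#     >>> with open(get_config_h_filename()) as f:
#     ...     conf = parse_config_h(f)
#     >>> conf.get('SIZEOF_LONG')   # e.g. 8 on common 64-bit platforms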
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if name.startswith('PY_') and name[3:] in renamed_variables:
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while True:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
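# Illustrative example of expand_makefile_vars:
#
#     >>> expand_makefile_vars('$(CC) -shared $(LDFLAGS)', {'CC': 'gcc', 'LDFLAGS': '-g'})
#     'gcc -shared -g'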
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see the sysconfig module
name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
'_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
abi=sys.abiflags,
platform=sys.platform,
multiarch=getattr(sys.implementation, '_multiarch', ''),
))
_temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
build_time_vars = _temp.build_time_vars
global _config_vars
_config_vars = {}
_config_vars.update(build_time_vars)
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
# For backward compatibility, see issue19555
SO = _config_vars.get('EXT_SUFFIX')
if SO is not None:
_config_vars['SO'] = SO
# Always convert srcdir to an absolute path
srcdir = _config_vars.get('srcdir', project_base)
if os.name == 'posix':
if python_build:
# If srcdir is a relative path (typically '.' or '..')
# then it should be interpreted relative to the directory
# containing Makefile.
base = os.path.dirname(get_makefile_filename())
srcdir = os.path.join(base, srcdir)
else:
# srcdir is not meaningful since the installation is
# spread about the filesystem. We choose the
# directory containing the Makefile since we know it
# exists.
srcdir = os.path.dirname(get_makefile_filename())
_config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if python_build and os.name == "posix":
base = project_base
if (not os.path.isabs(_config_vars['srcdir']) and
base != os.getcwd()):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _config_vars['srcdir'])
_config_vars['srcdir'] = os.path.normpath(srcdir)
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
| 37.104396
| 94
| 0.597611
|
58726b72221d1d44b451a2a8e5b0f802779033fe
| 24,199
|
py
|
Python
|
Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
import jwt
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
APP_NAME = 'ms-management-api'
PUBLISHER_IDENTIFIER = 'ebac1a16-81bf-449b-8d43-5732c3c1d999' # This isn't a secret and is public knowledge.
TIMEOUT_DEFAULT = 15
CONTENT_TYPE_TO_TYPE_ID_MAPPING = {
'ExchangeAdmin': 1,
'ExchangeItem': 2,
'ExchangeItemGroup': 3,
'SharePoint': 4,
'SharePointFileOperation': 6,
'AzureActiveDirectory': 8,
'AzureActiveDirectoryAccountLogon': 9,
'DataCenterSecurityCmdlet': 10,
'ComplianceDLPSharePoint': 11,
'Sway': 12,
'ComplianceDLPExchange': 13,
'SharePointSharingOperation': 14,
'AzureActiveDirectoryStsLogon': 15,
'SecurityComplianceCenterEOPCmdlet': 18,
'PowerBIAudit': 20,
'CRM': 21,
'Yammer': 22,
'SkypeForBusinessCmdlets': 23,
'Discovery': 24,
'MicrosoftTeams': 25,
'ThreatIntelligence': 28,
'MailSubmission': 29,
'MicrosoftFlow': 30,
'AeD': 31,
'MicrosoftStream': 32,
'ComplianceDLPSharePointClassification': 33,
'Project': 35,
'SharePointListOperation': 36,
'DataGovernance': 38,
'SecurityComplianceAlerts': 40,
'ThreatIntelligenceUrl': 41,
'SecurityComplianceInsights': 42,
'WorkplaceAnalytics': 44,
'PowerAppsApp': 45,
'ThreatIntelligenceAtpContent': 47,
'TeamsHealthcare': 49,
'DataInsightsRestApiAudit': 52,
'SharePointListItemOperation': 54,
'SharePointContentTypeOperation': 55,
'SharePointFieldOperation': 56,
'AirInvestigation': 64,
'Quarantine': 65,
'MicrosoftForms': 66
}
# Transferring content types to lowercase to prevent user errors (such as 'quarantine' instead of 'Quarantine')
CONTENT_TYPE_TO_TYPE_ID_MAPPING = {key.lower(): value for key, value in CONTENT_TYPE_TO_TYPE_ID_MAPPING.items()}
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, base_url: str, verify: bool,
proxy: bool, self_deployed, refresh_token: str, auth_and_token_url: str,
enc_key: str, auth_code: str, tenant_id: str, redirect_uri: str, timeout: int):
super().__init__(base_url=base_url, verify=verify, proxy=proxy)
self.tenant_id = tenant_id
self.suffix_template = '{}/activity/feed/subscriptions/{}'
self.access_token = None
self.self_deployed = self_deployed
self.refresh_token = refresh_token
self.auth_and_token_url = auth_and_token_url
self.enc_key = enc_key
self.timeout = timeout
self.ms_client = MicrosoftClient(self_deployed=self.self_deployed,
tenant_id=self.tenant_id,
auth_id=self.auth_and_token_url,
enc_key=self.enc_key,
app_name=APP_NAME,
base_url=base_url,
grant_type=AUTHORIZATION_CODE,
verify=verify,
proxy=proxy,
refresh_token=self.refresh_token,
ok_codes=(200, 201, 202, 204),
timeout=self.timeout,
scope='',
auth_code=auth_code,
resource='https://manage.office.com',
token_retrieval_url='https://login.windows.net/common/oauth2/token',
redirect_uri=redirect_uri)
def http_request(self, method, url_suffix='', full_url=None, headers=None, params=None, timeout=None, ok_codes=None,
return_empty_response=False, **kwargs):
"""
Calls the built in http_request, replacing a None timeout with self.timeout
"""
if timeout is None:
timeout = self.timeout
return self._http_request(method=method, url_suffix=url_suffix, full_url=full_url, params=params,
ok_codes=ok_codes, headers=headers, return_empty_response=return_empty_response,
timeout=timeout, **kwargs)
def get_access_token_data(self):
access_token_jwt = self.ms_client.get_access_token()
token_data = jwt.decode(access_token_jwt, options={'verify_signature': False})
return access_token_jwt, token_data
def get_authentication_string(self):
return f'Bearer {self.access_token}'
def get_blob_data_request(self, blob_url):
"""
Args:
blob_url: The URL for the blob.
"""
auth_string = self.get_authentication_string()
headers = {
'Content-Type': 'application/json',
'Authorization': auth_string
}
params = {
'PublisherIdentifier': PUBLISHER_IDENTIFIER
}
response = self.http_request(
method='GET',
url_suffix='',
full_url=blob_url,
headers=headers,
params=params,
)
return response
def list_content_request(self, content_type, start_time, end_time):
"""
Args:
content_type: the content type
start_time: start time to fetch content
end_time: end time to fetch content
"""
auth_string = self.get_authentication_string()
headers = {
'Authorization': auth_string
}
params = {
'PublisherIdentifier': PUBLISHER_IDENTIFIER,
'contentType': content_type
}
if start_time and end_time:
params['startTime'] = start_time
params['endTime'] = end_time
response = self.http_request(
method='GET',
url_suffix=self.suffix_template.format(self.tenant_id, 'content'),
headers=headers,
params=params,
)
return response
def list_subscriptions_request(self):
auth_string = self.get_authentication_string()
headers = {
'Authorization': auth_string
}
params = {
'PublisherIdentifier': PUBLISHER_IDENTIFIER
}
response = self.http_request(
method='GET',
url_suffix=self.suffix_template.format(self.tenant_id, 'list'),
headers=headers,
params=params
)
return response
def start_or_stop_subscription_request(self, content_type, start_or_stop_suffix):
auth_string = self.get_authentication_string()
headers = {
'Authorization': auth_string
}
params = {
'PublisherIdentifier': PUBLISHER_IDENTIFIER,
'contentType': content_type
}
return self.http_request(
method='POST',
url_suffix=self.suffix_template.format(self.tenant_id, start_or_stop_suffix),
headers=headers,
params=params,
ok_codes=(200, 201, 202, 203, 204),
return_empty_response=True
)
def test_module():
params = demisto.params()
fetch_delta = params.get('first_fetch_delta', '10 minutes')
user_input_fetch_start_date, _ = parse_date_range(fetch_delta)
if datetime.now() - timedelta(days=7) - timedelta(minutes=5) >= user_input_fetch_start_date:
return 'Error: first fetch time delta should not be over one week.'
if params.get('self_deployed'):
if not params.get('auth_code') or not params.get('redirect_uri'):
return 'Error: in the self_deployed authentication flow the authentication code parameter and ' \
'redirect uri cannot be empty.'
return 'The basic parameters are ok, authentication cannot be checked using the test module. ' \
'Please run ms-management-activity-list-subscriptions to test your credentials.'
def get_start_or_stop_subscription_human_readable(content_type, start_or_stop):
if start_or_stop == 'start':
human_readable = f'Successfully started subscription to content type: {content_type}'
else:
human_readable = f'Successfully stopped subscription to content type: {content_type}'
return human_readable
def get_start_or_stop_subscription_context(content_type, start_or_stop):
is_subscription_enabled = True if start_or_stop == 'start' else False
subscription_context = {
'ContentType': content_type,
'Enabled': is_subscription_enabled
}
entry_context = {
'MicrosoftManagement.Subscription(val.ContentType && val.ContentType === obj.ContentType)': subscription_context
}
return entry_context
def start_or_stop_subscription_command(client, args, start_or_stop):
content_type = args.get('content_type')
try:
client.start_or_stop_subscription_request(content_type, start_or_stop)
human_readable = get_start_or_stop_subscription_human_readable(content_type, start_or_stop)
entry_context = get_start_or_stop_subscription_context(content_type, start_or_stop)
return_outputs(
readable_output=human_readable,
outputs=entry_context,
raw_response={}
)
except Exception as e:
if start_or_stop == 'start' and 'The subscription is already enabled. No property change' in str(e):
return_outputs('The subscription is already enabled.')
else:
raise
def get_enabled_subscriptions_content_types(enabled_subscriptions):
enabled_subscriptions_content_types = [subscription.get('contentType') for subscription in enabled_subscriptions
if subscription.get('status') == 'enabled']
return enabled_subscriptions_content_types
def get_subscriptions_context(enabled_subscriptions):
subscriptions_contexts = []
for subscription_content_type in enabled_subscriptions:
subscription_context = {
'ContentType': subscription_content_type,
'Enabled': True
}
subscriptions_contexts.append(subscription_context)
return subscriptions_contexts
def list_subscriptions_command(client):
subscriptions = client.list_subscriptions_request()
enabled_subscriptions_content_types = get_enabled_subscriptions_content_types(
subscriptions) # Subscriptions are defined by their content type
enabled_subscriptions_context = get_subscriptions_context(enabled_subscriptions_content_types)
human_readable = tableToMarkdown('Current Subscriptions', enabled_subscriptions_content_types,
headers='Current Subscriptions')
entry_context = {
'MicrosoftManagement.Subscription(val.ContentType && val.ContentType === obj.ContentType)': enabled_subscriptions_context
}
return_outputs(
readable_output=human_readable,
raw_response=enabled_subscriptions_context,
outputs=entry_context
)
def build_event_context(event_record):
event_context = {
'CreationTime': event_record.get('CreationTime'),
'ID': event_record.get('Id'),
'RecordType': event_record.get('RecordType'),
'Operation': event_record.get('Operation'),
'OrganizationID': event_record.get('OrganizationId'),
'UserType': event_record.get('UserType'),
'UserKey': event_record.get('UserKey'),
'Workload': event_record.get('Workload'),
'ResultsStatus': event_record.get('ResultStatus'),
'ObjectID': event_record.get('ObjectId'),
'UserID': event_record.get('UserId'),
'ClientIP': event_record.get('ClientIP'),
'Scope': event_record.get('Scope'),
}
# Remove keys with None value
event_context = assign_params(**event_context)
return event_context
def get_content_records_context(content_records):
content_records_context = []
for content_record in content_records:
record_context = build_event_context(content_record)
content_records_context.append(record_context)
return content_records_context
def get_all_content_type_records(client, content_type, start_time, end_time):
content_blobs = client.list_content_request(content_type, start_time, end_time)
# The list_content request returns a list of content records, each containing a url that holds the actual data
content_uris = [content_blob.get('contentUri') for content_blob in content_blobs]
content_records: List = []
for uri in content_uris:
content_records_in_uri = client.get_blob_data_request(uri)
content_records.extend(content_records_in_uri)
return content_records
def create_events_human_readable(events_context, content_type):
headers = ['ID', 'CreationTime', 'Workload', 'Operation']
content_header = f'Content for content type {content_type}'
human_readable = tableToMarkdown(content_header, events_context, headers=headers)
return human_readable
def get_filter_accepted_values_list(filtered_field, filter_data):
filter_accepted_values_string = filter_data.get(filtered_field)
if filter_accepted_values_string:
return filter_accepted_values_string.split(',')
return None
def verify_record_type_is_legal(record_type):
record_type_lowercase = record_type.lower()
if record_type_lowercase not in CONTENT_TYPE_TO_TYPE_ID_MAPPING:
return_error(f'Error: {record_type} is not a legal record type in the Microsoft Management Activity API.')
def record_types_to_type_ids(record_types_to_fetch):
record_type_ids_to_fetch = []
for record_type in record_types_to_fetch:
verify_record_type_is_legal(record_type)
# To lowercase to avoid user errors, such as 'quarantine' and 'Quarantine'
record_type_lowercase = record_type.lower()
record_type_id = CONTENT_TYPE_TO_TYPE_ID_MAPPING[record_type_lowercase]
record_type_ids_to_fetch.append(record_type_id)
return record_type_ids_to_fetch
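# Illustrative example (the IDs come from CONTENT_TYPE_TO_TYPE_ID_MAPPING above):
#
#     >>> record_types_to_type_ids(['Quarantine', 'sway'])
#     [65, 12]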
def does_record_match_filters(record, filter_accepted_record_type_ids, filter_accepted_workloads,
filter_accepted_operations):
should_filter_by_record_types = filter_accepted_record_type_ids is not None
record_matches_record_type_filter = not should_filter_by_record_types or record.get('RecordType') in \
filter_accepted_record_type_ids
should_filter_by_workloads = filter_accepted_workloads is not None
record_matches_workloads_filter = not should_filter_by_workloads or record.get('Workload') in \
filter_accepted_workloads
should_filter_by_operations = filter_accepted_operations is not None
record_matches_operations_filter = not should_filter_by_operations or record.get('Operation') in \
filter_accepted_operations
return record_matches_record_type_filter and record_matches_workloads_filter and record_matches_operations_filter
def filter_records(content_records, filter_data):
filter_accepted_workloads = get_filter_accepted_values_list('workloads_filter', filter_data)
filter_accepted_operations = get_filter_accepted_values_list('operations_filter', filter_data)
filter_accepted_record_types = get_filter_accepted_values_list('record_types_filter', filter_data)
# User specifies the record types by type name, but the API returns the record types by ID.
# Therefore we transform the names to IDs.
filter_accepted_record_type_ids = record_types_to_type_ids(
filter_accepted_record_types) if filter_accepted_record_types else None
filtered_records = []
for record in content_records:
if does_record_match_filters(record, filter_accepted_record_type_ids, filter_accepted_workloads,
filter_accepted_operations):
filtered_records.append(record)
return filtered_records
def list_content_command(client, args):
content_type = args['content_type']
start_time = args.get('start_time')
end_time = args.get('end_time')
content_records = get_all_content_type_records(client, content_type, start_time, end_time)
filtered_content_records = filter_records(content_records, args)
content_records_context = get_content_records_context(filtered_content_records)
human_readable = create_events_human_readable(content_records_context, content_type)
return_outputs(
readable_output=human_readable,
outputs={
'MicrosoftManagement.ContentRecord(val.ID && val.ID === obj.ID)': content_records_context
},
raw_response=filtered_content_records
)
def get_content_types_to_fetch(client):
content_types_to_fetch = demisto.params().get('content_types_to_fetch')
if not content_types_to_fetch:
# Was not supplied by the user, so we will return all content types the user is subscribed to
subscriptions = client.list_subscriptions_request()
content_types_to_fetch = get_enabled_subscriptions_content_types(
subscriptions)
return content_types_to_fetch
def get_fetch_end_time_based_on_start_time(fetch_start_datetime):
is_fetch_start_time_over_10_minutes_ago = (datetime.now() - timedelta(minutes=10) >= fetch_start_datetime)
if is_fetch_start_time_over_10_minutes_ago:
        # The fetch window is capped, so the fetch ends 10 minutes after its start time.
fetch_end_datetime = fetch_start_datetime + timedelta(minutes=10)
else:
fetch_end_datetime = datetime.now()
return fetch_end_datetime
def get_fetch_start_and_end_time(last_run, first_fetch_datetime):
if not last_run:
fetch_start_datetime = first_fetch_datetime
else:
last_fetch = last_run.get('last_fetch')
fetch_start_datetime = datetime.strptime(last_fetch, DATE_FORMAT)
fetch_end_datetime = get_fetch_end_time_based_on_start_time(fetch_start_datetime)
    # The API expects strings in the format YYYY-MM-DDTHH:MM:SS (DATE_FORMAT)
fetch_start_time_str = fetch_start_datetime.strftime(DATE_FORMAT)
fetch_end_time_str = fetch_end_datetime.strftime(DATE_FORMAT)
return fetch_start_time_str, fetch_end_time_str
def get_all_content_records_of_specified_types(client, content_types_to_fetch, start_time, end_time):
all_content_records: List = list()
content_types_to_fetch = content_types_to_fetch.split(',') if type(content_types_to_fetch) is str \
else content_types_to_fetch
for content_type in content_types_to_fetch:
content_records_of_current_type = get_all_content_type_records(client, content_type, start_time, end_time)
all_content_records.extend(content_records_of_current_type)
return all_content_records
def content_records_to_incidents(content_records, start_time, end_time):
incidents = []
start_time_datetime = datetime.strptime(start_time, DATE_FORMAT)
latest_creation_time_datetime = start_time_datetime
record_ids_already_found: Set = set()
for content_record in content_records:
incident_creation_time_str = content_record['CreationTime']
incident_creation_time_datetime = datetime.strptime(incident_creation_time_str, DATE_FORMAT)
        if incident_creation_time_datetime < start_time_datetime:
            # Skip records that were created before the start of the fetch window
            continue
incident_creation_time_in_incidents_format = incident_creation_time_str + 'Z'
record_id = content_record['Id']
incident = {
'name': f'Microsoft Management Activity: {record_id}',
'occurred': incident_creation_time_in_incidents_format,
'rawJSON': json.dumps(content_record)
}
if incident['name'] in record_ids_already_found:
pass
else:
record_ids_already_found.add(incident['name'])
incidents.append(incident)
if incident_creation_time_datetime > latest_creation_time_datetime:
latest_creation_time_datetime = incident_creation_time_datetime
latest_creation_time_str = datetime.strftime(latest_creation_time_datetime, DATE_FORMAT)
if len(content_records) == 0 or latest_creation_time_str == start_time:
latest_creation_time_str = end_time
return incidents, latest_creation_time_str
def fetch_incidents(client, last_run, first_fetch_datetime):
start_time, end_time = get_fetch_start_and_end_time(last_run, first_fetch_datetime)
content_types_to_fetch = get_content_types_to_fetch(client)
content_records = get_all_content_records_of_specified_types(client, content_types_to_fetch, start_time, end_time)
filtered_content_records = filter_records(content_records, demisto.params())
incidents, last_fetch = content_records_to_incidents(filtered_content_records, start_time, end_time)
next_run = {'last_fetch': last_fetch}
return next_run, incidents
def calculate_timeout_value(params: dict, args: dict) -> int:
if arg_timeout := int(args.get('timeout') or 0):
return arg_timeout
elif param_timeout := int(params.get('timeout') or 0):
return param_timeout
return TIMEOUT_DEFAULT # for unit tests
def main():
base_url = demisto.params().get('base_url', 'https://manage.office.com/api/v1.0/')
verify_certificate = not demisto.params().get('insecure', False)
first_fetch_delta = demisto.params().get('first_fetch_delta', '10 minutes').strip()
first_fetch_datetime, _ = parse_date_range(first_fetch_delta)
proxy = demisto.params().get('proxy', False)
args = demisto.args()
params = demisto.params()
LOG(f'Command being called is {demisto.command()}')
try:
if demisto.command() == 'test-module':
result = test_module()
return_error(result)
refresh_token = params.get('refresh_token', '')
self_deployed = params.get('self_deployed', False)
redirect_uri = params.get('redirect_uri', '')
tenant_id = refresh_token if self_deployed else ''
auth_id = params['auth_id']
enc_key = params['enc_key']
refresh_token = get_integration_context().get('current_refresh_token') or refresh_token
client = Client(
base_url=base_url,
tenant_id=tenant_id,
verify=verify_certificate,
proxy=proxy,
self_deployed=self_deployed,
refresh_token=refresh_token,
auth_and_token_url=auth_id,
timeout=calculate_timeout_value(params=params, args=args),
enc_key=enc_key,
auth_code=params.get('auth_code', ''),
redirect_uri=redirect_uri
)
access_token, token_data = client.get_access_token_data()
client.access_token = access_token
client.tenant_id = token_data['tid']
if demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_datetime=first_fetch_datetime)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'ms-management-activity-start-subscription':
start_or_stop_subscription_command(client, args, 'start')
elif demisto.command() == 'ms-management-activity-stop-subscription':
start_or_stop_subscription_command(client, args, 'stop')
elif demisto.command() == 'ms-management-activity-list-subscriptions':
list_subscriptions_command(client)
elif demisto.command() == 'ms-management-activity-list-content':
list_content_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
from MicrosoftApiModule import * # noqa: E402
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| 40.807757
| 129
| 0.689326
|
5fa9bb13bd0ef6127465d705ff0720ce2eca5b92
| 23
|
py
|
Python
|
plugins/pdf-img/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 13
|
2020-01-27T09:02:25.000Z
|
2022-01-20T07:45:26.000Z
|
plugins/pdf-img/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 29
|
2020-03-22T06:57:57.000Z
|
2022-01-24T22:46:42.000Z
|
plugins/pdf-img/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 6
|
2020-07-10T00:13:30.000Z
|
2022-01-26T08:22:33.000Z
|
from .pdf_img import *
| 11.5
| 22
| 0.73913
|
a647c837b28e0f9c6993728a46a6efb799cc4d6a
| 1,009
|
py
|
Python
|
tests/helpers/helpers_compute_engine.py
|
around-media/jam
|
9778c57594aa88992e51c7099f1afed0718574c0
|
[
"MIT"
] | null | null | null |
tests/helpers/helpers_compute_engine.py
|
around-media/jam
|
9778c57594aa88992e51c7099f1afed0718574c0
|
[
"MIT"
] | 94
|
2018-04-02T15:29:16.000Z
|
2022-03-28T18:46:24.000Z
|
tests/helpers/helpers_compute_engine.py
|
around-media/jam
|
9778c57594aa88992e51c7099f1afed0718574c0
|
[
"MIT"
] | 2
|
2019-03-25T08:59:24.000Z
|
2019-03-27T10:11:18.000Z
|
import contextlib
import datetime
import jam.libs.compute_engine
def make_info_instantly_stale(instance):
instance.info_ts -= datetime.timedelta(
milliseconds=2 * jam.libs.compute_engine.ComputeEngineInstance.DEFAULT_STALE_AFTER_MS
)
@contextlib.contextmanager
def no_pause():
saved_wait_op = jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_OPERATION
saved_wait_st = jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_STATUS
saved_stale = jam.libs.compute_engine.ComputeEngineInstance.DEFAULT_STALE_AFTER_MS
jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_OPERATION = 0
jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_STATUS = 0
jam.libs.compute_engine.ComputeEngineInstance.DEFAULT_STALE_AFTER_MS = 1
try:
yield
finally:
jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_OPERATION = saved_wait_op
jam.libs.compute_engine.TIME_SLEEP_WAIT_FOR_STATUS = saved_wait_st
jam.libs.compute_engine.ComputeEngineInstance.DEFAULT_STALE_AFTER_MS = saved_stale
| 36.035714
| 93
| 0.804757
|
fdce97db6429dacd18fc254f9267d83bce1dfa2e
| 6,563
|
py
|
Python
|
examples/semi_lagrangian_experiments/unsteady_semi_experiment.py
|
dbstein/ipde
|
834e16a617f47a3eabe3307ba151d5b7db527b30
|
[
"Apache-2.0"
] | 2
|
2019-10-17T15:29:38.000Z
|
2021-02-19T20:01:34.000Z
|
examples/semi_lagrangian_experiments/unsteady_semi_experiment.py
|
dbstein/ipde
|
834e16a617f47a3eabe3307ba151d5b7db527b30
|
[
"Apache-2.0"
] | null | null | null |
examples/semi_lagrangian_experiments/unsteady_semi_experiment.py
|
dbstein/ipde
|
834e16a617f47a3eabe3307ba151d5b7db527b30
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import finufftpy
import time
"""
Test semi-lagrangian solve...
"""
# max time
max_time = 1.0
# set timestep
dt = 0.01
# discretization size
n = 32
# generate a velocity field (assume steady in time)
u_function = lambda x, y, t: np.sin(x)*np.cos(y)*(1+np.cos(2*np.pi*t))
v_function = lambda x, y, t: -np.cos(x)*np.sin(y)*(1+np.cos(2*np.pi*t))
# gradient function
def gradient(f):
fh = np.fft.fft2(f)
fx = np.fft.ifft2(fh*ikx).real
fy = np.fft.ifft2(fh*iky).real
return fx, fy
################################################################################
# Forward-Euler, small timestep, grid refined by a factor of adj for "truth"
adj = 4
n *= adj
dt /= adj
# generate a grid
v, h = np.linspace(0, 2*np.pi, n, endpoint=False, retstep=True)
x, y = np.meshgrid(v, v, indexing='ij')
# fourier modes
kv = np.fft.fftfreq(n, h/(2*np.pi))
kv[int(n/2)] = 0.0
kx, ky = np.meshgrid(kv, kv, indexing='ij')
ikx, iky = 1j*kx, 1j*ky
# initial c field
c0 = np.exp(np.cos(x))*np.sin(y)
print('')
print("Getting 'truth'")
t = 0.0
c = c0.copy()
while t < max_time-1e-10:
cx, cy = gradient(c)
u = u_function(x, y, t)
v = v_function(x, y, t)
c -= dt*(u*cx + v*cy)
t += dt
print(' t = {:0.3f}'.format(t), max_time, '\r', end='')
c_true = c.copy()[::adj,::adj]
n = int(n/adj)
dt *= adj
################################################################################
# Forward-Euler Timestepper
# generate a grid
v, h = np.linspace(0, 2*np.pi, n, endpoint=False, retstep=True)
x, y = np.meshgrid(v, v, indexing='ij')
# fourier modes
kv = np.fft.fftfreq(n, h/(2*np.pi))
kv[int(n/2)] = 0.0
kx, ky = np.meshgrid(kv, kv, indexing='ij')
ikx, iky = 1j*kx, 1j*ky
# initial c field
c0 = np.exp(np.cos(x))*np.sin(y)
print('Testing Forward-Euler Method')
st = time.time()
t = 0.0
c = c0.copy()
while t < max_time-1e-10:
cx, cy = gradient(c)
u = u_function(x, y, t)
v = v_function(x, y, t)
c -= dt*(u*cx + v*cy)
t += dt
print(' t = {:0.3f}'.format(t), max_time, '\r', end='')
c_eulerian = c.copy()
time_eulerian = time.time() - st
################################################################################
# Semi-Lagrangian Non-linear departure point method
print('Testing Non-linear Departure Method')
class interp2d(object):
def __init__(self, u):
self.uh = np.fft.fft2(u).copy()
self.scale = 1.0/np.prod(self.uh.shape)
def __call__(self, x, y):
sh = x.shape
x, y = x.ravel(), y.ravel()
work = np.empty(x.size, dtype=complex)
info = finufftpy.nufft2d2(x, y, work, 1, 1e-14, self.uh, modeord=1)
return (work.real*self.scale).reshape(sh)
def objective_function(xx, yy):
ox = xx + dt*u_interpolater(xx, yy) - x
oy = yy + dt*v_interpolater(xx, yy) - y
return ox, oy
def Jacobian(xx, yy):
Jxx = 1.0 + dt*ux_interpolater(xx, yy)
Jxy = dt*uy_interpolater(xx, yy)
Jyx = dt*vx_interpolater(xx, yy)
Jyy = 1.0 + dt*vy_interpolater(xx, yy)
return Jxx, Jxy, Jyx, Jyy
st = time.time()
t = 0.0
c = c0.copy()
while t < max_time-1e-10:
# get velocity fields and gradients
u = u_function(x, y, t)
v = v_function(x, y, t)
ux, uy = gradient(u)
vx, vy = gradient(v)
# define needed interpolaters
u_interpolater = interp2d(u)
v_interpolater = interp2d(v)
ux_interpolater = interp2d(ux)
uy_interpolater = interp2d(uy)
vx_interpolater = interp2d(vx)
vy_interpolater = interp2d(vy)
c_interpolater = interp2d(c)
# get initial guess for each gridpoint
gx = x - dt*u
gy = y - dt*v
xn = gx.copy()
yn = gy.copy()
resx, resy = objective_function(xn, yn)
res = np.hypot(resx, resy).max()
tol = 1e-12
i = 0
while res > tol:
Jxx, Jxy, Jyx, Jyy = Jacobian(xn, yn)
det = Jxx*Jyy - Jxy*Jyx
idet = 1.0/det
dx = -idet*(Jyy*resx - Jyx*resy)
dy = -idet*(-Jxy*resx + Jxx*resy)
xn += dx
yn += dy
resx, resy = objective_function(xn, yn)
res = np.hypot(resx, resy).max()
i += 1
c = c_interpolater(xn, yn)
t += dt
print(' t = {:0.3f}'.format(t), max_time, '\r', end='')
c_nonlinear_departure = c.copy()
time_nonlinear_departure = time.time() - st
################################################################################
# Semi-Lagrangian Linear departure point method
print('Testing Linear Departure Method')
st = time.time()
t = 0.0
c = c0.copy()
SLM = np.zeros([n**2,] + [2,2], dtype=float)
SLR = np.zeros([n**2,] + [2,], dtype=float)
while t < max_time-1e-10:
# get velocity fields and gradients
u = u_function(x, y, t)
v = v_function(x, y, t)
ux, uy = gradient(u)
vx, vy = gradient(v)
# define needed interpolaters
c_interpolater = interp2d(c)
# solve for departure points
SLM[:,0,0] = 1 + dt*ux.ravel()
SLM[:,0,1] = dt*uy.ravel()
SLM[:,1,0] = dt*vx.ravel()
SLM[:,1,1] = 1 + dt*vy.ravel()
SLR[:,0] = dt*u.ravel()
SLR[:,1] = dt*v.ravel()
OUT = np.linalg.solve(SLM, SLR)
xdt, ydt = OUT[:,0].reshape(n,n), OUT[:,1].reshape(n,n)
xd, yd = x - xdt, y - ydt
# interpolate onto departure points
c = c_interpolater(xd, yd)
t += dt
print(' t = {:0.3f}'.format(t), max_time, '\r', end='')
c_linear_departure = c.copy()
time_linear_departure = time.time() - st
################################################################################
# Semi-Lagrangian arrival point method
print('Testing Arrival Method')
st = time.time()
t = 0.0
c = c0.copy()
while t < max_time-1e-10:
# get velocity fields and gradients
u = u_function(x, y, t)
v = v_function(x, y, t)
ux, uy = gradient(u)
vx, vy = gradient(v)
# get arrival points
xn = x + dt*u
yn = y + dt*v
# compute weights
Jxx = 1 + dt*ux
Jxy = dt*uy
Jyx = dt*vx
Jyy = 1 + dt*vy
det = Jxx*Jyy - Jxy*Jyx
cw = c*det
ch = np.zeros([n,n], dtype=complex, order='F')
finufftpy.nufft2d1(xn, yn, cw.astype(complex), -1, 1e-14, n, n, ch, modeord=1)
c = np.fft.ifft2(ch).real
t += dt
print(' t = {:0.3f}'.format(t), max_time, '\r', end='')
c_arrival = c.copy()
time_arrival = time.time() - st
################################################################################
# Evaluate
print('\n')
print('Err, true vs. euler {:0.1e}'.format(np.abs(c_true-c_eulerian).max()))
print('Err, true vs. dep {:0.1e}'.format(np.abs(c_true-c_nonlinear_departure).max()))
print('Err, true vs. lin dep {:0.1e}'.format(np.abs(c_true-c_linear_departure).max()))
print('Err, true vs. arrival {:0.1e}'.format(np.abs(c_true-c_arrival).max()))
print('')
print('Time, eulerian {:0.1f}'.format(time_eulerian*1000))
print('Time, departure {:0.1f}'.format(time_nonlinear_departure*1000))
print('Time, linear dep {:0.1f}'.format(time_linear_departure*1000))
print('Time, arrival {:0.1f}'.format(time_arrival*1000))
| 26.897541
| 90
| 0.586622
|
fa113ce5d264a35c216d2944b27af330e00b22f4
| 2,009
|
py
|
Python
|
Recognize/recognize_time_test.py
|
l976308589/cnn_captcha_copy
|
ebc440300b84a87df41393f5de2b97d7ff3cad26
|
[
"Apache-2.0"
] | null | null | null |
Recognize/recognize_time_test.py
|
l976308589/cnn_captcha_copy
|
ebc440300b84a87df41393f5de2b97d7ff3cad26
|
[
"Apache-2.0"
] | null | null | null |
Recognize/recognize_time_test.py
|
l976308589/cnn_captcha_copy
|
ebc440300b84a87df41393f5de2b97d7ff3cad26
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Recognize captchas fetched over the network using the self-hosted recognition API.
Parameters that need to be configured:
    remote_url = "https://www.xxxxxxx.com/getImg"  the captcha image URL
    rec_times = 1  the number of recognition runs
"""
import datetime
import requests
from io import BytesIO
import time
import json
from Config.config import sample_conf
def recognize_captcha(index, test_path, save_path, image_suffix):
image_file_name = 'captcha.{}'.format(image_suffix)
with open(test_path, "rb") as f:
content = f.read()
    # Run recognition
s = time.time()
url = "http://127.0.0.1:6000/b"
files = {'image_file': (image_file_name, BytesIO(content), 'application')}
r = requests.post(url=url, files=files)
e = time.time()
    # Test parameters
    result_dict = json.loads(r.text)["value"]  # response body
    predict_text = result_dict["value"]  # recognition result
    whole_time_for_work = int((e - s) * 1000)
    speed_time_by_rec = result_dict["speed_time(ms)"]  # time spent by the model on recognition
    request_time_by_rec = whole_time_for_work - speed_time_by_rec  # time spent on the request itself
    now_time = datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')  # current time
    # Write a log entry
log = "{},{},{},{},{},{}\n" \
.format(index, predict_text, now_time, whole_time_for_work, speed_time_by_rec, request_time_by_rec)
with open("./test.csv", "a+") as f:
f.write(log)
    # Print the result to the console
    print("attempt: {}, result: {}, time: {}, total: {}ms, recognition: {}ms, request: {}ms"
.format(index, predict_text, now_time, whole_time_for_work, speed_time_by_rec, request_time_by_rec))
    # Save the file
# img_name = "{}_{}.{}".format(predict_text, str(time.time()).replace(".", ""), image_suffix)
# path = os.path.join(save_path, img_name)
# with open(path, "wb") as f:
# f.write(content)
def main():
    # Configuration parameters
    test_file = "sample/test/0001_15430304076164024.png"  # path of the image used for the recognition test
    save_path = '.' + sample_conf["local_image_dir"]  # directory where images are saved
    image_suffix = sample_conf["image_suffix"]  # image file suffix
for i in range(20000):
recognize_captcha(i, test_file, save_path, image_suffix)
if __name__ == '__main__':
main()
| 30.439394
| 110
| 0.642608
|
4682f508bc3816a27543d42f221f4ad98e503483
| 554
|
py
|
Python
|
clist/migrations/0034_auto_20200503_0125.py
|
horacexd/clist
|
9759dfea97b86514bec9825d2430abc36decacf0
|
[
"Apache-2.0"
] | 166
|
2019-05-16T23:46:08.000Z
|
2022-03-31T05:20:23.000Z
|
clist/migrations/0034_auto_20200503_0125.py
|
horacexd/clist
|
9759dfea97b86514bec9825d2430abc36decacf0
|
[
"Apache-2.0"
] | 92
|
2020-01-18T22:51:53.000Z
|
2022-03-12T01:23:57.000Z
|
clist/migrations/0034_auto_20200503_0125.py
|
VadVergasov/clist
|
4afcdfe88250d224043b28efa511749347cec71c
|
[
"Apache-2.0"
] | 23
|
2020-02-09T17:38:43.000Z
|
2021-12-09T14:39:07.000Z
|
# Generated by Django 2.2.10 on 2020-05-03 01:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clist', '0033_remove_banner_start_time'),
]
operations = [
migrations.AddField(
model_name='resource',
name='num_accounts',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='resource',
name='num_contests',
field=models.IntegerField(default=0),
),
]
| 23.083333
| 51
| 0.584838
|
80f1f897d368371b3a0c31b84521ef2a8cda5a14
| 6,830
|
py
|
Python
|
pipenv/vendor/vistir/contextmanagers.py
|
ehebert/pipenv
|
b771621274fcdb6980b4c9682bd2b2879e3354d1
|
[
"MIT"
] | 1
|
2018-10-29T18:41:01.000Z
|
2018-10-29T18:41:01.000Z
|
pipenv/vendor/vistir/contextmanagers.py
|
ehebert/pipenv
|
b771621274fcdb6980b4c9682bd2b2879e3354d1
|
[
"MIT"
] | null | null | null |
pipenv/vendor/vistir/contextmanagers.py
|
ehebert/pipenv
|
b771621274fcdb6980b4c9682bd2b2879e3354d1
|
[
"MIT"
] | null | null | null |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import stat
import sys
from contextlib import contextmanager
import six
from .compat import NamedTemporaryFile, Path
from .path import is_file_url, is_valid_url, path_to_url, url_to_path
__all__ = ["temp_environ", "temp_path", "cd", "atomic_open_for_write", "open_file"]
# Borrowed from Pew.
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
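# Illustrative usage sketch ("MY_FLAG" is just a hypothetical variable name):
#
#     >>> with temp_environ():
#     ...     os.environ["MY_FLAG"] = "1"
#     >>> "MY_FLAG" in os.environ
#     False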
@contextmanager
def temp_path():
"""A context manager which allows the ability to set sys.path temporarily
>>> path_from_virtualenv = load_path("/path/to/venv/bin/python")
>>> print(sys.path)
['/home/user/.pyenv/versions/3.7.0/bin', '/home/user/.pyenv/versions/3.7.0/lib/python37.zip', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.pyenv/versions/3.7.0/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7/site-packages']
>>> with temp_path():
sys.path = path_from_virtualenv
# Running in the context of the path above
run(["pip", "install", "stuff"])
>>> print(sys.path)
['/home/user/.pyenv/versions/3.7.0/bin', '/home/user/.pyenv/versions/3.7.0/lib/python37.zip', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.pyenv/versions/3.7.0/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7/site-packages']
"""
path = [p for p in sys.path]
try:
yield
finally:
sys.path = [p for p in path]
@contextmanager
def cd(path):
"""Context manager to temporarily change working directories
:param str path: The directory to move into
>>> print(os.path.abspath(os.curdir))
'/home/user/code/myrepo'
>>> with cd("/home/user/code/otherdir/subdir"):
print("Changed directory: %s" % os.path.abspath(os.curdir))
Changed directory: /home/user/code/otherdir/subdir
>>> print(os.path.abspath(os.curdir))
'/home/user/code/myrepo'
"""
if not path:
return
prev_cwd = Path.cwd().as_posix()
if isinstance(path, Path):
path = path.as_posix()
os.chdir(str(path))
try:
yield
finally:
os.chdir(prev_cwd)
@contextmanager
def atomic_open_for_write(target, binary=False, newline=None, encoding=None):
"""Atomically open `target` for writing.
This is based on Lektor's `atomic_open()` utility, but simplified a lot
to handle only writing, and skip many multi-process/thread edge cases
handled by Werkzeug.
:param str target: Target filename to write
:param bool binary: Whether to open in binary mode, default False
:param str newline: The newline character to use when writing, determined from system if not supplied
:param str encoding: The encoding to use when writing, defaults to system encoding
How this works:
* Create a temp file (in the same directory of the actual target), and
yield for surrounding code to write to it.
    * If something goes wrong, try to remove the temp file. The actual target
is not touched whatsoever.
* If everything goes well, close the temp file, and replace the actual
target with this new file.
.. code:: python
>>> fn = "test_file.txt"
>>> def read_test_file(filename=fn):
with open(filename, 'r') as fh:
print(fh.read().strip())
>>> with open(fn, "w") as fh:
fh.write("this is some test text")
>>> read_test_file()
this is some test text
>>> def raise_exception_while_writing(filename):
with open(filename, "w") as fh:
fh.write("writing some new text")
raise RuntimeError("Uh oh, hope your file didn't get overwritten")
>>> raise_exception_while_writing(fn)
Traceback (most recent call last):
...
RuntimeError: Uh oh, hope your file didn't get overwritten
>>> read_test_file()
writing some new text
# Now try with vistir
>>> def raise_exception_while_writing(filename):
with vistir.contextmanagers.atomic_open_for_write(filename) as fh:
fh.write("Overwriting all the text from before with even newer text")
raise RuntimeError("But did it get overwritten now?")
>>> raise_exception_while_writing(fn)
Traceback (most recent call last):
...
RuntimeError: But did it get overwritten now?
>>> read_test_file()
writing some new text
"""
mode = "w+b" if binary else "w"
f = NamedTemporaryFile(
dir=os.path.dirname(target),
prefix=".__atomic-write",
mode=mode,
encoding=encoding,
newline=newline,
delete=False,
)
# set permissions to 0644
os.chmod(f.name, stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
yield f
except BaseException:
f.close()
try:
os.remove(f.name)
except OSError:
pass
raise
else:
f.close()
try:
os.remove(target) # This is needed on Windows.
except OSError:
pass
os.rename(f.name, target) # No os.replace() on Python 2.
@contextmanager
def open_file(link, session=None):
"""
Open local or remote file for reading.
:type link: pip._internal.index.Link or str
:type session: requests.Session
:raises ValueError: If link points to a local directory.
:return: a context manager to the opened file-like object
"""
if not isinstance(link, six.string_types):
try:
link = link.url_without_fragment
except AttributeError:
raise ValueError("Cannot parse url from unkown type: {0!r}".format(link))
if not is_valid_url(link) and os.path.exists(link):
link = path_to_url(link)
if is_file_url(link):
# Local URL
local_path = url_to_path(link)
if os.path.isdir(local_path):
raise ValueError("Cannot open directory for read: {}".format(link))
else:
with open(local_path, "rb") as local_file:
yield local_file
else:
# Remote URL
headers = {"Accept-Encoding": "identity"}
if not session:
from requests import Session
session = Session()
response = session.get(link, headers=headers, stream=True)
try:
yield response.raw
finally:
response.close()
| 32.679426
| 273
| 0.624451
|
712b28274a10f73a4bf976e570336c0c736ea580
| 11,654
|
py
|
Python
|
pythx/api/client.py
|
s0b0lev/pythx
|
c34d81421a2cbea71e60c33245d54fb19b6ad68a
|
[
"MIT"
] | null | null | null |
pythx/api/client.py
|
s0b0lev/pythx
|
c34d81421a2cbea71e60c33245d54fb19b6ad68a
|
[
"MIT"
] | null | null | null |
pythx/api/client.py
|
s0b0lev/pythx
|
c34d81421a2cbea71e60c33245d54fb19b6ad68a
|
[
"MIT"
] | null | null | null |
import logging
from datetime import datetime
from typing import Dict, List
import jwt
from pythx.api.handler import APIHandler
from pythx.middleware.analysiscache import AnalysisCacheMiddleware
from pythx.middleware.base import BaseMiddleware
from pythx.middleware.toolname import ClientToolNameMiddleware
from mythx_models import request as reqmodels
from mythx_models import response as respmodels
LOGGER = logging.getLogger(__name__)
class Client:
"""The main class for API interaction.
The client makes sure that you are authenticated at all times. For authentication data it
    requires either the account's Ethereum address *and* password, or a valid combination of
access *and* refresh token. If any token expires, the client will automatically try to
refresh the access token, or log the user in again. After that, the original request is
executed.
Furthermore, the client class supports various actions for high-level usage to easily submit
new analysis jobs, check their status, get notified whether they are ready, and fetch analysis
job report data.
A user can inject custom middlewares. There are two required internal ones:
1. :code:`ClientToolNameMiddleware` Fills in the :code:`clientToolName` field for new analysis submissions
2. :code:`AnalysisCacheMiddleware` Sets the :code:`noCacheLookup` field in new analysis submissions
These middlewares can also be overwritten by the user (even though using the Client parameters is
recommended!). If any of these middleware instances are missing in the user-defined list, e.g.
because they simply add their own ones, the Client constructor will automatically add them with their
default or parameter-defined values (if given).
"""
def __init__(
self,
eth_address: str = None,
password: str = None,
access_token: str = None,
refresh_token: str = None,
handler: APIHandler = None,
staging: bool = False,
no_cache: bool = False,
middlewares: List[BaseMiddleware] = None,
):
self.eth_address = eth_address
self.password = password
if not middlewares:
# initialize without custom middlewares
middlewares = [
ClientToolNameMiddleware(),
AnalysisCacheMiddleware(no_cache),
]
else:
# add tool name and analysis cache middleware
type_list = [type(m) for m in middlewares]
if ClientToolNameMiddleware not in type_list:
middlewares.append(ClientToolNameMiddleware())
if AnalysisCacheMiddleware not in type_list:
middlewares.append(AnalysisCacheMiddleware(no_cache))
self.handler = handler or APIHandler(middlewares=middlewares, staging=staging)
self.access_token = access_token
self.refresh_token = refresh_token
def _assemble_send_parse(
self, req_obj, resp_model, assert_authentication=True, include_auth_header=True
):
"""Assemble the request, send it, parse and return the response."""
if assert_authentication:
self.assert_authentication()
auth_header = (
{"Authorization": "Bearer {}".format(self.access_token)}
if include_auth_header
else None
)
req_dict = self.handler.assemble_request(req_obj)
LOGGER.debug("Sending request")
resp = self.handler.send_request(req_dict, auth_header=auth_header)
LOGGER.debug("Parsing response")
return self.handler.parse_response(resp, resp_model)
@staticmethod
def _get_jwt_expiration_ts(token):
"""Decode the APIs JWT to get their expiration time."""
return datetime.utcfromtimestamp((jwt.decode(token, verify=False)["exp"]))
def assert_authentication(self):
"""Make sure the user is authenticated.
If necessary, this method will refresh the access token, or perform another
login to get a fresh combination of tokens if both are expired.
:return: None
"""
if self.access_token is not None and self.refresh_token is None:
# Override with access token if it's the only thing we were given
return
elif self.access_token is None and self.refresh_token is None:
# We haven't authenticated yet
self.login()
return
now = datetime.utcnow()
access_expiration = self._get_jwt_expiration_ts(self.access_token)
refresh_expiration = self._get_jwt_expiration_ts(self.refresh_token)
if now < access_expiration:
# auth token still valid - continue
LOGGER.debug(
"Auth check passed, token still valid: {} < {}".format(
now, access_expiration
)
)
elif access_expiration < now < refresh_expiration:
# access token expired, but refresh token hasn't - use it to get new access token
LOGGER.debug(
"Auth refresh needed: {} < {} < {}".format(
access_expiration, now, refresh_expiration
)
)
self.refresh()
else:
# refresh token has also expired - let's login again
LOGGER.debug("Access and refresh token have expired - logging in again")
self.login()
def login(self) -> respmodels.AuthLoginResponse:
"""Perform a login request on the API and return the response.
:return: AuthLoginResponse
"""
req = reqmodels.AuthLoginRequest(
eth_address=self.eth_address, password=self.password
)
resp_model = self._assemble_send_parse(
req,
respmodels.AuthLoginResponse,
assert_authentication=False,
include_auth_header=False,
)
self.access_token = resp_model.access_token
self.refresh_token = resp_model.refresh_token
return resp_model
def logout(self) -> respmodels.AuthLogoutResponse:
"""Perform a logout request on the API and return the response.
:return: AuthLogoutResponse
"""
req = reqmodels.AuthLogoutRequest()
resp_model = self._assemble_send_parse(req, respmodels.AuthLogoutResponse)
self.access_token = None
self.refresh_token = None
return resp_model
def refresh(self) -> respmodels.AuthRefreshResponse:
"""Perform a JWT refresh on the API and return the response.
:return: AuthRefreshResponse
"""
req = reqmodels.AuthRefreshRequest(
access_token=self.access_token, refresh_token=self.refresh_token
)
resp_model = self._assemble_send_parse(
req,
respmodels.AuthRefreshResponse,
assert_authentication=False,
include_auth_header=False,
)
self.access_token = resp_model.access_token
self.refresh_token = resp_model.refresh_token
return resp_model
def analysis_list(
self, date_from: datetime = None, date_to: datetime = None, offset: int = None
) -> respmodels.AnalysisListResponse:
"""Get a list of the user's analyses jobs.
:param date_from: Start of the date range (optional)
:param date_to: End of the date range (optional)
:param offset: The number of results to skip (used for pagination)
:return: AnalysisListResponse
"""
req = reqmodels.AnalysisListRequest(
offset=offset, date_from=date_from, date_to=date_to
)
return self._assemble_send_parse(req, respmodels.AnalysisListResponse)
def analyze(
self,
contract_name: str = None,
bytecode: str = None,
source_map: str = None,
deployed_bytecode: str = None,
deployed_source_map: str = None,
main_source: str = None,
sources: Dict[str, Dict[str, str]] = None,
source_list: List[str] = None,
solc_version: str = None,
analysis_mode: str = "quick",
) -> respmodels.AnalysisSubmissionResponse:
"""Submit a new analysis job.
        At least the smart contract's bytecode or its source code must be given. The more
information the MythX API gets, the more precise and verbose the results will be.
:param contract_name:
:param bytecode:
:param source_map:
:param deployed_bytecode:
:param deployed_source_map:
:param sources:
:param source_list:
:param solc_version:
:param analysis_mode:
:return: AnalysisSubmissionResponse
"""
req = reqmodels.AnalysisSubmissionRequest(
contract_name=contract_name,
bytecode=bytecode,
source_map=source_map,
deployed_bytecode=deployed_bytecode,
deployed_source_map=deployed_source_map,
main_source=main_source,
sources=sources,
source_list=source_list,
solc_version=solc_version,
analysis_mode=analysis_mode,
)
# req.validate()
return self._assemble_send_parse(req, respmodels.AnalysisSubmissionResponse)
def status(self, uuid: str) -> respmodels.AnalysisStatusResponse:
"""Get the status of an analysis job based on its UUID.
:param uuid: The job's UUID
:return: AnalysisStatusResponse
"""
req = reqmodels.AnalysisStatusRequest(uuid)
return self._assemble_send_parse(req, respmodels.AnalysisStatusResponse)
def analysis_ready(self, uuid: str) -> bool:
"""Return a boolean whether the analysis job with the given UUID has finished processing.
:param uuid:
:return: bool
"""
resp = self.status(uuid)
return (
resp.analysis.status == respmodels.AnalysisStatus.FINISHED
or resp.analysis.status == respmodels.AnalysisStatus.ERROR
)
def report(self, uuid: str) -> respmodels.DetectedIssuesResponse:
"""Get the report holding found issues for an analysis job based on its UUID.
:param uuid:
:return: DetectedIssuesResponse
"""
req = reqmodels.DetectedIssuesRequest(uuid)
return self._assemble_send_parse(req, respmodels.DetectedIssuesResponse)
def request_by_uuid(self, uuid: str) -> respmodels.AnalysisInputResponse:
req = reqmodels.AnalysisInputRequest(uuid)
return self._assemble_send_parse(req, respmodels.AnalysisInputResponse)
def openapi(self, mode="yaml") -> respmodels.OASResponse:
"""Return the OpenAPI specification either in HTML or YAML.
:param mode: "yaml" or "html"
:return: OASResponse
"""
req = reqmodels.OASRequest(mode=mode)
return self._assemble_send_parse(
req,
respmodels.OASResponse,
assert_authentication=False,
include_auth_header=False,
)
def version(self) -> respmodels.VersionResponse:
"""Call the APIs version endpoint to get its backend version numbers.
:return: VersionResponse
"""
req = reqmodels.VersionRequest()
return self._assemble_send_parse(
req,
respmodels.VersionResponse,
assert_authentication=False,
include_auth_header=False,
)
def __enter__(self):
self.assert_authentication()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.logout()
| 37.960912
| 114
| 0.651279
|
fb2528da63f0a9ae863eb3b00214d7ee944b0473
| 2,088
|
py
|
Python
|
extra/bsmalea-notes-1c/test.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | null | null | null |
extra/bsmalea-notes-1c/test.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | 2
|
2020-03-30T14:58:30.000Z
|
2020-12-10T15:15:06.000Z
|
extra/bsmalea-notes-1c/test.py
|
cookieblues/cookieblues.github.io
|
9b570d83887eb2d6f92cfaa927a1adf136124a90
|
[
"MIT"
] | null | null | null |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import binom, gamma
from scipy.stats import beta
# turq 06C2AC, mag C20076, mixed 646191
# Set matplotlib font
mpl.rc("text", usetex=True)
mpl.rc("font", family="serif")
def likelihood(theta, n, k):
return binom(n, k) * theta**k * (1-theta)**(n-k)
fig = plt.figure(figsize=(7, 7))
for i in range(4):
ax = fig.add_subplot(2, 2, int(i+1))
n = 11
k = 8
if i == 0:
a = 1.5
b = 2
elif i == 1:
a = 1.5
b = 3
elif i == 2:
a = 1.5
b = 4
else:
a = 1.5
b = 5
X = np.linspace(0, 1, num=1000)
t = likelihood(X, n, k) * gamma(n+2) / (gamma(k+1)*gamma((n-k)+1)*binom(n, k))
prior = beta.pdf(X, a, b)
posterior = beta.pdf(X, a+k, b+(n-k))
y_max = 4
turq = mpl.colors.to_rgb("turquoise")
mag = mpl.colors.to_rgb("magenta")
mix = [(turq[i]+mag[i])/2 for i in range(3)]
ax.plot(X, prior,
color = turq,
label = "Prior",
zorder = 2
)
ax.plot(X, t,
color = mag,
label = "Likelihood",
zorder = 2
)
ax.plot(X, posterior,
color = mix,
label = "Posterior",
zorder = 2
)
# X axis
#ax.set_xlabel(r"$\theta$", fontsize=10)
ax.set_xlim(0, 1)
x_ticks = [i/4 for i in range(5)]
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_ticks)
# Y axis
#ax.set_ylabel(r"Pr$(\mathcal{D} | \theta)$")
#ax.set_ylabel(r"PDF", fontsize=10)
ax.set_ylim(0, y_max)
y_ticks = [i for i in range(5)]
ax.set_yticks(y_ticks)
ax.set_yticklabels(y_ticks)
if i == 0:
ax.legend(
loc="upper left",
framealpha=0,
fontsize=14
)
ax.text(
0.77,
0.025,
'cookieblues.github.io',
fontsize=11,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes,
color='dimgrey',
zorder=5
)
plt.tight_layout()
plt.savefig("test.png", bbox_inches="tight")
plt.show()
| 20.271845
| 82
| 0.543103
|
e8325857b507b994c51d3b581bae034b15b07237
| 989
|
py
|
Python
|
Scripts/day20.py
|
HarrisonGreen/Advent-of-Code-2015
|
6a81395882c79135548eb7984bfd98c279c5f258
|
[
"MIT"
] | null | null | null |
Scripts/day20.py
|
HarrisonGreen/Advent-of-Code-2015
|
6a81395882c79135548eb7984bfd98c279c5f258
|
[
"MIT"
] | null | null | null |
Scripts/day20.py
|
HarrisonGreen/Advent-of-Code-2015
|
6a81395882c79135548eb7984bfd98c279c5f258
|
[
"MIT"
] | null | null | null |
from sympy.ntheory import factorint
import numpy as np
def lowest_house_num(num, step):
house = 0
while True:
house += step
factorisation = factorint(house)
factors = np.ones(1, dtype=int)
for prime, power in factorisation.items():
factors = np.outer(np.array([prime**k for k in range(power+1)]), factors).ravel()
if sum(factors)*10 >= num:
return house
def lowest_house_lazy_elves(num, step):
house = 0
while True:
house += step
factors = []
for i in range(1, 51):
if house/i == house//i:
factors.append(house//i)
if sum(factors)*11 >= num:
return house
if __name__ == "__main__":
step = 2520
num = 34000000
print(f"Part one: {lowest_house_num(num, step)}")
print(f"Part two: {lowest_house_lazy_elves(num, step)}")
| 26.72973
| 94
| 0.52275
|
f2c9bed57ec57d391b5d4d17c06fdd65522449f2
| 782
|
py
|
Python
|
scraping/requests/scraping_requests_idealista.py
|
txtbits/daw-python
|
5dde1207e2791e90aa5e9ce2b6afc4116129efab
|
[
"MIT"
] | null | null | null |
scraping/requests/scraping_requests_idealista.py
|
txtbits/daw-python
|
5dde1207e2791e90aa5e9ce2b6afc4116129efab
|
[
"MIT"
] | null | null | null |
scraping/requests/scraping_requests_idealista.py
|
txtbits/daw-python
|
5dde1207e2791e90aa5e9ce2b6afc4116129efab
|
[
"MIT"
] | null | null | null |
import requests
from amara.bindery import html
from amara.lib import U
URL = 'http://www.idealista.com/pagina/inmueble?codigoinmueble=VP0000005600220&numInm=4&edd=list'
URL2 = 'http://www.idealista.com/pagina/inmueble?codigoinmueble=VP0000005027406&numInm=3&edd=list'
''' the x prefix stands for xpath '''
xpiso = '/html/body/div[2]/div/div/div/div[2]/h1'
xprecio = '//*[@id="price"]'
xmetros = '/html/body/div[2]/div/div/div[2]/div[2]/div/div/div/div[2]/p/strong'
xtelf = '/html/body/div[2]/div/div/div[2]/div/form/div[2]/p/strong'
#pagina = requests.get(URL)
doc = html.parse(URL) #(pagina.content.decode('utf-8'))
print U(doc.xml_select(xpiso)[0]).strip()
print U(doc.xml_select(xprecio)[0]).strip()
print U(doc.xml_select(xmetros)[0]).strip()
print U(doc.xml_select(xtelf)[0]).strip()
| 34
| 98
| 0.710997
|
bbc1cf6eeb72485e00baa296363bf59f992bc515
| 2,435
|
py
|
Python
|
protocols/home_assistant/serrialize_protocol.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | 1
|
2018-07-31T21:17:37.000Z
|
2018-07-31T21:17:37.000Z
|
protocols/home_assistant/serrialize_protocol.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | null | null | null |
protocols/home_assistant/serrialize_protocol.py
|
ReanGD/smart-home
|
0d3ebe3213ad275f64490218ca3dbc0128c12339
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from hashlib import md5
from typing import List
from struct import pack, unpack
from google.protobuf import message as gp_message
from protocols.transport import TransportError, SerrializeProtocol
class HASerrializeProtocol(SerrializeProtocol):
def __init__(self, protobuf_types: List[object], logger):
super().__init__(protobuf_types, logger)
self._type_size = md5().digest_size
self._types_map = {HASerrializeProtocol._hash(it.__name__): it for it in protobuf_types}
@staticmethod
def _hash(message: str) -> bytes:
return md5(message.encode('utf-8')).digest()
async def send(self, writer: asyncio.streams.StreamWriter, message: gp_message) -> None:
message_bin = message.SerializeToString()
message_len = len(message_bin)
message_name = message.DESCRIPTOR.name
try:
self._logger.debug('Send protobuf message "%s" (%d bytes)', message_name, message_len)
writer.write(pack('>I', message_len + self._type_size))
writer.write(HASerrializeProtocol._hash(message_name))
writer.write(message_bin)
await writer.drain()
except ConnectionResetError:
msg = 'Send protobuf message "%s" (%d bytes) finished with error: Connection lost'
self._logger.error(msg, message_name, message_len)
raise
except Exception as ex:
msg = 'Send protobuf message "%s" (%d bytes) finished with error: <%s> %s'
self._logger.error(msg, message_name, message_len, type(ex), ex)
raise
async def recv(self, reader: asyncio.streams.StreamReader) -> gp_message:
package_size_bin = await reader.readexactly(4)
package_size = unpack('>I', package_size_bin)[0]
package_bin = await reader.readexactly(package_size)
proto_type = self._types_map.get(package_bin[:self._type_size], None)
if proto_type is None:
self._logger.debug('Recv unknown protobuf message (%d bytes)',
package_size - self._type_size)
raise TransportError('Recv unknown protobuf message')
message = proto_type()
message.ParseFromString(package_bin[self._type_size:])
self._logger.debug('Recv protobuf message "%s" (%d bytes)',
message.DESCRIPTOR.name, package_size - self._type_size)
return message
| 45.092593
| 98
| 0.664887
|
3f8ee444f62dc8f1cb713f2d571d39a1c63635ad
| 11,829
|
py
|
Python
|
mycroft-core-dev/mycroft/util/lang/format_hu.py
|
edegeyer/masterarbeit
|
a310de258a325faf3f4283f9db5ccf57905d3c0e
|
[
"Apache-2.0"
] | 5
|
2019-08-15T13:22:05.000Z
|
2020-05-10T04:04:48.000Z
|
mycroft-core-dev/mycroft/util/lang/format_hu.py
|
edegeyer/masterarbeit
|
a310de258a325faf3f4283f9db5ccf57905d3c0e
|
[
"Apache-2.0"
] | 8
|
2019-08-15T16:26:51.000Z
|
2021-09-08T01:05:04.000Z
|
mycroft/util/lang/format_hu.py
|
Mel1818/mycroft-changes
|
f3598569f452fa1514a4c4c9b33be4073f5a5d1f
|
[
"Apache-2.0"
] | 4
|
2018-06-09T11:58:56.000Z
|
2019-02-10T07:28:58.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.util.lang.format_common import convert_to_mixed_fraction
from math import floor
months = ['január', 'február', 'március', 'április', 'május', 'június',
'július', 'augusztus', 'szeptember', 'október', 'november',
'december']
NUM_STRING_HU = {
0: 'nulla',
1: 'egy',
2: 'kettő',
3: 'három',
4: 'négy',
5: 'öt',
6: 'hat',
7: 'hét',
8: 'nyolc',
9: 'kilenc',
10: 'tíz',
11: 'tizenegy',
12: 'tizenkettő',
13: 'tizenhárom',
14: 'tizennégy',
15: 'tizenöt',
16: 'tizenhat',
17: 'tizenhét',
18: 'tizennyolc',
19: 'tizenkilenc',
20: 'húsz',
30: 'harminc',
40: 'negyven',
50: 'ötven',
60: 'hatvan',
70: 'hetven',
80: 'nyolcvan',
90: 'kilencven',
100: 'száz'
}
# Hungarian uses "long scale"
# https://en.wikipedia.org/wiki/Long_and_short_scales
# Currently, numbers are limited to 1000000000000000000000000,
# but NUM_POWERS_OF_TEN can be extended to include additional number words
NUM_POWERS_OF_TEN = [
'', 'ezer', 'millió', 'milliárd', 'billió', 'billiárd', 'trillió',
'trilliárd'
]
FRACTION_STRING_HU = {
2: 'fél',
3: 'harmad',
4: 'negyed',
5: 'ötöd',
6: 'hatod',
7: 'heted',
8: 'nyolcad',
9: 'kilenced',
10: 'tized',
11: 'tizenegyed',
12: 'tizenketted',
13: 'tizenharmad',
14: 'tizennegyed',
15: 'tizenötöd',
16: 'tizenhatod',
17: 'tizenheted',
18: 'tizennyolcad',
19: 'tizenkilenced',
20: 'huszad'
}
# Numbers below 2 thousand are written in one word in Hungarian
# Numbers above 2 thousand are separated by hyphens
# In some circumstances it may be better to separate individual words
# Set EXTRA_SPACE=" " for separating numbers below 2 thousand (
# orthographically incorrect)
# Set EXTRA_SPACE="" for correct spelling, this is standard
# EXTRA_SPACE = " "
EXTRA_SPACE = ""
def _get_vocal_type(word):
# checks the vocal attributes of a word
vowels_high = len([char for char in word if char in 'eéiíöőüű'])
vowels_low = len([char for char in word if char in 'aáoóuú'])
if vowels_high != 0 and vowels_low != 0:
return 2 # 2: type is mixed
return 0 if vowels_high == 0 else 1 # 0: type is low, 1: is high
def nice_number_hu(number, speech, denominators):
""" Hungarian helper for nice_number
    This function formats a float into a human understandable form. Like
4.5 becomes "4 és fél" for speech and "4 1/2" for text
Args:
number (int or float): the float to format
speech (bool): format for speech (True) or display (False)
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_to_mixed_fraction(number, denominators)
if not result:
# Give up, just represent as a 3 decimal number
return str(round(number, 3)).replace(".", ",")
whole, num, den = result
if not speech:
if num == 0:
# TODO: Number grouping? E.g. "1,000,000"
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
if num == 0:
return str(whole)
den_str = FRACTION_STRING_HU[den]
if whole == 0:
if num == 1:
one = 'egy ' if den != 2 else ''
return_string = '{}{}'.format(one, den_str)
else:
return_string = '{} {}'.format(num, den_str)
elif num == 1:
pointOne = 'egész egy' if den != 2 else 'és'
return_string = '{} {} {}'.format(whole, pointOne, den_str)
else:
return_string = '{} egész {} {}'.format(whole, num, den_str)
return return_string
def pronounce_number_hu(num, places=2):
"""
Convert a number to its spoken equivalent
For example, '5.2' would return 'öt egész két tized'
Args:
num(float or int): the number to pronounce (set limit below)
places(int): maximum decimal places to speak
Returns:
(str): The pronounced number
"""
def pronounce_triplet_hu(num):
result = ""
num = floor(num)
if num > 99:
hundreds = floor(num / 100)
if hundreds > 0:
hundredConst = EXTRA_SPACE + 'száz' + EXTRA_SPACE
if hundreds == 1:
result += hundredConst
elif hundreds == 2:
result += 'két' + hundredConst
else:
result += NUM_STRING_HU[hundreds] + hundredConst
num -= hundreds * 100
if num == 0:
result += '' # do nothing
elif num <= 20:
result += NUM_STRING_HU[num] # + EXTRA_SPACE
elif num > 20:
ones = num % 10
tens = num - ones
if tens > 0:
if tens != 20:
result += NUM_STRING_HU[tens] + EXTRA_SPACE
else:
result += "huszon" + EXTRA_SPACE
if ones > 0:
result += NUM_STRING_HU[ones] + EXTRA_SPACE
return result
def pronounce_whole_number_hu(num, scale_level=0):
if num == 0:
return ''
num = floor(num)
result = ''
last_triplet = num % 1000
if last_triplet == 1:
if scale_level == 0:
if result != '':
result += '' + "egy"
else:
result += "egy"
elif scale_level == 1:
result += EXTRA_SPACE + NUM_POWERS_OF_TEN[1] + EXTRA_SPACE
else:
result += "egy" + NUM_POWERS_OF_TEN[scale_level]
elif last_triplet > 1:
result += pronounce_triplet_hu(last_triplet)
if scale_level != 0:
result = result.replace(NUM_STRING_HU[2], 'két')
if scale_level == 1:
result += NUM_POWERS_OF_TEN[1] + EXTRA_SPACE
if scale_level >= 2:
result += NUM_POWERS_OF_TEN[scale_level]
if scale_level > 0:
result += '-'
num = floor(num / 1000)
scale_level += 1
return pronounce_whole_number_hu(num,
scale_level) + result
result = ""
if abs(num) >= 1000000000000000000000000: # cannot do more than this
return str(num)
elif num == 0:
return str(NUM_STRING_HU[0])
elif num < 0:
return "mínusz " + pronounce_number_hu(abs(num), places)
else:
if num == int(num):
return pronounce_whole_number_hu(num).strip('-')
else:
whole_number_part = floor(num)
fractional_part = num - whole_number_part
if whole_number_part == 0:
result += NUM_STRING_HU[0]
result += pronounce_whole_number_hu(whole_number_part)
if places > 0:
result += " egész "
fraction = pronounce_whole_number_hu(
round(fractional_part * 10 ** places))
result += fraction.replace(NUM_STRING_HU[2], 'két')
fraction_suffixes = [
'tized', 'század', 'ezred', 'tízezred', 'százezred']
if places <= len(fraction_suffixes):
result += ' ' + fraction_suffixes[places - 1]
return result
def pronounce_ordinal_hu(num):
ordinals = ["nulladik", "első", "második", "harmadik", "negyedik",
"ötödik", "hatodik", "hetedik", "nyolcadik", "kilencedik",
"tizedik"]
big_ordinals = ["", "ezredik", "milliomodik"]
# only for whole positive numbers including zero
if num < 0 or num != int(num):
return num
elif num < 11:
return ordinals[num]
else:
# concatenate parts and inflect them accordingly
root = pronounce_number_hu(num)
vtype = _get_vocal_type(root)
last_digit = num - floor(num/10) * 10
if root == "húsz":
root = "husz"
if num % 1000000 == 0:
return root.replace(NUM_POWERS_OF_TEN[2], big_ordinals[2])
if num % 1000 == 0:
return root.replace(NUM_POWERS_OF_TEN[1], big_ordinals[1])
if last_digit == 1:
return root + "edik"
elif root[-1] == 'ő':
return root[:-1] + 'edik'
elif last_digit != 0:
return ordinals[last_digit].join(
root.rsplit(NUM_STRING_HU[last_digit], 1))
return root + "edik" if vtype == 1 else root + "adik"
def nice_time_hu(dt, speech=True, use_24hour=False, use_ampm=False):
"""
Format a time to a comfortable human format
For example, generate 'five thirty' for speech or '5:30' for
text display.
Args:
dt (datetime): date to format (assumes already in local timezone)
        speech (bool): format for speech (default/True) or display (False)
use_24hour (bool): output in 24-hour/military or 12-hour format
use_ampm (bool): include the am/pm for 12-hour format
Returns:
(str): The formatted time string
"""
if use_24hour:
# e.g. "03:01" or "14:22"
string = dt.strftime("%H:%M")
else:
if use_ampm:
# e.g. "3:01 AM" or "2:22 PM"
string = dt.strftime("%I:%M %p")
else:
# e.g. "3:01" or "2:22"
string = dt.strftime("%I:%M")
if string[0] == '0':
string = string[1:] # strip leading zeros
if not speech:
return string
# Generate a speakable version of the time
speak = ""
if use_24hour:
speak += pronounce_number_hu(dt.hour)
speak = speak.replace(NUM_STRING_HU[2], 'két')
speak += " óra"
if not dt.minute == 0: # zero minutes are not pronounced
speak += " " + pronounce_number_hu(dt.minute)
return speak # ampm is ignored when use_24hour is true
else:
if dt.hour == 0 and dt.minute == 0:
return "éjfél"
if dt.hour == 12 and dt.minute == 0:
return "dél"
# TODO: "half past 3", "a quarter of 4" and other idiomatic times
if dt.hour == 0:
speak += pronounce_number_hu(12)
elif dt.hour < 13:
speak = pronounce_number_hu(dt.hour)
else:
speak = pronounce_number_hu(dt.hour - 12)
speak = speak.replace(NUM_STRING_HU[2], 'két')
speak += " óra"
if not dt.minute == 0:
speak += " " + pronounce_number_hu(dt.minute)
if use_ampm:
if dt.hour > 11:
if dt.hour < 18:
speak = "délután " + speak # 12:01 - 17:59
elif dt.hour < 22:
speak = "este " + speak # 18:00 - 21:59 este/evening
else:
speak = "éjjel " + speak # 22:00 - 23:59 éjjel/at night
elif dt.hour < 3:
speak = "éjjel " + speak # 00:01 - 02:59 éjjel/at night
else:
speak = "reggel " + speak # 03:00 - 11:59 reggel/in t. morning
return speak
| 32.319672
| 79
| 0.550004
|
f836318d0ce0fe4788cfc239c2c729b0659d4e3b
| 4,343
|
py
|
Python
|
assignment1/dTree.py
|
samarth-math/machine-learning
|
fb25569395093d62e8013d5454ed3f1a0f7811e1
|
[
"MIT"
] | null | null | null |
assignment1/dTree.py
|
samarth-math/machine-learning
|
fb25569395093d62e8013d5454ed3f1a0f7811e1
|
[
"MIT"
] | null | null | null |
assignment1/dTree.py
|
samarth-math/machine-learning
|
fb25569395093d62e8013d5454ed3f1a0f7811e1
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import log
import fileinput
#assumed last column to be label
# read file as a numpy array
def readFileRecs(fileName, many=None):
ticTac = []
if many!=None:
for i in range(1,7):
f=open(fileName+str(i)+".txt", 'r')
for line in f:
lineArray = line.strip('\n').split(",")
ticTac.append(lineArray)
else:
f=open(fileName, 'r')
for line in f:
lineArray = line.strip('\n').split(",")
ticTac.append(lineArray)
npTicTac = np.array(ticTac)
return npTicTac
# DECISION TREE Data Type : python Dictionary
#Node Names : positions : 0,1,2,3,4,5,6,7,8 win:9
# currentNode is a number
# data is a 2D array of all the data
#easy to change Node choosing function
def chooseNode(data,labelCol,validAttributes):
return id3Funct(data,labelCol,validAttributes)
# get counts for each type of label
def getRatios(column):
uniqueVals = np.unique(column)
ratios = {}
total = 0
for val in uniqueVals:
count = list(column).count(val)
total += count
ratios.update({val:count})
maxVal = max(ratios)
ratios.update({'total':total})
return ratios, maxVal
def getColumn(data, colPos):
return data[:,colPos]
def entropyFunction(dataSubset,labelColN=9):
labelCol = getColumn(dataSubset, labelColN)
ratios = getRatios(labelCol)[0]
sumofVal=0.0
for val in ratios:
if val!="total":
p = float(ratios[val])/ratios["total"]
term = -1*p*log(p,2)
sumofVal+=term
return sumofVal
def getEntropy (data, colPos):
columnVals = getColumn(data, colPos)
ratios = getRatios(columnVals)[0]
sumE = 0.0
for val in ratios:
if val!="total":
dataSubset = data[[data[:,colPos]==val]]
prob = float(ratios[val])/ratios["total"]
entropySmall = entropyFunction(dataSubset)
sumE+= prob*entropySmall
return sumE
def id3Funct(data, labelPos, validAttributes=None): #data must consist of only valid rows
#totalEntropy = getEntropy(data, labelPos) - you don't actually need information gain, you just need minimum entropy
minE = getEntropy(data, validAttributes[0])
minVal = validAttributes[0]
for val in validAttributes:
currEntropy = getEntropy(data, val)
if currEntropy<minE:
minE=currEntropy
minVal = val
return minVal
def makeDecisionTree (data,labelColN,mostFrequent,validAttributes=None):
# Make base conditions
currentData = data
if validAttributes==None:
numrows, numcols = currentData.shape
validAttributes = [i for i in range((numcols-1))]
if len(currentData)<1 or len(validAttributes)<1: # if out of data, or attribute values
return mostFrequent
labelCol = getColumn(currentData,9)
ratios = getRatios(labelCol)[0]
subTree = {}
if len(ratios)==2: # implies, only a total and one label value in the dictionary
if ratios.keys()[0]=="total":
return ratios.keys()[1]
else:
return ratios.keys()[0]
currentNode=chooseNode(currentData, labelColN, validAttributes) # ID3 function
validAttributes.remove(currentNode)
currentNodeCol = getColumn(currentData,currentNode)
currentNodeValues = getRatios(currentNodeCol)[0]
for val in currentNodeValues:
if val!="total":
            dataSubset = currentData[currentData[:,currentNode]==val]  # keep rows where the chosen attribute equals this value
subTree[val]= makeDecisionTree(dataSubset,9,mostFrequent,validAttributes)
theTree={} #theTree[str(currentNode)+"."+val] = makeDecisionTree(dataSubset,9,mostFrequent,validAttributes)
theTree[currentNode]=subTree;
return theTree
def testTree(record, tree, mostFrequent, unparsedAtt=None):
if unparsedAtt==None:
unparsedAtt = [i for i in range(len(record))]
#print unparsedAtt
#if len(unparsedAtt)<1:
# return mostFrequent
currentNode = int(tree.keys()[0])
currNodeVal=record[currentNode]
unparsedAtt.remove(currentNode)
if type(tree[currentNode][currNodeVal])!=dict:
return tree[currentNode][currNodeVal]
else:
return testTree(record,tree[currentNode][currNodeVal],unparsedAtt)
#def useDecisionTree():
def mainFunction():
ticTac = readFileRecs("tic-tac-toe/tic-tac-toe-train-",'true')
testTicTac = readFileRecs("tic-tac-toe/tic-tac-toe-test.txt")
#print
labelCol = getColumn(ticTac,9)
mostFrequent = getRatios(labelCol)[1]
tree = makeDecisionTree(ticTac,9,mostFrequent)
count=0
for record in testTicTac:
result = testTree(record,tree,mostFrequent)
if result==record[9]:
count+=1
print "accuracy = ", float(count)/len(testTicTac)
mainFunction();
| 26.321212
| 118
| 0.728989
|
1c2d4bbb9696658238315a913f60295083292da6
| 2,835
|
py
|
Python
|
tests/test_serialized_schema.py
|
TheBigSasha/OpenTimelineIO
|
0d857086cbb4fc39c1303947c61318aa6e523ea5
|
[
"Apache-2.0"
] | 5
|
2018-07-27T03:52:26.000Z
|
2021-04-02T04:10:15.000Z
|
tests/test_serialized_schema.py
|
TheBigSasha/OpenTimelineIO
|
0d857086cbb4fc39c1303947c61318aa6e523ea5
|
[
"Apache-2.0"
] | 1
|
2019-06-20T04:02:54.000Z
|
2019-06-20T04:02:54.000Z
|
tests/test_serialized_schema.py
|
TheBigSasha/OpenTimelineIO
|
0d857086cbb4fc39c1303947c61318aa6e523ea5
|
[
"Apache-2.0"
] | 1
|
2019-11-15T21:20:24.000Z
|
2019-11-15T21:20:24.000Z
|
#
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import unittest
import os
from opentimelineio.console import (
autogen_serialized_datamodel as asd,
autogen_plugin_documentation as apd,
)
class SerializedSchemaTester(unittest.TestCase):
def test_serialized_schema(self):
"""Test if the schema has changed since last time the serialized schema
documentation was generated.
"""
pt = os.path.dirname(os.path.dirname(__file__))
fp = os.path.join(pt, "docs", "tutorials", "otio-serialized-schema.md")
with open(fp) as fi:
baseline_text = fi.read()
test_text, _ = asd.generate_and_write_documentation()
self.maxDiff = None
self.longMessage = True
self.assertMultiLineEqual(
baseline_text,
test_text,
"\n The schema has changed and the autogenerated documentation in {}"
" needs to be updated. run: `make doc-model-update`".format(fp)
)
class PluginDocumentationTester(unittest.TestCase):
def test_plugin_documentation(self):
"""Verify that the plugin manifest matches what is checked into the
documentation.
"""
pt = os.path.dirname(os.path.dirname(__file__))
fp = os.path.join(pt, "docs", "tutorials", "otio-plugins.md")
with open(fp) as fi:
baseline_text = fi.read()
test_text = apd.generate_and_write_documentation_plugins(True, True)
self.maxDiff = None
self.longMessage = True
self.assertMultiLineEqual(
baseline_text,
test_text,
"\n The schema has changed and the autogenerated documentation in {}"
" needs to be updated. run: `make doc-plugins-update`".format(fp)
)
| 36.346154
| 81
| 0.687478
|
91ada965ebd3c920c908612db56d0b51a1212583
| 5,177
|
py
|
Python
|
kdl_wagtail/core/migrations/0017_add_show_in_menus.py
|
kingsdigitallab/django-kdl-wagtail
|
457623a35057f88ee575397ac2c68797f35085e1
|
[
"MIT"
] | 3
|
2020-02-18T07:19:13.000Z
|
2021-06-14T20:35:08.000Z
|
kdl_wagtail/core/migrations/0017_add_show_in_menus.py
|
kingsdigitallab/django-kdl-wagtail
|
457623a35057f88ee575397ac2c68797f35085e1
|
[
"MIT"
] | 16
|
2019-02-08T19:39:27.000Z
|
2020-07-30T20:01:38.000Z
|
kdl_wagtail/core/migrations/0017_add_show_in_menus.py
|
kingsdigitallab/django-kdl-wagtail
|
457623a35057f88ee575397ac2c68797f35085e1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-15 16:32
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('kdl_wagtail_core', '0016_auto_20190505_0202'),
]
operations = [
migrations.AlterField(
model_name='streampage',
name='body',
field=wagtail.core.fields.StreamField([('heading_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('heading_text', wagtail.core.blocks.CharBlock(classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Select a header size'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5')]))])), ('richtext_block', wagtail.core.blocks.RichTextBlock(icon='pilcrow', template='kdl_wagtail_core/blocks/richtext_block.html')), ('document_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('transcription', wagtail.core.blocks.RichTextBlock(required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(required=True))])), ('gallery_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('images_block', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('transcription', wagtail.core.blocks.RichTextBlock(required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Link to a page', required=False)), ('url', wagtail.core.blocks.URLBlock(help_text='External link', required=False)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Select block alignment'), ('float-left', 'Left'), ('float-right', 'Right'), ('float-center', 'Centre'), ('full-width', 'Full width')])), ('image', wagtail.images.blocks.ImageChooserBlock(required=True))])))])), ('image_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('transcription', wagtail.core.blocks.RichTextBlock(required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(help_text='Link to a page', required=False)), ('url', wagtail.core.blocks.URLBlock(help_text='External link', required=False)), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Select block alignment'), ('float-left', 'Left'), ('float-right', 'Right'), ('float-center', 'Centre'), ('full-width', 'Full width')])), ('image', wagtail.images.blocks.ImageChooserBlock(required=True))])), ('link_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('url', wagtail.core.blocks.URLBlock(required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=False)), ('label', wagtail.core.blocks.CharBlock())])), ('pullquote_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('quote', wagtail.core.blocks.RichTextBlock()), ('attribution', wagtail.core.blocks.CharBlock(required=False))])), ('embed_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('transcription', 
wagtail.core.blocks.RichTextBlock(required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('display', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Select a display ratio'), ('widescreen', '16:9'), ('fourbythree', '4:3'), ('audio', 'Audio'), ('panorama', 'Panorama'), ('square', 'Square'), ('vertical', 'Vertical')], required=False)), ('embed_block', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL', icon='media'))])), ('table_block', wagtail.core.blocks.StructBlock([('show_in_menus', wagtail.core.blocks.BooleanBlock(default=True, required=False)), ('transcription', wagtail.core.blocks.RichTextBlock(required=False)), ('description', wagtail.core.blocks.RichTextBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('table', wagtail.contrib.table_block.blocks.TableBlock(required=True))]))], blank=True, verbose_name='Page body'),
),
]
| 207.08
| 4,652
| 0.745026
|
0fbb8dea07d7ecb2f2f0ff9216cce6d08df8659a
| 2,631
|
py
|
Python
|
catkin_ws/src/60-templates/pkg_name/src/talker.py
|
spadma3/duck
|
d4763cd49b00733278d394fc7399e920fb2990a8
|
[
"CC-BY-2.0"
] | 1
|
2021-04-27T13:20:55.000Z
|
2021-04-27T13:20:55.000Z
|
catkin_ws/src/60-templates/pkg_name/src/talker.py
|
spadma3/duck
|
d4763cd49b00733278d394fc7399e920fb2990a8
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/60-templates/pkg_name/src/talker.py
|
spadma3/duck
|
d4763cd49b00733278d394fc7399e920fb2990a8
|
[
"CC-BY-2.0"
] | null | null | null |
#!/usr/bin/env python
import rospy
from pkg_name.util import HelloGoodbye #Imports module. Not limited to modules in this pkg.
from std_msgs.msg import String #Imports msg
#from sensor_msgs.msg import CameraInfo, CompressedImage, Image
#from duckietown_utils.jpg import image_cv_from_jpg
#from cv_bridge import CvBridge
from sys import argv
class Talker(object):
def __init__(self):
# Save the name of the node
self.node_name = rospy.get_name()
rospy.loginfo("[%s] Initialzing." %(self.node_name))
# Setup publishers
self.pub_topic_a = rospy.Publisher("~topic_a",String, queue_size=1)
#self.pub_img = rospy.Publisher("/talker/filter/compressed",Image,queue_size=1)
# Setup subscriber ,"" has to be equal to launch file channel
self.sub_topic_b = rospy.Subscriber("~topic_b", String, self.cbTopic)
# Read parameters
print("Hi a")
self.pub_timestep = self.setupParameter("~pub_timestep",1.0)
self.filter=self.setupParameter("~filter", "x")
        filter_st = self.filter  # setupParameter already returns the parameter value
print("filter_st " + filter_st)
# change inside "" will be displayed at the beginning in terminal
# Create a timer that calls the cbTimer function every 1.0 second
print("hi b")
self.timer = rospy.Timer(rospy.Duration.from_sec(self.pub_timestep),self.cbTimer)
rospy.loginfo("[%s] Initialzed." %(self.node_name))
def setupParameter(self,param_name,default_value):
print("hi1")
value = rospy.get_param(param_name,default_value)
        rospy.set_param(param_name,value) #Write to parameter server for transparency
rospy.loginfo("[%s] %s = %s " %(self.node_name,param_name,value))
return value
def cbTopic(self,msg):
rospy.loginfo("[%s] %s" %(self.node_name,msg.data))
print("Hi2") #not in rospy loginfo
s = "I heard: %s" % (msg.data)
rospy.loginfo("[%s] %s" %(self.node_name, s))
def cbTimer(self,event):
print("hi3")
singer = HelloGoodbye()
# Simulate hearing something
msg = String()
msg.data = singer.sing("duckietown duck")
self.pub_topic_a.publish(msg)
def on_shutdown(self):
rospy.loginfo("[%s] Shutting down." %(self.node_name))
if __name__ == '__main__':
# Initialize the node with rospy
rospy.init_node('talker', anonymous=False)
# Create the NodeName object
node = Talker()
#filters = argv[3]
# Setup proper shutdown behavior
rospy.on_shutdown(node.on_shutdown)
# Keep it spinning to keep the node alive
rospy.spin()
| 37.056338
| 92
| 0.660965
|
63d293ef9814622d4c6a3ecb1b4753ed22294b6c
| 3,579
|
py
|
Python
|
var/spack/repos/builtin/packages/yaml-cpp/package.py
|
lcnzg/spack
|
5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
var/spack/repos/builtin/packages/yaml-cpp/package.py
|
lcnzg/spack
|
5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 18
|
2021-03-12T16:22:58.000Z
|
2022-03-02T17:07:08.000Z
|
var/spack/repos/builtin/packages/yaml-cpp/package.py
|
lcnzg/spack
|
5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-06-28T04:48:37.000Z
|
2021-06-28T04:48:37.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.spec import ConflictsInSpecError
yaml_cpp_tests_libcxx_error_msg = 'yaml-cpp tests incompatible with libc++'
class YamlCpp(CMakePackage):
"""A YAML parser and emitter in C++"""
homepage = "https://github.com/jbeder/yaml-cpp"
url = "https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.5.3.tar.gz"
git = "https://github.com/jbeder/yaml-cpp.git"
maintainers = ['eschnett']
version('develop', branch='master')
version('0.7.0', sha256='43e6a9fcb146ad871515f0d0873947e5d497a1c9c60c58cb102a97b47208b7c3')
version('0.6.3', sha256='77ea1b90b3718aa0c324207cb29418f5bced2354c2e483a9523d98c3460af1ed')
version('0.6.2', sha256='e4d8560e163c3d875fd5d9e5542b5fd5bec810febdcba61481fe5fc4e6b1fd05')
version('0.5.3', sha256='decc5beabb86e8ed9ebeb04358d5363a5c4f72d458b2c788cb2f3ac9c19467b2')
version('0.3.0', sha256='ab8d0e07aa14f10224ed6682065569761f363ec44bc36fcdb2946f6d38fe5a89')
variant('shared', default=True,
description='Build shared instead of static libraries')
variant('pic', default=True,
description='Build with position independent code')
variant('tests', default=False,
description='Build yaml-cpp tests using internal gtest')
depends_on('boost@:1.66.99', when='@0.5.0:0.5.3')
conflicts('%gcc@:4.7', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%clang@:3.3.0', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%apple-clang@:4.0.0', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%intel@:11.1', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%xl@:13.1', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%xl_r@:13.1', when='@0.6.0:', msg="versions 0.6.0: require c++11 support")
conflicts('%clang cxxflags="-stdlib=libc++"', when='+tests',
msg=yaml_cpp_tests_libcxx_error_msg)
def flag_handler(self, name, flags):
# We cannot catch all conflicts with the conflicts directive because
# the user can add arbitrary strings to the flags. Here we can at least
# fail early.
# We'll include cppflags in case users mistakenly put c++ flags there.
spec = self.spec
if name in ('cxxflags', 'cppflags') and spec.satisfies('+tests'):
if '-stdlib=libc++' in flags:
raise ConflictsInSpecError(
spec,
[(spec,
spec.compiler_flags[name],
spec.variants['tests'],
yaml_cpp_tests_libcxx_error_msg)]
)
return (flags, None, None)
def cmake_args(self):
options = []
options.extend([
self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
self.define_from_variant('YAML_BUILD_SHARED_LIBS', 'shared'),
self.define_from_variant('CMAKE_POSITION_INDEPENDENT_CODE', 'pic'),
self.define_from_variant('YAML_CPP_BUILD_TESTS', 'tests'),
])
return options
def url_for_version(self, version):
url = "https://github.com/jbeder/yaml-cpp/archive/{0}-{1}.tar.gz"
if version < Version('0.5.3'):
return url.format('release', version)
else:
return url.format('yaml-cpp', version)
| 45.303797
| 97
| 0.64627
|
9a8f4e349e16715008447a1a57faf74e9b4edf35
| 771
|
py
|
Python
|
dj_audit/settings.py
|
Miftaudeen/django-audit
|
d159869f46bbc73ec002b95ade78e399b08cfa56
|
[
"Apache-2.0"
] | null | null | null |
dj_audit/settings.py
|
Miftaudeen/django-audit
|
d159869f46bbc73ec002b95ade78e399b08cfa56
|
[
"Apache-2.0"
] | null | null | null |
dj_audit/settings.py
|
Miftaudeen/django-audit
|
d159869f46bbc73ec002b95ade78e399b08cfa56
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings as global_settings
AUDIT_LOG_DJ_EXTRA_CONDITIONS_FOR_200 = getattr(
global_settings, 'AUDIT_LOG_DJ_EXTRA_CONDITIONS_FOR_200', False)
AUDIT_LOG_DJ_EXTRA_CONDITIONS = getattr(
global_settings, 'AUDIT_LOG_DJ_EXTRA_CONDITIONS', [])
AUDIT_LOG_DJ_REST_CONTENT_TYPES = getattr(
global_settings, 'AUDIT_LOG_DJ_REST_CONTENT_TYPES', ['application/json', 'application/xml'])
AUDIT_LOG_TEMPLATE = getattr(
global_settings, 'AUDIT_LOG_TEMPLATE', 'dj_audit/audit_list_page.html')
REQUEST_STATUS_TEMPLATE = getattr(
global_settings, 'REQUEST_STATUS_TEMPLATE', 'dj_audit/request_status_page.html')
IGNORE_FILE_EXTENSIONS = getattr(
global_settings, 'IGNORE_FILE_EXTENSIONS', ['.svg', '.js', '.css', '.png', '.jpg', '.ico'])
| 38.55
| 96
| 0.785992
|
5134a0474b246f6c04482d2aee8b39469e8662b1
| 3,424
|
py
|
Python
|
project/old/ACTPol/reproduce_choi_rotation.py
|
simonsobs/ps_py
|
fd34612f6756f693df92e01912fd71b291f1774a
|
[
"BSD-3-Clause"
] | 12
|
2019-01-25T13:42:52.000Z
|
2022-03-30T22:07:33.000Z
|
project/old/ACTPol/reproduce_choi_rotation.py
|
simonsobs/ps_py
|
fd34612f6756f693df92e01912fd71b291f1774a
|
[
"BSD-3-Clause"
] | 20
|
2018-11-22T06:51:44.000Z
|
2022-03-22T19:31:14.000Z
|
project/old/ACTPol/reproduce_choi_rotation.py
|
sgiardie/PSpipe
|
046c1d68c06fd3e8b7f0d9c068d0ff999bf95a0b
|
[
"BSD-3-Clause"
] | 10
|
2019-04-19T09:32:11.000Z
|
2022-01-21T10:26:09.000Z
|
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
import pylab as plt
from pspy import so_dict, so_map,so_mcm,sph_tools,so_spectra,pspy_utils, so_map_preprocessing,so_mpi
import os,sys
from pixell import enmap
import time,os
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
spectraDir='spectra'
mcmDir='mcm'
pspy_utils.create_directory(spectraDir)
pspy_utils.create_directory(mcmDir)
spectra=['TT','TE','TB','ET','BT','EE','EB','BE','BB']
arrays=d['arrays']
niter=d['niter']
lmax=3000
type=d['type']
binning_file=d['binning_file']
theoryfile=d['theoryfile']
fsky={}
fsky['pa1']='fsky0.01081284'
fsky['pa2']='fsky0.01071187'
apo = so_map.read_map(d['apo_path'])
box=so_map.bounding_box_from_map(apo)
recompute_mcm=False
clfile=d['theoryfile']
for ar in ['pa1']:
t=time.time()
window=so_map.read_map(d['window_T_%s'%ar])
window=so_map.get_submap_car(window,box,mode='round')
window_tuple=(window,window)
print ("compute mcm and Bbl ...")
beam= np.loadtxt(d['beam_%s'%ar])
l,bl=beam[:,0],beam[:,1]
bl_tuple=(bl,bl)
if recompute_mcm==True:
mbb_inv,Bbl=so_mcm.mcm_and_bbl_spin0and2(window_tuple, binning_file,niter=niter, bl1=bl_tuple, lmax=lmax, type=type,save_file='%s/%s'%(mcmDir,ar))
else:
spin_pairs=['spin0xspin0','spin0xspin2','spin2xspin0', 'spin2xspin2']
mbb_inv,Bbl=so_mcm.read_coupling(prefix='%s/%s'%(mcmDir,ar),spin_pairs=spin_pairs)
almList=[]
nameList=[]
map_T=d['map_T_%s'%ar][0]
map_Q=d['map_Q_%s'%ar][0]
map_U=d['map_U_%s'%ar][0]
print ("compute harmonic transform ...")
so_mpi.init(True)
subtasks = so_mpi.taskrange(imin=d['iStart'], imax=d['iStop'])
for iii in range(subtasks):
t0=time.time()
print (iii)
template=so_map.from_components(map_T,map_Q,map_U)
template=so_map.get_submap_car(template,box,mode='floor')
cmb_car=template.synfast(clfile)
noise0 = so_map.white_noise(template,rms_uKarcmin_T=15)
noise1 = so_map.white_noise(template,rms_uKarcmin_T=15)
split0=cmb_car.copy()
split0.data+=noise0.data
split1=cmb_car.copy()
split1.data+=noise1.data
split0_filt=split0.copy()
split1_filt=split1.copy()
split0_filt = so_map_preprocessing.get_map_kx_ky_filtered_pyfftw(split0_filt,apo,d['filter_dict'])
split1_filt = so_map_preprocessing.get_map_kx_ky_filtered_pyfftw(split1_filt,apo,d['filter_dict'])
alm0= sph_tools.get_alms(split0,window_tuple,niter,lmax)
alm1= sph_tools.get_alms(split1,window_tuple,niter,lmax)
alm0_filt= sph_tools.get_alms(split0_filt,window_tuple,niter,lmax)
alm1_filt= sph_tools.get_alms(split1_filt,window_tuple,niter,lmax)
l,ps= so_spectra.get_spectra(alm0,alm1,spectra=spectra)
l,ps_filt= so_spectra.get_spectra(alm0_filt,alm1_filt,spectra=spectra)
lb,Db_dict=so_spectra.bin_spectra(l,ps,binning_file,lmax,type=type,mbb_inv=mbb_inv,spectra=spectra)
lb,Db_dict_filt=so_spectra.bin_spectra(l,ps_filt,binning_file,lmax,type=type,mbb_inv=mbb_inv,spectra=spectra)
so_spectra.write_ps('%s/spectra_%03d.dat'%(spectraDir,iii),lb,Db_dict,type=type,spectra=spectra)
so_spectra.write_ps('%s/spectra_filt_%03d.dat'%(spectraDir,iii),lb,Db_dict_filt,type=type,spectra=spectra)
print (time.time()-t0)
| 31.703704
| 154
| 0.69889
|
b0dbfd1ed87d39bb2f75db5297c972fab1bc9de2
| 2,416
|
py
|
Python
|
interview/traxn/chef_ingredients.py
|
rjsnh1522/geeks-4-geeks-python
|
9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4
|
[
"MIT"
] | null | null | null |
interview/traxn/chef_ingredients.py
|
rjsnh1522/geeks-4-geeks-python
|
9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4
|
[
"MIT"
] | 5
|
2021-03-10T11:49:39.000Z
|
2022-02-27T01:35:59.000Z
|
interview/traxn/chef_ingredients.py
|
rjsnh1522/geeks-4-geeks-python
|
9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4
|
[
"MIT"
] | null | null | null |
# number_of_days = int(input())
ingredients_cat = {
'fat': [],
'fiber': [],
'carb': []
}
# number_of_days = int(input())
arra = ["CARBBeetroot", "FIBERCarrot", "FATOlive", "CARBCorn",
"CARBPotato", "FIBERBroccoli", "FATEgg", "FIBERBeans", "FATCheese",
"CARBRice", "FIBERSpinach", "FATOil"]
# arra = input().split()
current_rank = 1
for ingredient in arra:
a = ingredient.lower()
if 'fat' in a:
ingredients_cat['fat'].append(ingredient)
elif 'fiber' in a:
ingredients_cat['fiber'].append(ingredient)
elif 'carb' in a:
ingredients_cat['carb'].append(ingredient)
else:
pass
current_ingredients_length = len(ingredients_cat['fat']) + len(ingredients_cat['fiber']) + len(ingredients_cat['carb'])
print(ingredients_cat)
if current_ingredients_length >= 3:
fat = len(ingredients_cat['fat'])
fiber = len(ingredients_cat['fiber'])
carb = len(ingredients_cat['carb'])
if fat >= 2:
fat1 = ingredients_cat['fat'].pop(0)
fat2 = ingredients_cat['fat'].pop(0)
if fat == 3:
fat3 = ingredients_cat['fat'].pop(0)
print("1", end="")
elif fiber >= 1:
fiber1 = ingredients_cat['fiber'].pop(0)
print("1", end="")
else:
carb1 = ingredients_cat['carb'].pop(0)
print("1", end="")
elif carb >= 2:
carb1 = ingredients_cat['carb'].pop(0)
carb2 = ingredients_cat['carb'].pop(0)
if carb == 3:
ingredients_cat['carb'].pop(0)
print("1", end="")
elif fat >= 1:
ingredients_cat['fat'].pop(0)
print("1", end="")
else:
fiber1 = ingredients_cat['fiber'].pop(0)
print("1", end="")
elif fiber >= 2:
fiber1 = ingredients_cat['fiber'].pop(0)
fiber2 = ingredients_cat['fiber'].pop(0)
if fiber == 3:
ingredients_cat['fiber'].pop(0)
print("1", end="")
elif fat >= 1:
ingredients_cat['fat'].pop(0)
print("1", end="")
else:
ingredients_cat['carb'].pop(0)
print("1", end="")
else:
print("0", end="")
else:
print("0", end="")
# print(ingredients_cat)
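# Editor's note (not part of the original solution): the category test above relies on
# substring membership ('fat' in a). Because every prefix sits at the start of the
# ingredient name, str.startswith() expresses the same intent more strictly, as sketched
# below with an illustrative sample value.
sample = "FIBERCarrot"
category = next((c for c in ("fat", "fiber", "carb") if sample.lower().startswith(c)), None)
assert category == "fiber"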
| 31.376623
| 123
| 0.501242
|
89b87bab76e7011fa634a37c9041d62ca4ba4c34
| 576
|
py
|
Python
|
libs/PrettyOutput.py
|
DD-MB-PISS/spoofcheck
|
d62b99ab642d22411ee462659c3d79e301d9a591
|
[
"MIT"
] | null | null | null |
libs/PrettyOutput.py
|
DD-MB-PISS/spoofcheck
|
d62b99ab642d22411ee462659c3d79e301d9a591
|
[
"MIT"
] | null | null | null |
libs/PrettyOutput.py
|
DD-MB-PISS/spoofcheck
|
d62b99ab642d22411ee462659c3d79e301d9a591
|
[
"MIT"
] | null | null | null |
from colorama import Fore, Back, Style
from colorama import init as color_init
def output_good(line):
print(Fore.GREEN + Style.BRIGHT + "[+]" + Style.RESET_ALL + line)
def output_indifferent(line):
print(Fore.BLUE + Style.BRIGHT + "[*]" + Style.RESET_ALL + line)
def output_error(line):
print(Fore.RED + Style.BRIGHT + "[-] !!! " + Style.NORMAL + line + Style.BRIGHT + "!!!")
def output_bad(line):
print(Fore.RED + Style.BRIGHT + "[-]" + Style.RESET_ALL + line)
def output_info(line):
print(Fore.WHITE + Style.BRIGHT + "[*]" + Style.RESET_ALL + line)
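# Editor's usage sketch (not part of the original module): colorama's init is imported
# above as color_init but never called here, so callers are expected to do it once
# themselves (required for ANSI colours on Windows). The messages are illustrative.
if __name__ == "__main__":
    color_init()
    output_info(" starting checks")
    output_good(" SPF record found")
    output_bad(" DMARC record missing")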
| 32
| 92
| 0.659722
|
63843705b13af76f49c89380ac88583d632713e6
| 678
|
py
|
Python
|
www/src/Lib/test/test_openpty.py
|
stefanhoelzl/brython
|
433d272e7bb0e3c0994392f8f265bc39e87854f7
|
[
"BSD-3-Clause"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
www/src/Lib/test/test_openpty.py
|
SungBeom/BBAM_Brython
|
107036ad20a94af1d43e5ce5bd7c73e6a470d687
|
[
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
check-python33-manual/samples/standard_library_337/Lib/test/test_openpty.py
|
DaveKaretnyk/parsing-utils2
|
40085bbd399fa605f2f2a4708d385a64ffc907de
|
[
"MIT"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
# Test to see if openpty works. (But don't worry if it isn't available.)
import os, unittest
from test.support import run_unittest
if not hasattr(os, "openpty"):
raise unittest.SkipTest("No openpty() available.")
class OpenptyTest(unittest.TestCase):
def test(self):
master, slave = os.openpty()
self.addCleanup(os.close, master)
self.addCleanup(os.close, slave)
if not os.isatty(slave):
self.fail("Slave-end of pty is not a terminal.")
os.write(slave, b'Ping!')
self.assertEqual(os.read(master, 1024), b'Ping!')
def test_main():
run_unittest(OpenptyTest)
if __name__ == '__main__':
test_main()
| 26.076923
| 72
| 0.656342
|
c52ff42c1460ef0f5731b062571c8635671be0e4
| 423
|
py
|
Python
|
shiSock-0.3.0/test/test_one_unsecure/s.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | null | null | null |
shiSock-0.3.0/test/test_one_unsecure/s.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | null | null | null |
shiSock-0.3.0/test/test_one_unsecure/s.py
|
AnanyaRamanA/shiSock
|
51efb0eba17eb106b9480598d278536ddd7732c3
|
[
"MIT"
] | 1
|
2021-10-31T13:47:42.000Z
|
2021-10-31T13:47:42.000Z
|
from PySock import server
def client_msg(data):
print(f"Message From : {data['sender_name']} => {data['data']}")
s = server(secure = False,debug = True)
s.SERVER("localhost",8888,10)
s.CREATE_CHANNEL("test")
new_client = []
while True:
for d in s.conClients:
if d not in new_client:
s.SEND(d,"test","Hello From Server")
new_client.append(d)
s.LISTEN("test",client_msg)
| 21.15
| 68
| 0.624113
|
509e18d3c4db7bd49ceb779604f3829253072e1c
| 16,012
|
py
|
Python
|
yolo_tiny.py
|
dicastro/tfm
|
af875dd099288494a1f02a831e42d900d6cac29b
|
[
"MIT"
] | null | null | null |
yolo_tiny.py
|
dicastro/tfm
|
af875dd099288494a1f02a831e42d900d6cac29b
|
[
"MIT"
] | null | null | null |
yolo_tiny.py
|
dicastro/tfm
|
af875dd099288494a1f02a831e42d900d6cac29b
|
[
"MIT"
] | null | null | null |
from keras.layers import Conv2D, Input, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D, Lambda, MaxPooling2D
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.engine.topology import Layer
import tensorflow as tf
class YoloLayer(Layer):
def __init__(self, anchors, max_grid, batch_size, warmup_batches, ignore_thresh,
grid_scale, obj_scale, noobj_scale, xywh_scale, class_scale,
**kwargs):
# make the model settings persistent
self.ignore_thresh = ignore_thresh
self.warmup_batches = warmup_batches
self.anchors = tf.constant(anchors, dtype='float', shape=[1,1,1,3,2])
self.grid_scale = grid_scale
self.obj_scale = obj_scale
self.noobj_scale = noobj_scale
self.xywh_scale = xywh_scale
self.class_scale = class_scale
self.batch_size = batch_size
# make a persistent mesh grid
max_grid_h, max_grid_w = max_grid
cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(max_grid_w), [max_grid_h]), (1, max_grid_h, max_grid_w, 1, 1)))
cell_y = tf.transpose(cell_x, (0,2,1,3,4))
self.cell_grid = tf.tile(tf.concat([cell_x,cell_y],-1), [batch_size, 1, 1, 3, 1])
super(YoloLayer, self).__init__(**kwargs)
def build(self, input_shape):
super(YoloLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
input_image, y_pred, y_true, true_boxes = x
# adjust the shape of the y_predict [batch, grid_h, grid_w, 3, 4+1+nb_class]
y_pred = tf.reshape(y_pred, tf.concat([tf.shape(y_pred)[:3], tf.constant([3, -1])], axis=0))
# initialize the masks
object_mask = tf.expand_dims(y_true[..., 4], 4)
# the variable to keep track of number of batches processed
batch_seen = tf.Variable(0.)
# compute grid factor and net factor
grid_h = tf.shape(y_true)[1]
grid_w = tf.shape(y_true)[2]
grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32), [1,1,1,1,2])
net_h = tf.shape(input_image)[1]
net_w = tf.shape(input_image)[2]
net_factor = tf.reshape(tf.cast([net_w, net_h], tf.float32), [1,1,1,1,2])
"""
Adjust prediction
"""
pred_box_xy = (self.cell_grid[:,:grid_h,:grid_w,:,:] + tf.sigmoid(y_pred[..., :2])) # sigma(t_xy) + c_xy
pred_box_wh = y_pred[..., 2:4] # t_wh
pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[..., 4]), 4) # adjust confidence
pred_box_class = y_pred[..., 5:] # adjust class probabilities
"""
Adjust ground truth
"""
true_box_xy = y_true[..., 0:2] # (sigma(t_xy) + c_xy)
true_box_wh = y_true[..., 2:4] # t_wh
true_box_conf = tf.expand_dims(y_true[..., 4], 4)
true_box_class = tf.argmax(y_true[..., 5:], -1)
#print ">>>> true_box_wh", true_box_wh
"""
Compare each predicted box to all true boxes
"""
# initially, drag all objectness of all boxes to 0
conf_delta = pred_box_conf - 0
# then, ignore the boxes which have good overlap with some true box
true_xy = true_boxes[..., 0:2] / grid_factor
true_wh = true_boxes[..., 2:4] / net_factor
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = tf.expand_dims(pred_box_xy / grid_factor, 4)
pred_wh = tf.expand_dims(tf.exp(pred_box_wh) * self.anchors / net_factor, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
best_ious = tf.reduce_max(iou_scores, axis=4)
conf_delta *= tf.expand_dims(tf.to_float(best_ious < self.ignore_thresh), 4)
"""
Compute some online statistics
"""
true_xy = true_box_xy / grid_factor
true_wh = tf.exp(true_box_wh) * self.anchors / net_factor
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = pred_box_xy / grid_factor
pred_wh = tf.exp(pred_box_wh) * self.anchors / net_factor
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
iou_scores = object_mask * tf.expand_dims(iou_scores, 4)
count = tf.reduce_sum(object_mask)
count_noobj = tf.reduce_sum(1 - object_mask)
detect_mask = tf.to_float((pred_box_conf*object_mask) >= 0.5)
class_mask = tf.expand_dims(tf.to_float(tf.equal(tf.argmax(pred_box_class, -1), true_box_class)), 4)
recall50 = tf.reduce_sum(tf.to_float(iou_scores >= 0.5 ) * detect_mask * class_mask) / (count + 1e-3)
recall75 = tf.reduce_sum(tf.to_float(iou_scores >= 0.75) * detect_mask * class_mask) / (count + 1e-3)
avg_iou = tf.reduce_sum(iou_scores) / (count + 1e-3)
avg_obj = tf.reduce_sum(pred_box_conf * object_mask) / (count + 1e-3)
avg_noobj = tf.reduce_sum(pred_box_conf * (1-object_mask)) / (count_noobj + 1e-3)
avg_cat = tf.reduce_sum(object_mask * class_mask) / (count + 1e-3)
"""
Warm-up training
"""
batch_seen = tf.assign_add(batch_seen, 1.)
true_box_xy, true_box_wh, xywh_mask = tf.cond(tf.less(batch_seen, self.warmup_batches+1),
lambda: [true_box_xy + (0.5 + self.cell_grid[:,:grid_h,:grid_w,:,:]) * (1-object_mask),
true_box_wh + tf.zeros_like(true_box_wh) * (1-object_mask),
tf.ones_like(object_mask)],
lambda: [true_box_xy,
true_box_wh,
object_mask])
"""
Compare each true box to all anchor boxes
"""
wh_scale = tf.exp(true_box_wh) * self.anchors / net_factor
wh_scale = tf.expand_dims(2 - wh_scale[..., 0] * wh_scale[..., 1], axis=4) # the smaller the box, the bigger the scale
xy_delta = xywh_mask * (pred_box_xy-true_box_xy) * wh_scale * self.xywh_scale
wh_delta = xywh_mask * (pred_box_wh-true_box_wh) * wh_scale * self.xywh_scale
conf_delta = object_mask * (pred_box_conf-true_box_conf) * self.obj_scale + (1-object_mask) * conf_delta * self.noobj_scale
class_delta = object_mask * \
tf.expand_dims(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class), 4) * \
self.class_scale
loss_xy = tf.reduce_sum(tf.square(xy_delta), list(range(1,5)))
loss_wh = tf.reduce_sum(tf.square(wh_delta), list(range(1,5)))
loss_conf = tf.reduce_sum(tf.square(conf_delta), list(range(1,5)))
loss_class = tf.reduce_sum(class_delta, list(range(1,5)))
#loss = loss_xy + loss_wh + loss_conf + loss_class
loss = (loss_xy + loss_wh + loss_conf + loss_class) / self.batch_size
loss = tf.Print(loss, [grid_h, count, avg_obj, avg_noobj, avg_cat, avg_iou], message=' INFO: grid_h, count, avg_obj, avg_noobj, avg_cat, avg_iou\t', summarize=1000)
loss = tf.Print(loss, [recall50, recall75], message=" RECALL: recall-50, recall-75\t", summarize=1000)
loss = tf.Print(loss, [tf.reduce_sum(loss_xy), tf.reduce_sum(loss_wh), tf.reduce_sum(loss_conf), tf.reduce_sum(loss_class)], message=' LOSS: xy, wh, conf, class\t', summarize=1000)
return loss*self.grid_scale
def compute_output_shape(self, input_shape):
return [(None, 1)]
def _conv(inp, conv):
x = inp
if conv['init']:
x = Conv2D(filters=conv['filter'],
kernel_size=conv['kernel'],
strides=conv['stride'],
padding='same',
name='conv_'+str(conv['layer_idx']),
kernel_initializer='glorot_normal',
use_bias=False if conv['bnorm'] else True)(x)
else:
x = Conv2D(filters=conv['filter'],
kernel_size=conv['kernel'],
strides=conv['stride'],
padding='same',
name='conv_'+str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['activation']=='leaky': x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return x
def _maxpool(x, maxpool):
return MaxPooling2D(pool_size=(maxpool['size'],maxpool['size']),
strides=(maxpool['stride'], maxpool['stride']),
padding='same',
name='maxpool_'+str(maxpool['layer_idx']))(x)
def _upsample(x, upsample):
return UpSampling2D(2, name='upsample_'+str(upsample['layer_idx']))(x)
def create_tinyx5_model(
nb_class,
anchors,
max_box_per_image,
max_grid,
batch_size,
warmup_batches,
ignore_thresh,
grid_scales,
obj_scale,
noobj_scale,
xywh_scale,
class_scale,
init=False,
width=None,
height=None
):
input_image = Input(shape=(width, height, 3)) # net_h, net_w, 3 (min, 512, max)
true_boxes = Input(shape=(1, 1, 1, max_box_per_image, 4))
true_yolo_1 = Input(shape=(None, None, 3, 4+1+nb_class)) # len(anchors//2)//2, 4+1+nb_class)) # grid_h, grid_w, nb_anchor, 5+nb_class
true_yolo_2 = Input(shape=(None, None, 3, 4+1+nb_class)) # len(anchors//2)//2, 4+1+nb_class)) # grid_h, grid_w, nb_anchor, 5+nb_class
## TinyX5 backbone
x0 = _conv(input_image, {'layer_idx': 0, 'bnorm': True, 'filter': 16, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x1 = _maxpool(x0, {'layer_idx': 1, 'size': 2, 'stride': 2})
x2 = _conv(x1, {'layer_idx': 2, 'bnorm': True, 'filter': 32, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x3 = _maxpool(x2, {'layer_idx': 3, 'size': 2, 'stride': 2})
x4 = _conv(x3, {'layer_idx': 4, 'bnorm': True, 'filter': 64, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x5 = _maxpool(x4, {'layer_idx': 5, 'size': 2, 'stride': 2})
x6 = _conv(x5, {'layer_idx': 6, 'bnorm': True, 'filter': 128, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x7 = _maxpool(x6, {'layer_idx': 7, 'size': 2, 'stride': 2})
x8 = _conv(x7, {'layer_idx': 8, 'bnorm': True, 'filter': 256, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x9 = _maxpool(x8, {'layer_idx': 9, 'size': 2, 'stride': 2})
x10 = _conv(x9, {'layer_idx': 10, 'bnorm': True, 'filter': 512, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x11 = _maxpool(x10, {'layer_idx': 11, 'size': 2, 'stride': 1})
x12 = _conv(x11, {'layer_idx': 12, 'bnorm': True, 'filter': 1024, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x13 = _conv(x12, {'layer_idx': 13, 'bnorm': True, 'filter': 256, 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x14 = _conv(x13, {'layer_idx': 14, 'bnorm': True, 'filter': 512, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
## yolo-layer-1 : layer 15 ==> 16
pred_yolo_1 = _conv(x14, {'layer_idx': 15, 'bnorm':False, 'filter': 3*(4+1+nb_class), 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'linear', 'init': init})
#pred_yolo_1 = _conv(x14, {'layer_idx': 15, 'bnorm':False, 'filter': 255, 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'linear', 'init': init})
loss_yolo_1 = YoloLayer(anchors[6:],
[1*num for num in max_grid], ### ? not the feature size but the origin size, why?
batch_size,
warmup_batches,
ignore_thresh,
grid_scales[0],
obj_scale,
noobj_scale,
xywh_scale,
class_scale)([input_image, pred_yolo_1, true_yolo_1, true_boxes])
## layer 17 ==> 21
x17 = x13
x18 = _conv(x17, {'layer_idx': 18, 'bnorm': True, 'filter': 128, 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
x19 = _upsample(x18, {'layer_idx': 19})
x20 = concatenate([x19, x8])
x21 = _conv(x20, {'layer_idx': 21, 'bnorm': True, 'filter': 256, 'kernel': 3, 'stride': 1, 'pad': 1, 'activation': 'leaky', 'init': init})
## yolo-layer-2 : layer 22 ==> 23
pred_yolo_2 = _conv(x21, {'layer_idx': 22, 'bnorm':False, 'filter': 3*(4+1+nb_class), 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'linear', 'init': init})
#pred_yolo_2 = _conv(x21, {'layer_idx': 22, 'bnorm':False, 'filter': 255, 'kernel': 1, 'stride': 1, 'pad': 1, 'activation': 'linear', 'init': init})
loss_yolo_2 = YoloLayer(anchors[:6],
[2*num for num in max_grid], ### ? not the feature size but the origin size, why?
batch_size,
warmup_batches,
ignore_thresh,
grid_scales[1],
obj_scale,
noobj_scale,
xywh_scale,
class_scale)([input_image, pred_yolo_2, true_yolo_2, true_boxes])
## keras.Model(input, output):
## -- train_model is to set train routine of specific network, so the output should be the loss for back-prop calculation
## -- infer_model only for forward calculation and focus on result, so the output should the prediction
train_model = Model([input_image, true_boxes, true_yolo_1, true_yolo_2], [loss_yolo_1, loss_yolo_2])
infer_model = Model(input_image, [pred_yolo_1, pred_yolo_2])
return [train_model, infer_model]
def dummy_loss(y_true, y_pred):
return tf.sqrt(tf.reduce_sum(y_pred))
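# --- Editor's usage sketch, not part of the original repository. ---
# It wires the builders above together; every numeric value below (anchor sizes, grid,
# scales, image size) is an illustrative assumption, not the project's real configuration.
if __name__ == "__main__":
    train_model, infer_model = create_tinyx5_model(
        nb_class=2,
        anchors=[10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319],  # 6 (w, h) anchor pairs
        max_box_per_image=30,
        max_grid=[448, 448],
        batch_size=4,
        warmup_batches=3,
        ignore_thresh=0.5,
        grid_scales=[1, 1],
        obj_scale=5,
        noobj_scale=1,
        xywh_scale=1,
        class_scale=1,
        init=True,
        width=416,
        height=416,
    )
    # dummy_loss simply propagates the losses already computed inside the YoloLayers.
    train_model.compile(loss=dummy_loss, optimizer='adam')
    infer_model.summary()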
| 51.15655
| 188
| 0.561329
|
ffff876ebd6f1babef82734804b0c8dfccc68d06
| 539
|
py
|
Python
|
webServer/sample/migrations/0002_auto_20190411_0132.py
|
getCurrentThread/KITBigMountain
|
1537c6f995212044e87d3ee3c8079e3deabfea1f
|
[
"MIT"
] | null | null | null |
webServer/sample/migrations/0002_auto_20190411_0132.py
|
getCurrentThread/KITBigMountain
|
1537c6f995212044e87d3ee3c8079e3deabfea1f
|
[
"MIT"
] | null | null | null |
webServer/sample/migrations/0002_auto_20190411_0132.py
|
getCurrentThread/KITBigMountain
|
1537c6f995212044e87d3ee3c8079e3deabfea1f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-04-10 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sample', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='samplemodel',
name='memo',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='samplemodel',
name='status',
field=models.ImageField(null=True, upload_to=''),
),
]
| 22.458333
| 61
| 0.564007
|
eabad39b08835dc91072b93266f5fe4841082ad5
| 1,498
|
py
|
Python
|
tests/conftest.py
|
molodoj88/mainfluxpy
|
7895b2b76dabe232effe8a1350ca8c1a31bf050e
|
[
"MIT"
] | 1
|
2020-04-23T17:32:45.000Z
|
2020-04-23T17:32:45.000Z
|
tests/conftest.py
|
molodoj88/mainfluxpy
|
7895b2b76dabe232effe8a1350ca8c1a31bf050e
|
[
"MIT"
] | 1
|
2020-06-09T17:56:30.000Z
|
2020-06-09T17:56:30.000Z
|
tests/conftest.py
|
molodoj88/mainfluxpy
|
7895b2b76dabe232effe8a1350ca8c1a31bf050e
|
[
"MIT"
] | 1
|
2021-06-30T05:54:31.000Z
|
2021-06-30T05:54:31.000Z
|
from mainflux.app import MainfluxApp
import pytest
import uuid
import httpx
from .conf import APP_KWARGS
"""
You should create a file named conf.py in the same directory where the tests run
and define a dictionary APP_KWARGS with the following content:
APP_KWARGS = {
'url': "ip-to-mainflux",
'port': "80",
'user': "username",
'password': "password"
}
"""
class TestManager:
def __init__(self, **app_args):
self.app = MainfluxApp(url=app_args['url'],
port=app_args['port'],
user_email=app_args['user'],
user_password=app_args['password'])
def __new__(cls, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super(TestManager, cls).__new__(cls)
return cls.instance
@pytest.fixture(scope='module')
def app():
test_manager = TestManager(**APP_KWARGS)
app = test_manager.app
return app
@pytest.fixture(scope='module')
def http_client():
client = httpx.Client()
yield client
client.close()
@pytest.fixture
def random_name():
def make_random_name():
return str(uuid.uuid4())
return make_random_name
@pytest.fixture(scope='function', autouse=True)
def clean_things():
yield
app = TestManager(**APP_KWARGS).app
app.api.delete_all_things()
@pytest.fixture(scope='function', autouse=True)
def clean_channels():
yield
app = TestManager(**APP_KWARGS).app
app.api.delete_all_channels()
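# Editor's sketch (not part of the original conftest): examples of consuming the
# fixtures above from a test module such as tests/test_fixtures.py. Like the rest of
# this file they assume conf.py exists and a Mainflux instance is reachable; only
# names defined or imported above are used.
def test_random_name_fixture_yields_unique_names(random_name):
    assert random_name() != random_name()

def test_app_fixture_returns_mainflux_app(app):
    assert isinstance(app, MainfluxApp)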
| 22.69697
| 70
| 0.642857
|
e7d20835f710ea53d2f57ba7d7c875a2d45a34ab
| 5,095
|
py
|
Python
|
spams/views.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 22
|
2017-07-13T04:07:03.000Z
|
2021-06-10T05:39:29.000Z
|
spams/views.py
|
genonfire/bbgo
|
5f374f0b620f4dc3e106de5969f26f4585044605
|
[
"MIT"
] | 7
|
2017-08-25T06:33:45.000Z
|
2019-10-14T05:49:32.000Z
|
spams/views.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 9
|
2017-12-31T02:45:58.000Z
|
2021-01-22T03:09:02.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from core.utils import error_to_response, get_referrer, get_useragent
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render, render_to_response
from django.utils import timezone
import requests
from .forms import SpamIPEditForm, SpamWordEditForm
from .models import IP, Word
def akismet_comment_check(request, comment):
"""Akismet comment check"""
if settings.ENABLE_AKISMET:
url_verify_key = 'https://rest.akismet.com/1.1/verify-key'
key = settings.AKISMET_API_KEY
blog = settings.BLOG_URL
data = {'key': key, 'blog': blog}
response = requests.post(url_verify_key, data=data)
if response.text == 'valid':
url = 'https://%s.rest.akismet.com/1.1/comment-check' % key
data = {
'blog': blog,
'user_ip': comment.ip,
'user_agent': get_useragent(request),
'referrer': get_referrer(request),
'comment_type': 'comment',
'comment_author': comment.username,
'comment_content': comment.content,
'comment_date_gmt': timezone.now(),
'blog_lang': settings.LANGUAGE_CODE,
'blog_charset': 'UTF-8',
}
result = requests.post(url, data=data)
if result.text == 'true':
return True
return False
def check_spam(request, comment):
"""Check spam"""
ip = comment.ip
ip_exist = IP.objects.filter(ip__iexact=ip).exists()
if ip_exist:
return True
words = Word.objects.all()
for word in words:
if word.word in comment.content:
return True
if settings.ENABLE_AKISMET:
is_spam = akismet_comment_check(request, comment)
if is_spam:
return True
return False
@staff_member_required
def setting(request):
"""Spam setting"""
return render(
request,
"spams/spam_setting.html",
{
}
)
@staff_member_required
def add_ip(request):
"""API add_ip"""
if request.method == 'POST':
ip = request.POST['ip']
exist = IP.objects.filter(ip__iexact=ip).exists()
if exist is True:
return JsonResponse({'status': 'false'}, status=412)
form = SpamIPEditForm(request.POST)
if form.is_valid():
form.save()
ips = IP.objects.all()
return render_to_response(
'spams/show_ips.html',
{
'ips': ips,
}
)
else:
return JsonResponse({'status': 'false'}, status=500)
else:
return error_to_response(request)
@staff_member_required
def delete_ip(request):
"""API delete_ip"""
if request.method == 'POST':
id = request.POST['id']
ip = get_object_or_404(IP, pk=id)
ip.delete()
ips = IP.objects.all()
return render_to_response(
'spams/show_ips.html',
{
'ips': ips,
}
)
else:
return error_to_response(request)
@staff_member_required
def add_word(request):
"""API add_word"""
if request.method == 'POST':
word = request.POST['word']
exist = Word.objects.filter(word__iexact=word).exists()
if exist is True:
return JsonResponse({'status': 'false'}, status=412)
form = SpamWordEditForm(request.POST)
if form.is_valid():
form.save()
words = Word.objects.all()
return render_to_response(
'spams/show_words.html',
{
'words': words,
}
)
else:
return JsonResponse({'status': 'false'}, status=500)
else:
return error_to_response(request)
@staff_member_required
def delete_word(request):
"""API delete_word"""
if request.method == 'POST':
id = request.POST['id']
word = get_object_or_404(Word, pk=id)
word.delete()
words = Word.objects.all()
return render_to_response(
'spams/show_words.html',
{
'words': words,
}
)
else:
return error_to_response(request)
@staff_member_required
def register_ip(request):
"""API register_ip"""
if request.method == 'POST':
ip = request.POST['ip']
exist = IP.objects.filter(ip__iexact=ip).exists()
if exist is True:
return JsonResponse({'status': 'false'}, status=412)
form = SpamIPEditForm(request.POST)
if form.is_valid():
form.save()
return JsonResponse({'status': 'true'}, status=201)
else:
return JsonResponse({'status': 'false'}, status=500)
else:
return JsonResponse({'status': 'false'}, status=400)
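# --- Editor's sketch, not part of the original app. ---
# check_spam() above only needs an object exposing .ip, .username and .content, so a
# comment-posting view could call it before saving. The class and the values below are
# illustrative stand-ins, not models from this project.
class _CandidateComment(object):
    def __init__(self, ip, username, content):
        self.ip = ip
        self.username = username
        self.content = content


def example_spam_precheck(request):
    """Illustrative view: report whether a hard-coded candidate comment looks like spam."""
    candidate = _CandidateComment('203.0.113.7', 'anon', 'hello world')
    return JsonResponse({'is_spam': check_spam(request, candidate)})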
| 27.245989
| 74
| 0.569774
|
9d77fc12bd4a14d51f2e7eb321fdbc0702edb288
| 16,264
|
py
|
Python
|
anki_vector/nav_map.py
|
rmcolbert/vector-python-sdk
|
7e29e81578bd862b6462d7f7502c4aa67de29fb5
|
[
"Apache-2.0"
] | 516
|
2018-12-12T06:05:03.000Z
|
2022-03-30T10:00:20.000Z
|
anki_vector/nav_map.py
|
rmcolbert/vector-python-sdk
|
7e29e81578bd862b6462d7f7502c4aa67de29fb5
|
[
"Apache-2.0"
] | 37
|
2018-12-12T09:41:46.000Z
|
2022-03-06T13:42:24.000Z
|
anki_vector/nav_map.py
|
rmcolbert/vector-python-sdk
|
7e29e81578bd862b6462d7f7502c4aa67de29fb5
|
[
"Apache-2.0"
] | 350
|
2018-12-11T23:24:01.000Z
|
2022-03-16T12:57:33.000Z
|
# Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A 2D navigation memory map of the world around Vector.
Vector builds a memory map of the navigable world around him as he drives
around. This is mostly based on where objects are seen (the cubes, charger, and
any custom objects), and also includes where Vector detects cliffs/drops, and
visible edges (e.g. sudden changes in color).
This differs from a standard occupancy map in that it doesn't deal with
probabilities of occupancy, but instead encodes what type of content is there.
To use the map you must first call :meth:`anki_vector.nav_map.NavMapComponent.init_nav_map_feed`
with a positive frequency so that the data is streamed to the SDK.
"""
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['EvtNavMapUpdate',
'NavMapComponent', 'NavMapGrid', 'NavMapGridNode',
'NavNodeContentTypes']
import asyncio
from concurrent.futures import CancelledError
from enum import Enum
from logging import Logger
from typing import List
from . import util
from .events import Events
from .exceptions import VectorException
from .messaging import protocol
class EvtNavMapUpdate(): # pylint: disable=too-few-public-methods
"""Dispatched when a new nav map is received.
:param nav_map: The current state of the robot's nav map.
"""
def __init__(self, nav_map):
self.nav_map = nav_map
class NavNodeContentTypes(Enum): # pylint: disable=too-few-public-methods
"""The content types for a :class:`NavMapGridNode`.
"""
#: The contents of the node is unknown.
Unknown = protocol.NavNodeContentType.Value("NAV_NODE_UNKNOWN")
#: The node is clear of obstacles, because Vector has seen objects on the
#: other side, but it might contain a cliff. The node will be marked as
#: either :attr:`Cliff` or :attr:`ClearOfCliff` once Vector has driven there.
ClearOfObstacle = protocol.NavNodeContentType.Value("NAV_NODE_CLEAR_OF_OBSTACLE")
#: The node is clear of any cliffs (a sharp drop) or obstacles.
ClearOfCliff = protocol.NavNodeContentType.Value("NAV_NODE_CLEAR_OF_CLIFF")
#: The node contains a :class:`~anki_vector.objects.LightCube`.
ObstacleCube = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_CUBE")
#: The node contains a proximity detected obstacle which has not been explored.
ObstacleProximity = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_PROXIMITY")
#: The node contains a proximity detected obstacle which has been explored.
ObstacleProximityExplored = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_PROXIMITY_EXPLORED")
#: The node contains an unrecognized obstacle.
ObstacleUnrecognized = protocol.NavNodeContentType.Value("NAV_NODE_OBSTACLE_UNRECOGNIZED")
#: The node contains a cliff (a sharp drop).
Cliff = protocol.NavNodeContentType.Value("NAV_NODE_CLIFF")
#: The node contains a visible edge (based on the camera feed).
InterestingEdge = protocol.NavNodeContentType.Value("NAV_NODE_INTERESTING_EDGE")
# This entry is undocumented and not currently used
NonInterestingEdge = protocol.NavNodeContentType.Value("NAV_NODE_NON_INTERESTING_EDGE")
class NavMapGridNode:
"""A node in a :class:`NavMapGrid`.
Leaf nodes contain content, all other nodes are split into 4 equally sized
children.
Child node indices are stored in the following X,Y orientation:
+---+----+---+
| ^ | 2 | 0 |
+---+----+---+
| Y | 3 | 1 |
+---+----+---+
| | X->| |
+---+----+---+
"""
def __init__(self, depth: int, size: float, center: util.Vector3, parent: 'NavMapGridNode', logger: Logger):
#: The depth of this node (i.e. how far down the quad-tree it is).
self.depth = depth
#: The size (width or length) of this square node.
self.size = size
#: The center of this node.
self.center = center
#: The parent of this node. Is ``None`` for the root node.
self.parent = parent
#: ``None`` for leaf nodes, a list of 4 child nodes otherwise.
self.children: List[NavMapGridNode] = None
#: The content type in this node. Only leaf nodes have content,
#: this is ``None`` for all other nodes.
self.content: protocol.NavNodeContentType = None
self._next_child = 0 # Used when building to track which branch to follow
self._logger = logger
def __repr__(self):
return '<%s center: %s size: %s content: %s>' % (
self.__class__.__name__, self.center, self.size, self.content)
def contains_point(self, x: float, y: float) -> bool:
"""Test if the node contains the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
True if the node contains the point, False otherwise.
"""
half_size = self.size * 0.5
dist_x = abs(self.center.x - x)
dist_y = abs(self.center.y - y)
return (dist_x <= half_size) and (dist_y <= half_size)
def _get_node(self, x: float, y: float, assumed_in_bounds: bool) -> 'NavMapGridNode':
if not assumed_in_bounds and not self.contains_point(x, y):
# point is out of bounds
return None
if self.children is None:
return self
x_offset = 2 if x < self.center.x else 0
y_offset = 1 if y < self.center.y else 0
child_node = self.children[x_offset + y_offset]
# child node is by definition in bounds / on boundary
return child_node._get_node(x, y, True) # pylint: disable=protected-access
def get_node(self, x: float, y: float) -> 'NavMapGridNode':
"""Get the node at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The smallest node that includes the point.
Will be ``None`` if the point is outside of the map.
"""
return self._get_node(x, y, assumed_in_bounds=False)
def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:
"""Get the node's content at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The content included at that point. Will be :attr:`NavNodeContentTypes.Unknown`
if the point is outside of the map.
"""
node = self.get_node(x, y)
if node:
return node.content
return NavNodeContentTypes.Unknown
def add_child(self, content: protocol.NavNodeContentType, depth: int) -> bool:
"""Add a child node to the quad tree.
The quad-tree is serialized to a flat list of nodes, we deserialize
back to a quad-tree structure here, with the depth of each node
indicating where it is placed.
:param content: The content to store in the leaf node.
:param depth: The depth that this leaf node is located at.
Returns:
True if parent should use the next child for future add_child
calls.
"""
if depth > self.depth:
self._logger.error("NavMapGridNode depth %s > %s", depth, self.depth)
if self._next_child > 3:
self._logger.error("NavMapGridNode _next_child %s (>3) at depth %s", self._next_child, self.depth)
if self.depth == depth:
if self.content is not None:
self._logger.error("NavMapGridNode: Clobbering %s at depth %s with %s",
self.content, self.depth, content)
self.content = content
# This node won't be further subdivided, and is now full
return True
if self.children is None:
# Create 4 child nodes for quad-tree structure
next_depth = self.depth - 1
next_size = self.size * 0.5
offset = next_size * 0.5
center1 = util.Vector3(self.center.x + offset, self.center.y + offset, self.center.z)
center2 = util.Vector3(self.center.x + offset, self.center.y - offset, self.center.z)
center3 = util.Vector3(self.center.x - offset, self.center.y + offset, self.center.z)
center4 = util.Vector3(self.center.x - offset, self.center.y - offset, self.center.z)
self.children = [NavMapGridNode(next_depth, next_size, center1, self, self._logger),
NavMapGridNode(next_depth, next_size, center2, self, self._logger),
NavMapGridNode(next_depth, next_size, center3, self, self._logger),
NavMapGridNode(next_depth, next_size, center4, self, self._logger)]
if self.children[self._next_child].add_child(content, depth):
# Child node is now full, start using the next child
self._next_child += 1
if self._next_child > 3:
# All children are now full - parent should start using the next child
return True
# Empty children remain - parent can keep using this child
return False
class NavMapGrid:
"""A navigation memory map, stored as a quad-tree."""
def __init__(self, msg: protocol.NavMapFeedResponse, logger: Logger):
#: The origin ID for the map. Only maps and :class:`~anki_vector.util.Pose`
#: objects of the same origin ID are in the same coordinate frame and
#: can therefore be compared.
self.origin_id = msg.origin_id
root_center = util.Vector3(msg.map_info.root_center_x, msg.map_info.root_center_y, msg.map_info.root_center_z)
self._root_node = NavMapGridNode(msg.map_info.root_depth, msg.map_info.root_size_mm, root_center, None, logger)
for quad in msg.quad_infos:
self.add_quad(quad.content, quad.depth)
self._logger = logger
def __repr__(self):
return '<%s center: %s size: %s>' % (
self.__class__.__name__, self.center, self.size)
@property
def root_node(self) -> NavMapGridNode:
"""The root node for the grid, contains all other nodes."""
return self._root_node
@property
def size(self) -> float:
"""The size (width or length) of the square grid."""
return self._root_node.size
@property
def center(self) -> util.Vector3:
"""The center of this map."""
return self._root_node.center
def contains_point(self, x: float, y: float) -> bool:
"""Test if the map contains the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
True if the map contains the point, False otherwise.
"""
return self._root_node.contains_point(x, y)
def get_node(self, x: float, y: float) -> NavMapGridNode:
"""Get the node at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
Returns:
The smallest node that includes the point.
Will be ``None`` if the point is outside of the map.
"""
return self._root_node.get_node(x, y)
def get_content(self, x: float, y: float) -> protocol.NavNodeContentType:
"""Get the map's content at the given x,y coordinates.
:param x: x coordinate for the point.
:param y: y coordinate for the point.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
content = latest_nav_map.get_content(0.0, 100.0)
print(f"Sampling point at 0.0, 100.0 and found content: {content}")
Returns:
The content included at that point. Will be :attr:`NavNodeContentTypes.Unknown`
if the point is outside of the map.
"""
return self._root_node.get_content(x, y)
def add_quad(self, content: protocol.NavNodeContentType, depth: int):
"""Adds a new quad to the nav map.
:param content: What content this node contains.
:param depth: How deep in the navMap this node is.
"""
self._root_node.add_child(content, depth)
class NavMapComponent(util.Component):
"""Represents Vector's navigation memory map.
The NavMapComponent object subscribes for nav memory map updates from the robot to store and dispatch.
The :class:`anki_vector.robot.Robot` or :class:`anki_vector.robot.AsyncRobot` instance hosts this component.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
:param robot: A reference to the owner Robot object.
"""
def __init__(self, robot):
super().__init__(robot)
self._latest_nav_map: NavMapGrid = None
self._nav_map_feed_task: asyncio.Task = None
@property
@util.block_while_none()
def latest_nav_map(self) -> NavMapGrid:
""":class:`NavMapGrid`: The most recently processed nav map received from the robot.
.. testcode::
import anki_vector
with anki_vector.Robot(enable_nav_map_feed=True) as robot:
# Make sure Vector drives around so the nav map will update
robot.behavior.drive_off_charger()
robot.motors.set_wheel_motors(-100, 100)
latest_nav_map = robot.nav_map.latest_nav_map
"""
if not self._nav_map_feed_task or self._nav_map_feed_task.done():
raise VectorException("Nav map not initialized. Check that Robot parameter enable_nav_map_feed is set to True.")
return self._latest_nav_map
def init_nav_map_feed(self, frequency: float = 0.5) -> None:
"""Begin nav map feed task.
:param frequency: How frequently to send nav map updates.
"""
if not self._nav_map_feed_task or self._nav_map_feed_task.done():
self._nav_map_feed_task = self.conn.loop.create_task(self._request_and_handle_nav_maps(frequency))
def close_nav_map_feed(self) -> None:
"""Cancel nav map feed task."""
if self._nav_map_feed_task:
self._nav_map_feed_task.cancel()
future = self.conn.run_coroutine(self._nav_map_feed_task)
future.result()
self._nav_map_feed_task = None
async def _request_and_handle_nav_maps(self, frequency: float) -> None:
"""Queries and listens for nav map feed events from the robot.
Received events are parsed by a helper function.
:param frequency: How frequently to send nav map updates.
"""
try:
req = protocol.NavMapFeedRequest(frequency=frequency)
async for evt in self.grpc_interface.NavMapFeed(req):
self._latest_nav_map = NavMapGrid(evt, self.logger)
await self._robot.events.dispatch_event(evt, Events.nav_map_update)
except CancelledError:
self.logger.debug('Nav Map feed task was cancelled. This is expected during disconnection.')
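# --- Editor's usage sketch, not part of the SDK module. ---
# It follows the testcode snippets in the docstrings above and assumes a paired,
# reachable Vector robot; the sampled coordinates are illustrative.
if __name__ == "__main__":
    import anki_vector
    with anki_vector.Robot(enable_nav_map_feed=True) as robot:
        robot.behavior.drive_off_charger()
        robot.motors.set_wheel_motors(-100, 100)
        content = robot.nav_map.latest_nav_map.get_content(0.0, 100.0)
        print("Content at (0.0, 100.0):", content)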
| 39.668293
| 124
| 0.653099
|
c860ecd79e3876954349470b41bb984b31a816d4
| 29,767
|
py
|
Python
|
ambari-server/src/main/python/ambari_server/setupSecurity.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2015-05-04T12:19:05.000Z
|
2015-05-04T12:19:05.000Z
|
ambari-server/src/main/python/ambari_server/setupSecurity.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/python/ambari_server/setupSecurity.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2021-01-07T08:55:01.000Z
|
2021-01-07T08:55:01.000Z
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import base64
import fileinput
import json
import os
import re
import shutil
import urllib2
import time
import sys
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import print_warning_msg, print_error_msg, print_info_msg, get_verbose
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root, set_file_permissions, \
run_os_command, search_file, is_valid_filepath
from ambari_server.serverConfiguration import configDefaults, \
encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
get_credential_store_location, get_full_ambari_classpath, get_is_persisted, get_is_secure, get_master_key_location, \
get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, IS_LDAP_CONFIGURED, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_USE_INTEGRATED_AUTH_PROPERTY, \
LDAP_MGR_PASSWORD_ALIAS, LDAP_MGR_PASSWORD_FILENAME, LDAP_MGR_PASSWORD_PROPERTY, LDAP_MGR_USERNAME_PROPERTY, \
LDAP_PRIMARY_URL_PROPERTY, SECURITY_IS_ENCRYPTION_ENABLED, SECURITY_KEY_ENV_VAR_NAME, SECURITY_KERBEROS_JASS_FILENAME, \
SECURITY_PROVIDER_KEY_CMD, SECURITY_MASTER_KEY_FILENAME, SSL_TRUSTSTORE_PASSWORD_ALIAS, \
SSL_TRUSTSTORE_PASSWORD_PROPERTY, SSL_TRUSTSTORE_PATH_PROPERTY, SSL_TRUSTSTORE_TYPE_PROPERTY, \
SSL_API, SSL_API_PORT, DEFAULT_SSL_API_PORT, CLIENT_API_PORT
from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_base
from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input
REGEX_IP_ADDRESS = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
REGEX_HOSTNAME = "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
REGEX_HOSTNAME_PORT = "^(.*:[0-9]{1,5}$)"
REGEX_TRUE_FALSE = "^(true|false)?$"
REGEX_REFERRAL = "^(follow|ignore)?$"
REGEX_ANYTHING = ".*"
CLIENT_SECURITY_KEY = "client.security"
SERVER_API_LDAP_URL = 'ldap_sync_events'
def read_master_key(isReset=False):
passwordPattern = ".*"
passwordPrompt = "Please provide master key for locking the credential store: "
passwordDescr = "Invalid characters in password. Use only alphanumeric or "\
"_ or - characters"
passwordDefault = ""
if isReset:
passwordPrompt = "Enter new Master Key: "
input = True
while(input):
masterKey = get_validated_string_input(passwordPrompt, passwordDefault,
passwordPattern, passwordDescr, True, True)
if not masterKey:
print "Master Key cannot be empty!"
continue
masterKey2 = get_validated_string_input("Re-enter master key: ",
passwordDefault, passwordPattern, passwordDescr, True, True)
if masterKey != masterKey2:
print "Master key did not match!"
continue
input = False
return masterKey
def save_master_key(master_key, key_location, persist=True):
if master_key:
jdk_path = find_jdk()
if jdk_path is None:
print_error_msg("No JDK found, please run the \"setup\" "
"command to install a JDK automatically or install any "
"JDK manually to " + configDefaults.JDK_INSTALL_DIR)
return 1
command = SECURITY_PROVIDER_KEY_CMD.format(get_java_exe_path(),
get_full_ambari_classpath(), master_key, key_location, persist)
(retcode, stdout, stderr) = run_os_command(command)
print_info_msg("Return code from credential provider save KEY: " +
str(retcode))
else:
print_error_msg("Master key cannot be None.")
def adjust_directory_permissions(ambari_user):
properties = get_ambari_properties()
bootstrap_dir = os.path.abspath(get_value_from_properties(properties, BOOTSTRAP_DIR_PROPERTY))
print_info_msg("Cleaning bootstrap directory ({0}) contents...".format(bootstrap_dir))
shutil.rmtree(bootstrap_dir, True) #Ignore the non-existent dir error
#Protect against directories lingering around
del_attempts = 0
while os.path.exists(bootstrap_dir) and del_attempts < 100:
time.sleep(0.05)
del_attempts += 1
if not os.path.exists(bootstrap_dir):
try:
os.makedirs(bootstrap_dir)
except Exception, ex:
print_warning_msg("Failed recreating the bootstrap directory: {0}".format(str(ex)))
pass
else:
print_warning_msg("Bootstrap directory lingering around after 5s. Unable to complete the cleanup.")
pass
# Add master key and credential store if exists
keyLocation = get_master_key_location(properties)
masterKeyFile = search_file(SECURITY_MASTER_KEY_FILENAME, keyLocation)
if masterKeyFile:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((masterKeyFile, configDefaults.MASTER_KEY_FILE_PERMISSIONS, "{0}", "{0}", False))
credStoreFile = get_credential_store_location(properties)
if os.path.exists(credStoreFile):
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((credStoreFile, configDefaults.CREDENTIALS_STORE_FILE_PERMISSIONS, "{0}", "{0}", False))
trust_store_location = properties[SSL_TRUSTSTORE_PATH_PROPERTY]
if trust_store_location:
configDefaults.NR_ADJUST_OWNERSHIP_LIST.append((trust_store_location, configDefaults.TRUST_STORE_LOCATION_PERMISSIONS, "{0}", "{0}", False))
print "Adjusting ambari-server permissions and ownership..."
for pack in configDefaults.NR_ADJUST_OWNERSHIP_LIST:
file = pack[0]
mod = pack[1]
user = pack[2].format(ambari_user)
recursive = pack[3]
set_file_permissions(file, mod, user, recursive)
def configure_ldap_password():
passwordDefault = ""
passwordPrompt = 'Enter Manager Password* : '
passwordPattern = ".*"
passwordDescr = "Invalid characters in password."
password = read_password(passwordDefault, passwordPattern, passwordPrompt,
passwordDescr)
return password
#
# Get the principal names from the given CSV file and set them on the given LDAP event specs.
#
def get_ldap_event_spec_names(file, specs, new_specs):
try:
if os.path.exists(file):
new_spec = new_specs[0]
with open(file, 'r') as names_file:
names = names_file.read()
new_spec['names'] = names.replace('\n', '').replace('\t', '')
names_file.close()
specs += new_specs
else:
err = 'Sync event creation failed. File ' + file + ' not found.'
raise FatalException(1, err)
except Exception as exception:
err = 'Caught exception reading file ' + file + ' : ' + str(exception)
raise FatalException(1, err)
class LdapSyncOptions:
def __init__(self, options):
try:
self.ldap_sync_all = options.ldap_sync_all
except AttributeError:
self.ldap_sync_all = False
try:
self.ldap_sync_existing = options.ldap_sync_existing
except AttributeError:
self.ldap_sync_existing = False
try:
self.ldap_sync_users = options.ldap_sync_users
except AttributeError:
self.ldap_sync_users = None
try:
self.ldap_sync_groups = options.ldap_sync_groups
except AttributeError:
self.ldap_sync_groups = None
def no_ldap_sync_options_set(self):
return not self.ldap_sync_all and not self.ldap_sync_existing and self.ldap_sync_users is None and self.ldap_sync_groups is None
#
# Sync users and groups with configured LDAP
#
def sync_ldap(options):
if not is_root():
err = 'Ambari-server sync-ldap should be run with ' \
'root-level privileges'
raise FatalException(4, err)
server_status, pid = is_server_runing()
if not server_status:
err = 'Ambari Server is not running.'
raise FatalException(1, err)
properties = get_ambari_properties()
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
ldap_configured = properties.get_property(IS_LDAP_CONFIGURED)
if ldap_configured != 'true':
err = "LDAP is not configured. Run 'ambari-server setup-ldap' first."
raise FatalException(1, err)
# set ldap sync options
ldap_sync_options = LdapSyncOptions(options)
if ldap_sync_options.no_ldap_sync_options_set():
err = 'Must specify a sync option (all, existing, users or groups). Please invoke ambari-server.py --help to print the options.'
raise FatalException(1, err)
admin_login = get_validated_string_input(prompt="Enter Ambari Admin login: ", default=None,
pattern=None, description=None,
is_pass=False, allowEmpty=False)
admin_password = get_validated_string_input(prompt="Enter Ambari Admin password: ", default=None,
pattern=None, description=None,
is_pass=True, allowEmpty=False)
url = get_ambari_server_api_base(properties) + SERVER_API_LDAP_URL
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
if ldap_sync_options.ldap_sync_all:
sys.stdout.write('Syncing all.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"all"},{"principal_type":"groups","sync_type":"all"}]}}]
elif ldap_sync_options.ldap_sync_existing:
sys.stdout.write('Syncing existing.')
bodies = [{"Event":{"specs":[{"principal_type":"users","sync_type":"existing"},{"principal_type":"groups","sync_type":"existing"}]}}]
else:
sys.stdout.write('Syncing specified users and groups.')
bodies = [{"Event":{"specs":[]}}]
body = bodies[0]
events = body['Event']
specs = events['specs']
if ldap_sync_options.ldap_sync_users is not None:
new_specs = [{"principal_type":"users","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_users, specs, new_specs)
if ldap_sync_options.ldap_sync_groups is not None:
new_specs = [{"principal_type":"groups","sync_type":"specific","names":""}]
get_ldap_event_spec_names(ldap_sync_options.ldap_sync_groups, specs, new_specs)
if get_verbose():
sys.stdout.write('\nCalling API ' + url + ' : ' + str(bodies) + '\n')
request.add_data(json.dumps(bodies))
request.get_method = lambda: 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
err = 'Sync event creation failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 201:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
url = response_body['resources'][0]['href']
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
body = [{"LDAP":{"synced_groups":"*","synced_users":"*"}}]
request.add_data(json.dumps(body))
request.get_method = lambda: 'GET'
request_in_progress = True
while request_in_progress:
sys.stdout.write('.')
sys.stdout.flush()
try:
response = urllib2.urlopen(request)
except Exception as e:
request_in_progress = False
err = 'Sync event check failed. Error details: %s' % e
raise FatalException(1, err)
response_status_code = response.getcode()
if response_status_code != 200:
err = 'Error during syncing. Http status code - ' + str(response_status_code)
raise FatalException(1, err)
response_body = json.loads(response.read())
sync_info = response_body['Event']
if sync_info['status'] == 'ERROR':
raise FatalException(1, str(sync_info['status_detail']))
elif sync_info['status'] == 'COMPLETE':
print '\n\nCompleted LDAP Sync.'
print 'Summary:'
for principal_type, summary in sync_info['summary'].iteritems():
print ' {0}:'.format(principal_type)
for action, amount in summary.iteritems():
print ' {0} = {1!s}'.format(action, amount)
request_in_progress = False
else:
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
def setup_master_key():
if not is_root():
err = 'Ambari-server setup should be run with ' \
'root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if properties == -1:
raise FatalException(1, "Failed to read properties file.")
db_windows_auth_prop = properties.get_property(JDBC_USE_INTEGRATED_AUTH_PROPERTY)
db_sql_auth = False if db_windows_auth_prop and db_windows_auth_prop.lower() == 'true' else True
db_password = properties.get_property(JDBC_PASSWORD_PROPERTY)
# Encrypt passwords cannot be called before setup
if db_sql_auth and not db_password:
print 'Please call "setup" before "encrypt-passwords". Exiting...'
return 1
# Check configuration for location of master key
isSecure = get_is_secure(properties)
(isPersisted, masterKeyFile) = get_is_persisted(properties)
# Read clear text DB password from file
if db_sql_auth and not is_alias_string(db_password) and os.path.isfile(db_password):
with open(db_password, 'r') as passwdfile:
db_password = passwdfile.read()
ldap_password = properties.get_property(LDAP_MGR_PASSWORD_PROPERTY)
if ldap_password:
# Read clear text LDAP password from file
if not is_alias_string(ldap_password) and os.path.isfile(ldap_password):
with open(ldap_password, 'r') as passwdfile:
ldap_password = passwdfile.read()
ts_password = properties.get_property(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
resetKey = False
masterKey = None
if isSecure:
print "Password encryption is enabled."
resetKey = get_YN_input("Do you want to reset Master Key? [y/n] (n): ", False)
# For encrypting of only unencrypted passwords without resetting the key ask
# for master key if not persisted.
if isSecure and not isPersisted and not resetKey:
print "Master Key not persisted."
masterKey = get_original_master_key(properties)
pass
# Make sure both passwords are clear-text if master key is lost
if resetKey:
if not isPersisted:
print "Master Key not persisted."
masterKey = get_original_master_key(properties)
# Unable get the right master key or skipped question <enter>
if not masterKey:
print "To disable encryption, do the following:"
print "- Edit " + find_properties_file() + \
" and set " + SECURITY_IS_ENCRYPTION_ENABLED + " = " + "false."
err = "{0} is already encrypted. Please call {1} to store unencrypted" \
" password and call 'encrypt-passwords' again."
if db_sql_auth and db_password and is_alias_string(db_password):
print err.format('- Database password', "'" + SETUP_ACTION + "'")
if ldap_password and is_alias_string(ldap_password):
print err.format('- LDAP manager password', "'" + LDAP_SETUP_ACTION + "'")
if ts_password and is_alias_string(ts_password):
print err.format('TrustStore password', "'" + LDAP_SETUP_ACTION + "'")
return 1
pass
pass
pass
# Read back any encrypted passwords
if db_sql_auth and db_password and is_alias_string(db_password):
db_password = read_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, masterKey)
if ldap_password and is_alias_string(ldap_password):
ldap_password = read_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, masterKey)
if ts_password and is_alias_string(ts_password):
ts_password = read_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, masterKey)
# Read master key, if non-secure or reset is true
if resetKey or not isSecure:
masterKey = read_master_key(resetKey)
persist = get_YN_input("Do you want to persist master key. If you choose " \
"not to persist, you need to provide the Master " \
"Key while starting the ambari server as an env " \
"variable named " + SECURITY_KEY_ENV_VAR_NAME + \
" or the start will prompt for the master key."
" Persist [y/n] (y)? ", True)
if persist:
save_master_key(masterKey, get_master_key_location(properties) + os.sep +
SECURITY_MASTER_KEY_FILENAME, persist)
elif not persist and masterKeyFile:
try:
os.remove(masterKeyFile)
print_info_msg("Deleting master key file at location: " + str(
masterKeyFile))
except Exception, e:
print 'ERROR: Could not remove master key file. %s' % e
# Blow up the credential store made with previous key, if any
store_file = get_credential_store_location(properties)
if os.path.exists(store_file):
try:
os.remove(store_file)
except:
print_warning_msg("Failed to remove credential store file.")
pass
pass
pass
propertyMap = {SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
# Encrypt only un-encrypted passwords
if db_password and not is_alias_string(db_password):
retCode = save_passwd_for_alias(JDBC_RCA_PASSWORD_ALIAS, db_password, masterKey)
if retCode != 0:
print 'Failed to save secure database password.'
else:
propertyMap[JDBC_PASSWORD_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
remove_password_file(JDBC_PASSWORD_FILENAME)
if properties.get_property(JDBC_RCA_PASSWORD_FILE_PROPERTY):
propertyMap[JDBC_RCA_PASSWORD_FILE_PROPERTY] = get_alias_string(JDBC_RCA_PASSWORD_ALIAS)
pass
if ldap_password and not is_alias_string(ldap_password):
retCode = save_passwd_for_alias(LDAP_MGR_PASSWORD_ALIAS, ldap_password, masterKey)
if retCode != 0:
print 'Failed to save secure LDAP password.'
else:
propertyMap[LDAP_MGR_PASSWORD_PROPERTY] = get_alias_string(LDAP_MGR_PASSWORD_ALIAS)
remove_password_file(LDAP_MGR_PASSWORD_FILENAME)
pass
if ts_password and not is_alias_string(ts_password):
retCode = save_passwd_for_alias(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password, masterKey)
if retCode != 0:
print 'Failed to save secure TrustStore password.'
else:
propertyMap[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS)
pass
update_properties_2(properties, propertyMap)
# Since files for store and master are created we need to ensure correct
# permissions
ambari_user = read_ambari_user()
if ambari_user:
adjust_directory_permissions(ambari_user)
return 0
def setup_ambari_krb5_jaas():
jaas_conf_file = search_file(SECURITY_KERBEROS_JASS_FILENAME, get_conf_dir())
if os.path.exists(jaas_conf_file):
print 'Setting up Ambari kerberos JAAS configuration to access ' + \
'secured Hadoop daemons...'
principal = get_validated_string_input('Enter ambari server\'s kerberos '
'principal name (ambari@EXAMPLE.COM): ', 'ambari@EXAMPLE.COM', '.*', '', False,
False)
keytab = get_validated_string_input('Enter keytab path for ambari '
'server\'s kerberos principal: ',
'/etc/security/keytabs/ambari.keytab', '.*', False, False,
validatorFunction=is_valid_filepath)
for line in fileinput.FileInput(jaas_conf_file, inplace=1):
line = re.sub('keyTab=.*$', 'keyTab="' + keytab + '"', line)
line = re.sub('principal=.*$', 'principal="' + principal + '"', line)
print line,
else:
raise NonFatalException('No jaas config file found at location: ' +
jaas_conf_file)
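# Illustrative note (not part of the original source): the fileinput/re.sub
# loop above edits an existing Ambari JAAS stanza in place. Assuming the file
# contains lines of the (typical, but here assumed) form
#
#   keyTab="/etc/security/keytabs/ambari.keytab"
#   principal="ambari@EXAMPLE.COM"
#
# only the keyTab="..." and principal="..." values are replaced with what was
# entered at the prompts; every other line is written back unchanged.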
class LdapPropTemplate:
def __init__(self, properties, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
self.prop_name = i_prop_name
self.ldap_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
self.ldap_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.ldap_prop_name))
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def init_ldap_properties_list_reqd(properties):
# python2.x dict is not ordered
ldap_properties = [
LdapPropTemplate(properties, "authentication.ldap.primaryUrl", "Primary URL* {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, False),
LdapPropTemplate(properties, "authentication.ldap.secondaryUrl", "Secondary URL {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, True),
LdapPropTemplate(properties, "authentication.ldap.useSSL", "Use SSL* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
LdapPropTemplate(properties, "authentication.ldap.usernameAttribute", "User name attribute* {0}: ", REGEX_ANYTHING, False, "uid"),
LdapPropTemplate(properties, "authentication.ldap.baseDn", "Base DN* {0}: ", REGEX_ANYTHING, False),
LdapPropTemplate(properties, "authentication.ldap.referral", "Referral method [follow/ignore] {0}: ", REGEX_REFERRAL, True),
LdapPropTemplate(properties, "authentication.ldap.bindAnonymously" "Bind anonymously* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false")
]
return ldap_properties
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def init_ldap_properties_list_reqd(properties):
ldap_properties = [
LdapPropTemplate(properties, LDAP_PRIMARY_URL_PROPERTY, "Primary URL* {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, False),
LdapPropTemplate(properties, "authentication.ldap.secondaryUrl", "Secondary URL {{host:port}} {0}: ", REGEX_HOSTNAME_PORT, True),
LdapPropTemplate(properties, "authentication.ldap.useSSL", "Use SSL* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
LdapPropTemplate(properties, "authentication.ldap.userObjectClass", "User object class* {0}: ", REGEX_ANYTHING, False, "posixAccount"),
LdapPropTemplate(properties, "authentication.ldap.usernameAttribute", "User name attribute* {0}: ", REGEX_ANYTHING, False, "uid"),
LdapPropTemplate(properties, "authentication.ldap.groupObjectClass", "Group object class* {0}: ", REGEX_ANYTHING, False, "posixGroup"),
LdapPropTemplate(properties, "authentication.ldap.groupNamingAttr", "Group name attribute* {0}: ", REGEX_ANYTHING, False, "cn"),
LdapPropTemplate(properties, "authentication.ldap.groupMembershipAttr", "Group member attribute* {0}: ", REGEX_ANYTHING, False, "memberUid"),
LdapPropTemplate(properties, "authentication.ldap.dnAttribute", "Distinguished name attribute* {0}: ", REGEX_ANYTHING, False, "dn"),
LdapPropTemplate(properties, "authentication.ldap.baseDn", "Base DN* {0}: ", REGEX_ANYTHING, False),
LdapPropTemplate(properties, "authentication.ldap.referral", "Referral method [follow/ignore] {0}: ", REGEX_REFERRAL, True),
LdapPropTemplate(properties, "authentication.ldap.bindAnonymously", "Bind anonymously* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false")
]
return ldap_properties
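# Illustrative sketch (assumed example values, not from the original source):
# after setup_ldap() below finishes and the settings are saved, the collected
# ldap_property_value_map is written to ambari.properties as entries such as
#
#   <CLIENT_SECURITY_KEY> = ldap
#   <IS_LDAP_CONFIGURED>  = true
#   authentication.ldap.primaryUrl = ldap.example.com:389
#   authentication.ldap.useSSL = false
#   authentication.ldap.bindAnonymously = false
#
# with the manager and TrustStore passwords stored either as password-file
# references or, when encryption is enabled, as credential-store aliases.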
def setup_ldap():
if not is_root():
err = 'Ambari-server setup-ldap should be run with ' \
'root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
isSecure = get_is_secure(properties)
ldap_property_list_reqd = init_ldap_properties_list_reqd(properties)
ldap_property_list_opt = ["authentication.ldap.managerDn",
LDAP_MGR_PASSWORD_PROPERTY,
SSL_TRUSTSTORE_TYPE_PROPERTY,
SSL_TRUSTSTORE_PATH_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
ldap_property_list_truststore=[SSL_TRUSTSTORE_TYPE_PROPERTY,
SSL_TRUSTSTORE_PATH_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
ldap_property_list_passwords=[LDAP_MGR_PASSWORD_PROPERTY,
SSL_TRUSTSTORE_PASSWORD_PROPERTY]
LDAP_MGR_DN_DEFAULT = get_value_from_properties(properties, ldap_property_list_opt[0])
SSL_TRUSTSTORE_TYPE_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_TYPE_PROPERTY, "jks")
SSL_TRUSTSTORE_PATH_DEFAULT = get_value_from_properties(properties, SSL_TRUSTSTORE_PATH_PROPERTY)
ldap_property_value_map = {}
for ldap_prop in ldap_property_list_reqd:
input = get_validated_string_input(ldap_prop.ldap_prop_val_prompt, ldap_prop.ldap_prop_name, ldap_prop.prompt_regex,
"Invalid characters in the input!", False, ldap_prop.allow_empty_prompt)
if input is not None and input != "":
ldap_property_value_map[ldap_prop.prop_name] = input
bindAnonymously = ldap_property_value_map["authentication.ldap.bindAnonymously"]
anonymous = (bindAnonymously and bindAnonymously.lower() == 'true')
mgr_password = None
# Ask for manager credentials only if bindAnonymously is false
if not anonymous:
username = get_validated_string_input("Manager DN* {0}: ".format(
get_prompt_default(LDAP_MGR_DN_DEFAULT)), LDAP_MGR_DN_DEFAULT, ".*",
"Invalid characters in the input!", False, False)
ldap_property_value_map[LDAP_MGR_USERNAME_PROPERTY] = username
mgr_password = configure_ldap_password()
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = mgr_password
useSSL = ldap_property_value_map["authentication.ldap.useSSL"]
ldaps = (useSSL and useSSL.lower() == 'true')
ts_password = None
if ldaps:
truststore_default = "n"
truststore_set = bool(SSL_TRUSTSTORE_PATH_DEFAULT)
if truststore_set:
truststore_default = "y"
custom_trust_store = get_YN_input("Do you want to provide custom TrustStore for Ambari [y/n] ({0})?".
format(truststore_default),
truststore_set)
if custom_trust_store:
ts_type = get_validated_string_input(
"TrustStore type [jks/jceks/pkcs12] {0}:".format(get_prompt_default(SSL_TRUSTSTORE_TYPE_DEFAULT)),
SSL_TRUSTSTORE_TYPE_DEFAULT,
"^(jks|jceks|pkcs12)?$", "Wrong type", False)
ts_path = None
while True:
ts_path = get_validated_string_input(
"Path to TrustStore file {0}:".format(get_prompt_default(SSL_TRUSTSTORE_PATH_DEFAULT)),
SSL_TRUSTSTORE_PATH_DEFAULT,
".*", False, False)
if os.path.exists(ts_path):
break
else:
print 'File not found.'
ts_password = read_password("", ".*", "Password for TrustStore:", "Invalid characters in password")
ldap_property_value_map[SSL_TRUSTSTORE_TYPE_PROPERTY] = ts_type
ldap_property_value_map[SSL_TRUSTSTORE_PATH_PROPERTY] = ts_path
ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = ts_password
pass
else:
properties.removeOldProp(SSL_TRUSTSTORE_TYPE_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PATH_PROPERTY)
properties.removeOldProp(SSL_TRUSTSTORE_PASSWORD_PROPERTY)
pass
pass
print '=' * 20
print 'Review Settings'
print '=' * 20
for property in ldap_property_list_reqd:
if property in ldap_property_value_map:
print("%s: %s" % (property, ldap_property_value_map[property]))
for property in ldap_property_list_opt:
if ldap_property_value_map.has_key(property):
if property not in ldap_property_list_passwords:
print("%s: %s" % (property, ldap_property_value_map[property]))
else:
print("%s: %s" % (property, BLIND_PASSWORD))
save_settings = get_YN_input("Save settings [y/n] (y)? ", True)
if save_settings:
ldap_property_value_map[CLIENT_SECURITY_KEY] = 'ldap'
if isSecure:
if mgr_password:
encrypted_passwd = encrypt_password(LDAP_MGR_PASSWORD_ALIAS, mgr_password)
if mgr_password != encrypted_passwd:
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = encrypted_passwd
pass
if ts_password:
encrypted_passwd = encrypt_password(SSL_TRUSTSTORE_PASSWORD_ALIAS, ts_password)
if ts_password != encrypted_passwd:
ldap_property_value_map[SSL_TRUSTSTORE_PASSWORD_PROPERTY] = encrypted_passwd
pass
pass
# Persisting values
ldap_property_value_map[IS_LDAP_CONFIGURED] = "true"
if mgr_password:
ldap_property_value_map[LDAP_MGR_PASSWORD_PROPERTY] = store_password_file(mgr_password, LDAP_MGR_PASSWORD_FILENAME)
update_properties_2(properties, ldap_property_value_map)
print 'Saving...done'
return 0
| 43.968981
| 145
| 0.716599
|
8a892997ba5b77ea886ba495e7dd42dbd3696d35
| 3,102
|
py
|
Python
|
ch07/im2col.py
|
lemolatoon/DeepLearningFromZero
|
6d45410ff25f971fe856643967688023fd10f4bc
|
[
"MIT"
] | null | null | null |
ch07/im2col.py
|
lemolatoon/DeepLearningFromZero
|
6d45410ff25f971fe856643967688023fd10f4bc
|
[
"MIT"
] | null | null | null |
ch07/im2col.py
|
lemolatoon/DeepLearningFromZero
|
6d45410ff25f971fe856643967688023fd10f4bc
|
[
"MIT"
] | null | null | null |
import numpy as np
def im2col(input_data, filter_h, filter_w, stride=1, pad=0):
"""
Parameters
----------
input_data : input data as a 4-D array of shape (number of samples, channels, height, width)
filter_h : filter height
filter_w : filter width
stride : stride
pad : padding
Returns
-------
col : 2-D array
"""
N, C, H, W = input_data.shape
out_h = (H + 2*pad - filter_h)//stride + 1  # // is floor division
out_w = (W + 2*pad - filter_w)//stride + 1
img = np.pad(input_data, [(0,0), (0,0), (pad, pad), (pad, pad)], 'constant')  # no padding on N and C; pad H and W by `pad` on each side
col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))  # allocate the output shape only
for y in range(filter_h):  # loop over the filter height
y_max = y + stride*out_h
for x in range(filter_w):
x_max = x + stride*out_w
col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]
col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N*out_h*out_w, -1)
return col
def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
"""
Parameters
----------
col :
input_shape : shape of the input data (e.g. (10, 1, 28, 28))
filter_h :
filter_w
stride
pad
Returns
-------
"""
N, C, H, W = input_shape
out_h = (H + 2*pad - filter_h)//stride + 1
out_w = (W + 2*pad - filter_w)//stride + 1
col = col.reshape(N, out_h, out_w, C, filter_h, filter_w).transpose(0, 3, 4, 5, 1, 2)
img = np.zeros((N, C, H + 2*pad + stride - 1, W + 2*pad + stride - 1))
for y in range(filter_h):
y_max = y + stride*out_h
for x in range(filter_w):
x_max = x + stride*out_w
img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]
return img[:, :, pad:H + pad, pad:W + pad]
class Convolution:
def __init__(self, W, b, stride=1, pad=0):
self.W = W
self.b = b
self.stride = stride
self.pad = pad
# intermediate data (used in backward)
self.x = None
self.col = None
self.col_W = None
# gradients of the weight and bias parameters
self.dW = None
self.db = None
def forward(self, x):
FN, C, FH, FW = self.W.shape
N, C, H, W = x.shape
out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
out_w = 1 + int((W + 2*self.pad - FW) / self.stride)
col = im2col(x, FH, FW, self.stride, self.pad)
col_W = self.W.reshape(FN, -1).T
out = np.dot(col, col_W) + self.b
out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
self.x = x
self.col = col
self.col_W = col_W
return out
def backward(self, dout):
FN, C, FH, FW = self.W.shape
dout = dout.transpose(0,2,3,1).reshape(-1, FN)
self.db = np.sum(dout, axis=0)
self.dW = np.dot(self.col.T, dout)
self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)
dcol = np.dot(dout, self.col_W.T)
dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
return dx
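# Illustrative usage sketch (not part of the original file): exercises the
# im2col/col2im pair and the Convolution layer on random data so the expected
# shapes are easy to verify. All sizes below are arbitrary example values.
if __name__ == '__main__':
    x = np.random.rand(2, 3, 7, 7)                 # (N, C, H, W)
    col = im2col(x, filter_h=5, filter_w=5, stride=1, pad=0)
    print(col.shape)                               # (2*3*3, 3*5*5) = (18, 75)
    W = np.random.rand(10, 3, 5, 5)                # 10 filters of shape (3, 5, 5)
    b = np.zeros(10)
    conv = Convolution(W, b, stride=1, pad=0)
    out = conv.forward(x)
    print(out.shape)                               # (2, 10, 3, 3)
    dx = conv.backward(np.ones_like(out))
    print(dx.shape)                                # (2, 3, 7, 7)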
| 28.2
| 120
| 0.503868
|
f77b598a6f188797a170afad66055404d3eef009
| 5,432
|
py
|
Python
|
tensorflow_probability/python/experimental/mcmc/covariance_reducer_test.py
|
ZhiqingXiao/probability
|
06a2ca643792c0cf8f047fab7971ba6784dec1c4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/mcmc/covariance_reducer_test.py
|
ZhiqingXiao/probability
|
06a2ca643792c0cf8f047fab7971ba6784dec1c4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/mcmc/covariance_reducer_test.py
|
ZhiqingXiao/probability
|
06a2ca643792c0cf8f047fab7971ba6784dec1c4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for CovarianceReducer and VarianceReducer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class CovarianceReducersTest(test_util.TestCase):
def test_zero_covariance(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer()
state = cov_reducer.initialize(0.)
for _ in range(2):
state = cov_reducer.one_step(0., state)
final_num_samples, final_mean, final_cov = self.evaluate([
state.cov_state.num_samples,
state.cov_state.mean,
cov_reducer.finalize(state)])
self.assertEqual(final_num_samples, 2)
self.assertEqual(final_mean, 0)
self.assertEqual(final_cov, 0)
def test_random_sanity_check(self):
rng = test_util.test_np_rng()
x = rng.rand(100)
cov_reducer = tfp.experimental.mcmc.CovarianceReducer()
state = cov_reducer.initialize(0.)
for sample in x:
state = cov_reducer.one_step(sample, state)
final_mean, final_cov = self.evaluate([
state.cov_state.mean,
cov_reducer.finalize(state)])
self.assertNear(np.mean(x), final_mean, err=1e-6)
self.assertNear(np.var(x, ddof=0), final_cov, err=1e-6)
def test_covariance_shape(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer(event_ndims=1)
state = cov_reducer.initialize(tf.ones((9, 3)))
for _ in range(2):
state = cov_reducer.one_step(
tf.zeros((5, 9, 3)), state, axis=0)
final_mean, final_cov = self.evaluate([
state.cov_state.mean,
cov_reducer.finalize(state)])
self.assertEqual(final_mean.shape, (9, 3))
self.assertEqual(final_cov.shape, (9, 3, 3))
def test_variance_shape(self):
var_reducer = tfp.experimental.mcmc.VarianceReducer()
state = var_reducer.initialize(tf.ones((9, 3)))
for _ in range(2):
state = var_reducer.one_step(
tf.zeros((5, 9, 3)), state, axis=0)
final_mean, final_var = self.evaluate([
state.cov_state.mean,
var_reducer.finalize(state)])
self.assertEqual(final_mean.shape, (9, 3))
self.assertEqual(final_var.shape, (9, 3))
def test_attributes(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer(
event_ndims=1, ddof=1)
state = cov_reducer.initialize(tf.ones((2, 3), dtype=tf.float64))
# check attributes are correct right after initialization
self.assertEqual(cov_reducer.event_ndims, 1)
self.assertEqual(cov_reducer.ddof, 1)
for _ in range(2):
state = cov_reducer.one_step(
tf.zeros((2, 3), dtype=tf.float64), state)
# check attributes don't change after stepping through
self.assertEqual(cov_reducer.event_ndims, 1)
self.assertEqual(cov_reducer.ddof, 1)
def test_tf_while(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer()
state = cov_reducer.initialize(tf.ones((2, 3)))
_, state = tf.while_loop(
lambda i, _: i < 100,
lambda i, state: (i + 1, cov_reducer.one_step(tf.ones((2, 3)), state)),
(0., state)
)
final_cov = self.evaluate(cov_reducer.finalize(state))
self.assertAllClose(final_cov, tf.zeros((2, 3, 2, 3)), rtol=1e-6)
def test_nested_chain_state(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer(event_ndims=0)
chain_state = ({'one': tf.ones((2, 3)), 'zero': tf.zeros((2, 3))},
{'two': tf.ones((2, 3)) * 2})
state = cov_reducer.initialize(chain_state)
_, state = tf.while_loop(
lambda i, _: i < 10,
lambda i, state: (i + 1, cov_reducer.one_step(chain_state, state)),
(0., state)
)
final_cov = self.evaluate(cov_reducer.finalize(state))
self.assertAllEqualNested(
final_cov, ({'one': tf.zeros((2, 3)), 'zero': tf.zeros((2, 3))},
{'two': tf.zeros((2, 3))}))
def test_nested_with_batching_and_chunking(self):
cov_reducer = tfp.experimental.mcmc.CovarianceReducer(event_ndims=1)
chain_state = ({'one': tf.ones((3, 4)), 'zero': tf.zeros((3, 4))},
{'two': tf.ones((3, 4)) * 2})
state = cov_reducer.initialize(chain_state)
_, state = tf.while_loop(
lambda i, _: i < 10,
lambda i, state: (i + 1, cov_reducer.one_step(chain_state, state, 0)),
(0., state)
)
final_cov = self.evaluate(cov_reducer.finalize(state))
self.assertAllEqualNested(
final_cov, ({'one': tf.zeros((3, 4, 4)), 'zero': tf.zeros((3, 4, 4))},
{'two': tf.zeros((3, 4, 4))}))
if __name__ == '__main__':
tf.test.main()
| 37.986014
| 79
| 0.665133
|
b220b6aaf903223df1f8727084059528397ca9cc
| 21,470
|
py
|
Python
|
trac/test.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
trac/test.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
trac/test.py
|
mikiec84/trac
|
d51a7119b9fcb9061d7fe135c7d648fa671555dd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2020 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import abc
import doctest
import importlib
import inspect
import io
import logging
import logging.handlers
import numbers
import os
import shutil
import sys
import tempfile
import time
import types
import unittest
try:
from babel import Locale
except ImportError:
locale_en = None
else:
locale_en = Locale.parse('en_US')
import trac.log
from trac.config import Configuration
from trac.core import ComponentManager, ComponentMeta, TracError
from trac.db.api import DatabaseManager, parse_connection_uri
from trac.env import Environment
from trac.perm import PermissionCache
from trac.ticket.default_workflow import load_workflow_config_snippet
from trac.util import translation
from trac.util.datefmt import time_now, utc
from trac.web.api import _RequestArgs, Request, arg_list_to_args
from trac.web.chrome import Chrome
from trac.web.session import Session
def Mock(bases=(), *initargs, **kw):
"""
Simple factory for dummy classes that can be used as replacement for the
real implementation in tests.
Base classes for the mock can be specified using the first parameter, which
must be either a tuple of class objects or a single class object. If the
bases parameter is omitted, the base class of the mock will be object.
So to create a mock that is derived from the builtin dict type, you can do:
>>> mock = Mock(dict)
>>> mock['foo'] = 'bar'
>>> mock['foo']
'bar'
Attributes of the class are provided by any additional keyword parameters.
>>> mock = Mock(foo='bar')
>>> mock.foo
'bar'
Objects produced by this function have the special feature of not requiring
the 'self' parameter on methods, because you should keep data at the scope
of the test function. So you can just do:
>>> mock = Mock(add=lambda x,y: x+y)
>>> mock.add(1, 1)
2
To access attributes from the mock object from inside a lambda function,
just access the mock itself:
>>> mock = Mock(dict, do=lambda x: 'going to the %s' % mock[x])
>>> mock['foo'] = 'bar'
>>> mock.do('foo')
'going to the bar'
Because assignments or other types of statements don't work in lambda
functions, assigning to a local variable from a mock function requires some
extra work:
>>> myvar = [None]
>>> mock = Mock(set=lambda x: myvar.__setitem__(0, x))
>>> mock.set(1)
>>> myvar[0]
1
"""
if not isinstance(bases, tuple):
bases = (bases,)
# if base classes have abstractmethod and abstractproperty,
# create dummy methods for abstracts
attrs = {}
def dummyfn(self, *args, **kwargs):
raise NotImplementedError
for base in bases:
if getattr(base, '__metaclass__', None) is not abc.ABCMeta:
continue
fn = types.MethodType(dummyfn, base)
for name, attr in inspect.getmembers(base):
if name in attrs:
continue
if isinstance(attr, abc.abstractproperty) or \
getattr(attr, '__isabstractmethod__', False):
attrs[name] = fn
cls = type('Mock', bases, attrs)
mock = cls(*initargs)
for k, v in kw.items():
setattr(mock, k, v)
return mock
class MockPerm(object):
"""Fake permission class. Necessary as Mock can not be used with operator
overloading."""
username = ''
def has_permission(self, action, realm_or_resource=None, id=False,
version=False):
return True
__contains__ = has_permission
def __call__(self, realm_or_resource, id=False, version=False):
return self
def require(self, action, realm_or_resource=None, id=False, version=False,
message=None):
pass
assert_permission = require
def MockRequest(env, **kwargs):
"""Request object for testing. Keyword arguments populate an
`environ` dictionary and the callbacks.
If `authname` is specified in a keyword arguments a `PermissionCache`
object is created, otherwise if `authname` is not specified or is
`None` a `MockPerm` object is used and the `authname` is set to
'anonymous'.
The following keyword arguments are commonly used:
:keyword args: dictionary of request arguments
:keyword authname: the name of the authenticated user, or 'anonymous'
:keyword method: the HTTP request method
:keyword path_info: the request path inside the application
Additionally `cookie`, `format`, `language`, `lc_time`, `locale`,
`remote_addr`, `remote_user`, `script_name`, `server_name`, `server_port`
and `tz` can be specified as keyword arguments.
:since: 1.0.11
"""
authname = kwargs.get('authname')
if authname is None:
authname = 'anonymous'
perm = MockPerm()
else:
perm = PermissionCache(env, authname)
def convert(val):
if isinstance(val, bool):
return unicode(int(val))
elif isinstance(val, numbers.Real):
return unicode(val)
elif isinstance(val, (list, tuple)):
return [convert(v) for v in val]
else:
return val
if 'arg_list' in kwargs:
arg_list = [(k, convert(v)) for k, v in kwargs['arg_list']]
args = arg_list_to_args(arg_list)
else:
args = _RequestArgs()
args.update((k, convert(v))
for k, v in kwargs.get('args', {}).iteritems())
arg_list = [(name, value) for name in args
for value in args.getlist(name)]
environ = {
'trac.base_url': env.abs_href(),
'wsgi.url_scheme': 'http',
'HTTP_ACCEPT_LANGUAGE': kwargs.get('language', ''),
'HTTP_COOKIE': kwargs.get('cookie', ''),
'PATH_INFO': kwargs.get('path_info', '/'),
'REQUEST_METHOD': kwargs.get('method', 'GET'),
'REMOTE_ADDR': kwargs.get('remote_addr', '127.0.0.1'),
'REMOTE_USER': kwargs.get('remote_user', authname),
'SCRIPT_NAME': kwargs.get('script_name', '/trac.cgi'),
'SERVER_NAME': kwargs.get('server_name', 'localhost'),
'SERVER_PORT': kwargs.get('server_port', '80'),
}
for key in environ:
if isinstance(environ[key], unicode):
environ[key] = environ[key].encode('utf-8')
status_sent = []
headers_sent = {}
response_sent = io.BytesIO()
def start_response(status, headers, exc_info=None):
status_sent.append(status)
headers_sent.update(dict(headers))
return response_sent.write
req = Mock(Request, environ, start_response)
req.status_sent = status_sent
req.headers_sent = headers_sent
req.response_sent = response_sent
req.callbacks.update({
'arg_list': lambda req: arg_list,
'args': lambda req: args,
'authname': lambda req: authname,
'chrome': Chrome(env).prepare_request,
'form_token': lambda req: kwargs.get('form_token', 0),
'lc_time': lambda req: kwargs.get('lc_time', locale_en),
'locale': lambda req: kwargs.get('locale'),
'perm': lambda req: perm,
'session': lambda req: Session(env, req),
'tz': lambda req: kwargs.get('tz', utc),
'use_xsendfile': lambda req: False,
'xsendfile_header': lambda req: None,
'configurable_headers': lambda req: [],
})
return req
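# Illustrative usage sketch (assumed values, not part of the original module):
#
#   env = EnvironmentStub(default_data=True)
#   req = MockRequest(env, path_info='/wiki/WikiStart', authname='admin',
#                     args={'action': 'edit'})
#
# req.args, req.perm and req.session then behave like those of a real Request,
# and anything emitted through start_response is captured in req.status_sent,
# req.headers_sent and req.response_sent for assertions.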
class TestSetup(unittest.TestSuite):
"""
Test suite decorator that allows a fixture to be setup for a complete
suite of test cases.
"""
def setUp(self):
"""Sets up the fixture, and sets self.fixture if needed"""
pass
def tearDown(self):
"""Tears down the fixture"""
pass
def run(self, result):
"""Setup the fixture (self.setUp), call .setFixture on all the tests,
and tear down the fixture (self.tearDown)."""
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite.run(self, result)
self.tearDown()
return result
def _wrapped_run(self, *args, **kwargs):
"""Python 2.7 / unittest2 compatibility - there must be a better
way..."""
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite._wrapped_run(self, *args, **kwargs)
self.tearDown()
class TestCaseSetup(unittest.TestCase):
def setFixture(self, fixture):
self.fixture = fixture
# -- Database utilities
def get_dburi():
dburi = os.environ.get('TRAC_TEST_DB_URI')
if dburi:
scheme, db_prop = parse_connection_uri(dburi)
# Assume the schema 'tractest' for PostgreSQL
if scheme == 'postgres' and \
not db_prop.get('params', {}).get('schema'):
dburi += ('&' if '?' in dburi else '?') + 'schema=tractest'
elif scheme == 'sqlite' and db_prop['path'] != ':memory:' and \
not db_prop.get('params', {}).get('synchronous'):
# Speed-up tests with SQLite database
dburi += ('&' if '?' in dburi else '?') + 'synchronous=off'
else:
scheme = 'sqlite'
dburi = '%s::memory:' % scheme
importlib.import_module('trac.db.%s_backend' % scheme)
return dburi
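# Illustrative examples (assumed values): TRAC_TEST_DB_URI takes the same
# connection-string form as a trac.ini database setting, e.g.
#
#   sqlite:db/trac.db
#   postgres://tracuser:password@localhost/trac
#
# When the variable is unset, the tests fall back to an in-memory SQLite
# database ('sqlite::memory:') as constructed above.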
class EnvironmentStub(Environment):
"""A stub of the trac.env.Environment class for testing."""
required = False
abstract = True
def __init__(self, default_data=False, enable=None, disable=None,
path=None, destroying=False, config=None):
"""Construct a new Environment stub object.
:param default_data: If True, populate the database with some
defaults.
:param enable: A list of component classes or name globs to
activate in the stub environment.
:param disable: A list of component classes or name globs to
deactivate in the stub environment.
:param path: The location of the environment in the file system.
No files or directories are created when specifying
this parameter.
:param destroying: If True, the database will not be reset. This is
useful for cases when the object is being
constructed in order to call `destroy_db`.
:param config: A list of (section, key, value) configuration
tuples.
"""
if enable is not None and not isinstance(enable, (list, tuple)):
raise TypeError('Keyword argument "enable" must be a list')
if disable is not None and not isinstance(disable, (list, tuple)):
raise TypeError('Keyword argument "disable" must be a list')
ComponentManager.__init__(self)
self._old_registry = None
self._old_components = None
import trac
self.path = path
if self.path is None:
self.path = os.path.abspath(os.path.dirname(trac.__file__))
self.path = os.path.normpath(os.path.normcase(self.path))
# -- configuration
self.config = Configuration(None)
# We have to have a ticket-workflow config for ''lots'' of things to
# work. So insert the basic-workflow config here. There may be a
# better solution than this.
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.set('logging', 'log_level', 'DEBUG')
self.config.set('logging', 'log_type', 'none') # Ignored.
if enable is not None:
self.config.set('components', 'trac.*', 'disabled')
else:
self.config.set('components', 'tracopt.versioncontrol.*',
'enabled')
for name_or_class in enable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'enabled')
for name_or_class in disable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'disabled')
self.config.set('trac', 'permission_policies',
'DefaultPermissionPolicy, LegacyAttachmentPolicy')
for item in config or []:
self.config.set(*item)
# -- logging
self.setup_log()
# -- database
self.dburi = get_dburi()
self.config.set('components', 'trac.db.*', 'enabled')
self.config.set('trac', 'database', self.dburi)
if not destroying:
self.reset_db(default_data)
self.config.set('trac', 'base_url', 'http://example.org/trac.cgi')
translation.activate(locale_en)
def setup_log(self):
self.log = logging.getLogger('trac.test')
level = self.log_level.upper()
level_as_int = trac.log.LOG_LEVEL_MAP.get(level)
self.log.setLevel(level_as_int)
handler_cls = logging.handlers.BufferingHandler
if not self.log.handlers:
log_handler = handler_cls(sys.maxsize) # Never flush implicitly.
formatter = logging.Formatter(self.log_format)
log_handler.setFormatter(formatter)
self.log.addHandler(log_handler)
elif len(self.log.handlers) == 1 and \
isinstance(self.log.handlers[0], handler_cls):
self.log.handlers[0].flush() # Reset buffer.
else:
raise TracError("Logger has unexpected handler(s).")
@property
def log_messages(self):
"""Returns a list of tuples (level, message)."""
return [(record.levelname, record.getMessage())
for record in self.log.handlers[0].buffer]
def reset_db(self, default_data=None):
"""Remove all data from Trac tables, keeping the tables themselves.
:param default_data: after clean-up, initialize with default data
:return: True upon success
"""
from trac import db_default
tables = []
dbm = DatabaseManager(self)
try:
db_version = dbm.get_database_version()
except (TracError, self.db_exc.DatabaseError):
pass
else:
if db_version == db_default.db_version:
# same version, simply clear the tables (faster)
tables = dbm.reset_tables()
else:
# different version or version unknown, drop the tables
self.destroy_db()
if not tables:
dbm.init_db()
# Make sure the next db_query()/db_transaction() will create
# a new connection aware of the new data model - see #8518.
if self.dburi != 'sqlite::memory:':
dbm.shutdown()
if default_data:
dbm.insert_default_data()
dbm.set_database_version(db_default.db_version)
def destroy_db(self):
"""Destroy the database."""
try:
DatabaseManager(self).destroy_db()
except (TracError, self.db_exc.DatabaseError):
pass
# tearDown helper
def reset_db_and_disk(self):
"""Performs a complete environment reset in a robust way.
The database is reset, then the connections are shut down, and
finally all environment files are removed from the disk.
"""
self.reset_db()
self.shutdown() # really closes the db connections
rmtree(self.env.path)
if self._old_registry is not None:
self.restore_component_registry()
# other utilities
def insert_users(self, users):
"""Insert a tuple representing a user session to the
`session` and `session_attributes` tables.
The tuple can be length 3 with entries username, name and
email, in which case an authenticated user is assumed. The
tuple can also be length 4, with the last entry specifying
`1` for an authenticated user or `0` for an unauthenticated
user.
"""
with self.db_transaction as db:
for row in users:
if len(row) == 3:
username, name, email = row
authenticated = 1
else: # len(row) == 4
username, name, email, authenticated = row
db("INSERT INTO session VALUES (%s, %s, %s)",
(username, authenticated, int(time_now())))
db("INSERT INTO session_attribute VALUES (%s,%s,'name',%s)",
(username, authenticated, name))
db("INSERT INTO session_attribute VALUES (%s,%s,'email',%s)",
(username, authenticated, email))
# overridden
def is_component_enabled(self, cls):
if self._component_name(cls).startswith('__main__.'):
return True
return Environment.is_component_enabled(self, cls)
def mkdtemp():
"""Create a temp directory with prefix `trac-tempenv`
and return the directory name.
"""
return os.path.realpath(tempfile.mkdtemp(prefix='trac-testdir-'))
def locate(fn):
"""Locates a binary on the path.
Returns the fully-qualified path, or None.
"""
exec_suffix = '.exe' if os.name == 'nt' else ''
for p in ["."] + os.environ['PATH'].split(os.pathsep):
f = os.path.join(p, fn + exec_suffix)
if os.path.exists(f):
return f
return None
def rmtree(path):
import errno
def onerror(function, path, excinfo, retry=1):
# `os.remove` fails for a readonly file on Windows.
# In that case, make the file writable and retry the removal.
if function != os.remove:
raise
e = excinfo[1]
if isinstance(e, OSError) and e.errno == errno.EACCES:
mode = os.stat(path).st_mode
os.chmod(path, mode | 0o666)
try:
function(path)
except Exception:
# print "%d: %s %o" % (retry, path, os.stat(path).st_mode)
if retry > 10:
raise
time.sleep(0.1)
onerror(function, path, excinfo, retry + 1)
else:
raise
if os.name == 'nt' and isinstance(path, str):
# Use unicode characters in order to allow non-ansi characters
# on Windows.
path = unicode(path, sys.getfilesystemencoding())
shutil.rmtree(path, onerror=onerror)
INCLUDE_FUNCTIONAL_TESTS = True
def test_suite():
import trac.tests
import trac.admin.tests
import trac.db.tests
import trac.mimeview.tests
import trac.notification.tests
import trac.search.tests
import trac.ticket.tests
import trac.timeline.tests
import trac.upgrades.tests
import trac.util.tests
import trac.versioncontrol.tests
import trac.versioncontrol.web_ui.tests
import trac.web.tests
import trac.wiki.tests
import tracopt.perm.tests
import tracopt.ticket.tests
import tracopt.versioncontrol.git.tests
import tracopt.versioncontrol.svn.tests
if os.environ.get('SKIP_FUNCTIONAL_TESTS'):
global INCLUDE_FUNCTIONAL_TESTS
INCLUDE_FUNCTIONAL_TESTS = False
suite = unittest.TestSuite()
suite.addTest(trac.tests.basicSuite())
suite.addTest(trac.admin.tests.test_suite())
suite.addTest(trac.db.tests.test_suite())
suite.addTest(trac.mimeview.tests.test_suite())
suite.addTest(trac.notification.tests.test_suite())
suite.addTest(trac.search.tests.test_suite())
suite.addTest(trac.ticket.tests.test_suite())
suite.addTest(trac.timeline.tests.test_suite())
suite.addTest(trac.upgrades.tests.test_suite())
suite.addTest(trac.util.tests.test_suite())
suite.addTest(trac.versioncontrol.tests.test_suite())
suite.addTest(trac.versioncontrol.web_ui.tests.test_suite())
suite.addTest(trac.web.tests.test_suite())
suite.addTest(trac.wiki.tests.test_suite())
suite.addTest(tracopt.perm.tests.test_suite())
suite.addTest(tracopt.ticket.tests.test_suite())
suite.addTest(tracopt.versioncontrol.git.tests.test_suite())
suite.addTest(tracopt.versioncontrol.svn.tests.test_suite())
suite.addTest(doctest.DocTestSuite(sys.modules[__name__]))
if INCLUDE_FUNCTIONAL_TESTS:
suite.addTest(trac.tests.functionalSuite())
return suite
if __name__ == '__main__':
# FIXME: this is a bit inelegant
if '--skip-functional-tests' in sys.argv:
sys.argv.remove('--skip-functional-tests')
INCLUDE_FUNCTIONAL_TESTS = False
unittest.main(defaultTest='test_suite')
| 35.196721
| 79
| 0.626409
|
b7df58eee6272289c672a7267df4fc4cd0f43b84
| 2,466
|
py
|
Python
|
example_base.py
|
shadim/eg-01-python-jwt
|
efe9f686f091754a54428502a456eac4f51c7ee3
|
[
"MIT"
] | 1
|
2020-10-27T02:15:44.000Z
|
2020-10-27T02:15:44.000Z
|
example_base.py
|
shadim/eg-01-python-jwt
|
efe9f686f091754a54428502a456eac4f51c7ee3
|
[
"MIT"
] | null | null | null |
example_base.py
|
shadim/eg-01-python-jwt
|
efe9f686f091754a54428502a456eac4f51c7ee3
|
[
"MIT"
] | 1
|
2020-10-27T02:15:48.000Z
|
2020-10-27T02:15:48.000Z
|
import time
from ds_config import DSConfig
from ds_helper import DSHelper
TOKEN_REPLACEMENT_IN_SECONDS = 10 * 60
TOKEN_EXPIRATION_IN_SECONDS = 3600
class ExampleBase:
"""
Example Base class
"""
accountID = None
api_client = None
_token_received = False
account = None
expiresTimestamp = 0
def __init__(self, api_client):
ExampleBase.api_client = api_client
def check_token(self):
current_time = int(round(time.time()))
if not ExampleBase._token_received \
or ((current_time + TOKEN_REPLACEMENT_IN_SECONDS) > ExampleBase.expiresTimestamp):
self.update_token()
def update_token(self):
client = ExampleBase.api_client
private_key_file = DSHelper.create_private_key_temp_file("private-key")
print ("Requesting an access token via JWT grant...", end='')
client.configure_jwt_authorization_flow(private_key_file.name,
DSConfig.aud(),
DSConfig.client_id(),
DSConfig.impersonated_user_guid(), TOKEN_EXPIRATION_IN_SECONDS)
private_key_file.close()
if ExampleBase.account is None:
account = self.get_account_info(client)
ExampleBase.base_uri = account['base_uri'] + '/restapi'
ExampleBase.accountID = account['account_id']
client.host = ExampleBase.base_uri
ExampleBase._token_received = True
ExampleBase.expiresTimestamp = (int(round(time.time())) + TOKEN_EXPIRATION_IN_SECONDS)
print ("Done. Continuing...")
def get_account_info(self, client):
client.host = DSConfig.auth_server()
response = client.call_api("/oauth/userinfo", "GET", response_type="object")
if len(response) > 1 and not 200 <= response[1] < 300:
raise Exception("cannot get user info: {0}".format(response[1]))
accounts = response[0]['accounts']
target = DSConfig.target_account_id()
if target is None or target == "FALSE":
# Look for default
for acct in accounts:
if acct['is_default']:
return acct
# Look for specific account
for acct in accounts:
if acct['account_id'] == target:
return acct
raise Exception(f"\n\nUser does not have access to account {target}\n\n")
| 32.88
| 111
| 0.612733
|
6b9ddf7a220fa0230531c8e4b6ddb56347be68cc
| 120
|
py
|
Python
|
interfaces/typing.py
|
lig/python-interfaces
|
cf8c5419a827ed74732b938db82e293ea7c9f43f
|
[
"MIT"
] | 15
|
2019-04-29T05:35:04.000Z
|
2021-07-17T01:35:19.000Z
|
interfaces/typing.py
|
lig/python-interfaces
|
cf8c5419a827ed74732b938db82e293ea7c9f43f
|
[
"MIT"
] | 2
|
2019-05-04T20:51:46.000Z
|
2019-05-06T07:09:32.000Z
|
interfaces/typing.py
|
lig/python-interfaces
|
cf8c5419a827ed74732b938db82e293ea7c9f43f
|
[
"MIT"
] | 1
|
2020-12-04T07:37:45.000Z
|
2020-12-04T07:37:45.000Z
|
import typing
if typing.TYPE_CHECKING:
import interfaces.base
InterfaceType = interfaces.base._InterfaceMeta
| 15
| 50
| 0.783333
|
6e90b8e54bc50f734eab44a55ad1e20efb957a08
| 11,416
|
py
|
Python
|
tools/gengl.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | 15
|
2015-01-21T12:29:01.000Z
|
2018-12-09T09:17:33.000Z
|
tools/gengl.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | null | null | null |
tools/gengl.py
|
bitcraft/pyglet
|
144257c365ca85528c6a4c5bed8141e683d7a9b6
|
[
"BSD-3-Clause"
] | 9
|
2015-12-12T09:12:46.000Z
|
2021-12-26T13:29:14.000Z
|
#!/usr/bin/env python
"""Generate files in pyglet.gl and pyglet/GLU
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import marshal
import optparse
import os.path
import urllib.request
import urllib.error
import urllib.parse
import sys
import textwrap
from wraptypes.wrap import CtypesWrapper
script_dir = os.path.abspath(os.path.dirname(__file__))
GLEXT_ABI_H = 'http://www.opengl.org/registry/api/glext.h'
GLEXT_NV_H = 'http://developer.download.nvidia.com/opengl/includes/glext.h'
GLXEXT_ABI_H = 'http://www.opengl.org/registry/api/glxext.h'
GLXEXT_NV_H = 'http://developer.download.nvidia.com/opengl/includes/glxext.h'
WGLEXT_ABI_H = 'http://www.opengl.org/registry/api/wglext.h'
WGLEXT_NV_H = 'http://developer.download.nvidia.com/opengl/includes/wglext.h'
AGL_H = '/System/Library/Frameworks/AGL.framework/Headers/agl.h'
GL_H = '/usr/include/GL/gl.h'
GLU_H = '/usr/include/GL/glu.h'
GLX_H = '/usr/include/GL/glx.h'
WGL_H = os.path.join(script_dir, 'wgl.h')
CACHE_FILE = os.path.join(script_dir, '.gengl.cache')
_cache = dict()
def load_cache():
global _cache
if os.path.exists(CACHE_FILE):
try:
_cache = marshal.load(open(CACHE_FILE, 'rb')) or dict()
except:
pass
_cache = dict()
def save_cache():
try:
marshal.dump(_cache, open(CACHE_FILE, 'wb'))
except:
pass
def read_url(url):
if url in _cache:
return _cache[url]
if os.path.exists(url):
data = open(url).read()
else:
data = urllib.request.urlopen(url).read()
_cache[url] = data
save_cache()
return data
class GLWrapper(CtypesWrapper):
requires = None
requires_prefix = None
def __init__(self, header):
self.header = header
super().__init__()
def print_preamble(self):
import time
print(textwrap.dedent("""
# This content is generated by %(script)s.
# Wrapper for %(header)s
""" % {
'header': self.header,
'date': time.ctime(),
'script': __file__,
}).lstrip(), file=self.file)
def handle_ctypes_function(self, name, restype, argtypes, filename,
lineno):
if self.does_emit(name, filename):
self.emit_type(restype)
for a in argtypes:
self.emit_type(a)
self.all_names.append(name)
print('# %s:%d' % (filename, lineno), file=self.file)
print('%s = _link_function(%r, %s, [%s], %r)' %
(name, name, str(restype),
', '.join([str(a) for a in argtypes]), self.requires),
file=self.file)
print(file=self.file)
def handle_ifndef(self, name, filename, lineno):
if self.requires_prefix and \
name[:len(
self.requires_prefix)] == self.requires_prefix:
self.requires = name[len(self.requires_prefix):]
print('# %s (%s:%d)' %
(self.requires, filename, lineno), file=self.file)
def progress(msg):
print(msg, file=sys.stderr)
marker_begin = '# BEGIN GENERATED CONTENT (do not edit below this line)\n'
marker_end = '# END GENERATED CONTENT (do not edit above this line)\n'
class ModuleWrapper:
def __init__(self, header, filename,
prologue='', requires_prefix=None,
system_header=None,
link_modules=()):
self.header = header
self.filename = filename
self.prologue = prologue
self.requires_prefix = requires_prefix
self.system_header = system_header
self.link_modules = link_modules
def wrap(self, dir):
progress('Updating %s...' % self.filename)
source = read_url(self.header)
filename = os.path.join(dir, self.filename)
prologue = list()
epilogue = list()
state = 'prologue'
try:
for line in open(filename):
if state == 'prologue':
prologue.append(line)
if line == marker_begin:
state = 'generated'
elif state == 'generated':
if line == marker_end:
state = 'epilogue'
epilogue.append(line)
elif state == 'epilogue':
epilogue.append(line)
except IOError:
prologue = [marker_begin]
epilogue = [marker_end]
state = 'epilogue'
if state != 'epilogue':
raise Exception(
'File exists, but generated markers are corrupt '
'or missing')
outfile = open(filename, 'w')
print(''.join(prologue), file=outfile)
wrapper = GLWrapper(self.header)
if self.system_header:
wrapper.preprocessor_parser.system_headers[
self.system_header] = \
source
header_name = self.system_header or self.header
wrapper.begin_output(outfile,
library=None,
link_modules=self.link_modules,
emit_filenames=(header_name,))
wrapper.requires_prefix = self.requires_prefix
source = self.prologue + source
wrapper.wrap(header_name, source)
wrapper.end_output()
print(''.join(epilogue), file=outfile)
modules = {
'gl':
ModuleWrapper(GL_H, 'gl.py'),
'glu':
ModuleWrapper(GLU_H, 'glu.py'),
'glext_arb':
ModuleWrapper(GLEXT_ABI_H, 'glext_arb.py',
requires_prefix='GL_',
system_header='GL/glext.h',
prologue='#define GL_GLEXT_PROTOTYPES\n#include <GL/gl.h>\n'),
'glext_nv':
ModuleWrapper(GLEXT_NV_H, 'glext_nv.py',
requires_prefix='GL_',
system_header='GL/glext.h',
prologue='#define GL_GLEXT_PROTOTYPES\n#include <GL/gl.h>\n'),
'glx':
ModuleWrapper(GLX_H, 'glx.py',
requires_prefix='GLX_',
link_modules=(
'pyglet.libs.x11.xlib',)),
'glxext_arb':
ModuleWrapper(GLXEXT_ABI_H, 'glxext_arb.py',
requires_prefix='GLX_',
system_header='GL/glxext.h',
prologue='#define GLX_GLXEXT_PROTOTYPES\n#include <GL/glx.h>\n',
link_modules=('pyglet.libs.x11.xlib',
'pyglet.gl.glx')),
'glxext_nv':
ModuleWrapper(GLXEXT_NV_H, 'glxext_nv.py',
requires_prefix='GLX_',
system_header='GL/glxext.h',
prologue='#define GLX_GLXEXT_PROTOTYPES\n#include <GL/glx.h>\n',
link_modules=('pyglet.libs.x11.xlib',
'pyglet.gl.glx')),
'agl':
ModuleWrapper(AGL_H, 'agl.py'),
'wgl':
ModuleWrapper(WGL_H, 'wgl.py'),
'wglext_arb':
ModuleWrapper(WGLEXT_ABI_H, 'wglext_arb.py',
requires_prefix='WGL_',
prologue='#define WGL_WGLEXT_PROTOTYPES\n'
'#include "%s"\n' % WGL_H.encode(
'string_escape')),
'wglext_nv':
ModuleWrapper(WGLEXT_NV_H, 'wglext_nv.py',
requires_prefix='WGL_',
prologue='#define WGL_WGLEXT_PROTOTYPES\n'
'#include "%s"\n' % WGL_H.encode(
'string_escape')),
}
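# Illustrative invocation (example command line): regenerate selected wrappers
# into the default pyglet/gl output directory with
#
#   python gengl.py gl glu glext_arb
#
# optionally passing --dir for a different output directory or --refresh-cache
# to re-download the extension headers instead of using the marshal cache
# (options defined below).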
op = optparse.OptionParser()
op.add_option('-D', '--dir', dest='dir',
help='output directory')
op.add_option('-r', '--refresh-cache',
dest='refresh_cache',
help='clear cache first',
action='store_true')
options, args = op.parse_args()
if not options.refresh_cache:
load_cache()
else:
save_cache()
if not args:
print(
'Specify module(s) to generate:', file=sys.stderr)
print(' %s' % ' '.join(list(modules.keys())),
file=sys.stderr)
if not options.dir:
options.dir = os.path.join(script_dir,
os.path.pardir,
'pyglet', 'gl')
if not os.path.exists(options.dir):
os.makedirs(options.dir)
for arg in args:
if arg not in modules:
print("Don't know how to make '%s'" % arg,
file=sys.stderr)
continue
modules[arg].wrap(options.dir)
| 43.406844
| 106
| 0.413805
|
ff6993dffc70b3c76ef8b7cd25b6e70e56d469ee
| 4,319
|
py
|
Python
|
instana/util/__init__.py
|
tirkarthi/python-sensor
|
9872d146ac00baff2673fde5ba97fdbe596869a4
|
[
"MIT"
] | 61
|
2017-09-27T02:50:17.000Z
|
2022-03-22T12:13:37.000Z
|
instana/util/__init__.py
|
tirkarthi/python-sensor
|
9872d146ac00baff2673fde5ba97fdbe596869a4
|
[
"MIT"
] | 82
|
2017-07-11T13:47:33.000Z
|
2022-03-22T10:10:38.000Z
|
instana/util/__init__.py
|
takeaway/python-sensor
|
52d6eaa2d6a8e625201bad36ac2448201c4bd63d
|
[
"MIT"
] | 27
|
2017-09-11T16:22:32.000Z
|
2022-03-11T17:21:49.000Z
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
import json
import sys
import time
from collections import defaultdict
import pkg_resources
try:
from urllib import parse
except ImportError:
import urlparse as parse
import urllib
from ..log import logger
if sys.version_info.major == 2:
string_types = basestring
else:
string_types = str
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def nested_dictionary():
return defaultdict(DictionaryOfStan)
# Simple implementation of a nested dictionary.
DictionaryOfStan = nested_dictionary
def to_json(obj):
"""
Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested
dicts (or something better)
:param obj: the object to serialize to json
:return: json string
"""
try:
def extractor(o):
if not hasattr(o, '__dict__'):
logger.debug("Couldn't serialize non dict type: %s", type(o))
return {}
else:
return {k.lower(): v for k, v in o.__dict__.items() if v is not None}
return json.dumps(obj, default=extractor, sort_keys=False, separators=(',', ':')).encode()
except Exception:
logger.debug("to_json non-fatal encoding issue: ", exc_info=True)
def to_pretty_json(obj):
"""
Convert obj to pretty json. Used mostly in logging/debugging.
:param obj: the object to serialize to json
:return: json string
"""
try:
def extractor(o):
if not hasattr(o, '__dict__'):
logger.debug("Couldn't serialize non dict type: %s", type(o))
return {}
else:
return {k.lower(): v for k, v in o.__dict__.items() if v is not None}
return json.dumps(obj, default=extractor, sort_keys=True, indent=4, separators=(',', ':'))
except Exception:
logger.debug("to_pretty_json non-fatal encoding issue: ", exc_info=True)
def package_version():
"""
Determine the version of this package.
:return: String representing known version
"""
version = ""
try:
version = pkg_resources.get_distribution('instana').version
except pkg_resources.DistributionNotFound:
version = 'unknown'
return version
def get_default_gateway():
"""
Attempts to read /proc/self/net/route to determine the default gateway in use.
:return: String - the IP address of the default gateway, or None if it cannot be determined
"""
try:
hip = None
# The first line is the header line
# We look for the line where the Destination is 00000000 - that is the default route
# The Gateway IP is encoded backwards in hex.
with open("/proc/self/net/route") as routes:
for line in routes:
parts = line.split('\t')
if parts[1] == '00000000':
hip = parts[2]
if hip is not None and len(hip) == 8:
# Reverse order, convert hex to int
return "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16), int(hip[2:4], 16), int(hip[0:2], 16))
except Exception:
logger.warning("get_default_gateway: ", exc_info=True)
def every(delay, task, name):
"""
Executes a task every `delay` seconds
:param delay: the delay in seconds
:param task: the method to run. The method should return False if you want the loop to stop.
:return: None
"""
next_time = time.time() + delay
while True:
time.sleep(max(0, next_time - time.time()))
try:
if task() is False:
break
except Exception:
logger.debug("Problem while executing repetitive task: %s", name, exc_info=True)
# skip tasks if we are behind schedule:
next_time += (time.time() - next_time) // delay * delay + delay
def validate_url(url):
"""
Validate if <url> is a valid url
Examples:
- "http://localhost:5000" - valid
- "http://localhost:5000/path" - valid
- "sandwich" - invalid
@param url: string
@return: Boolean
"""
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except Exception:
pass
return False
| 27.864516
| 111
| 0.613568
|
8d5a9614421edff7570959281d675317872af547
| 3,148
|
py
|
Python
|
xknx/remote_value/remote_value_updown.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
xknx/remote_value/remote_value_updown.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
xknx/remote_value/remote_value_updown.py
|
spacegaier/xknx
|
2c2420670da88fea386d573f78a78c5a342186e9
|
[
"MIT"
] | null | null | null |
"""
Module for managing an DPT Up/Down remote value.
DPT 1.008.
"""
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Union
from xknx.dpt import DPTArray, DPTBinary
from xknx.exceptions import ConversionError, CouldNotParseTelegram
from .remote_value import AsyncCallbackType, RemoteValue
if TYPE_CHECKING:
from xknx.telegram.address import GroupAddressableType
from xknx.xknx import XKNX
class RemoteValueUpDown(RemoteValue[DPTBinary]):
"""Abstraction for remote value of KNX DPT 1.008 / DPT_UpDown."""
class Direction(Enum):
"""Enum for indicating the direction."""
# pylint: disable=invalid-name
UP = 0
DOWN = 1
def __init__(
self,
xknx: "XKNX",
group_address: Optional["GroupAddressableType"] = None,
group_address_state: Optional["GroupAddressableType"] = None,
device_name: Optional[str] = None,
feature_name: str = "Up/Down",
after_update_cb: Optional[AsyncCallbackType] = None,
invert: bool = False,
passive_group_addresses: Optional[List["GroupAddressableType"]] = None,
):
"""Initialize remote value of KNX DPT 1.008."""
# pylint: disable=too-many-arguments
super().__init__(
xknx,
group_address,
group_address_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
passive_group_addresses=passive_group_addresses,
)
self.invert = invert
def payload_valid(
self, payload: Optional[Union[DPTArray, DPTBinary]]
) -> Optional[DPTBinary]:
"""Test if telegram payload may be parsed."""
# pylint: disable=no-self-use
return payload if isinstance(payload, DPTBinary) else None
def to_knx(self, value: "RemoteValueUpDown.Direction") -> DPTBinary:
"""Convert value to payload."""
if value == self.Direction.UP:
return DPTBinary(1) if self.invert else DPTBinary(0)
if value == self.Direction.DOWN:
return DPTBinary(0) if self.invert else DPTBinary(1)
raise ConversionError(
"value invalid",
value=value,
device_name=self.device_name,
feature_name=self.feature_name,
)
def from_knx(self, payload: DPTBinary) -> "RemoteValueUpDown.Direction":
"""Convert current payload to value."""
if payload == DPTBinary(0):
return self.Direction.DOWN if self.invert else self.Direction.UP
if payload == DPTBinary(1):
return self.Direction.UP if self.invert else self.Direction.DOWN
raise CouldNotParseTelegram(
"payload invalid",
payload=payload,
device_name=self.device_name,
feature_name=self.feature_name,
)
async def down(self) -> None:
"""Set value to down."""
await self.set(self.Direction.DOWN)
async def up(self) -> None:
"""Set value to UP."""
# pylint: disable=invalid-name
await self.set(self.Direction.UP)
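# Mapping summary (derived from to_knx/from_knx above):
#
#   invert=False: UP -> DPTBinary(0), DOWN -> DPTBinary(1)
#   invert=True:  UP -> DPTBinary(1), DOWN -> DPTBinary(0)
#
# from_knx() applies the same table in the opposite direction when decoding
# incoming payloads, so a round trip always preserves the Direction value.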
| 33.489362
| 79
| 0.635642
|
be564d83e8f14f86629475e8cfb6e175fb213938
| 2,431
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_noisy719.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy719.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_noisy719.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=10
prog.cz(input_qubit[1],input_qubit[2]) # number=11
prog.h(input_qubit[2]) # number=12
prog.x(input_qubit[2]) # number=6
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_noisy719.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.011111
| 118
| 0.63225
|
9cad75434880076b8e7e090c24d18deff32a8257
| 5,259
|
py
|
Python
|
big-fish/big-fish-scripts/merge_df_features.py
|
Henley13/paper_translation_factories_2020
|
77558ed70467cf91062abf62e46c794bfbc08e4a
|
[
"BSD-3-Clause"
] | 2
|
2020-09-03T20:50:53.000Z
|
2020-10-02T14:39:31.000Z
|
big-fish/big-fish-scripts/merge_df_features.py
|
Henley13/paper_translation_factories_2020
|
77558ed70467cf91062abf62e46c794bfbc08e4a
|
[
"BSD-3-Clause"
] | 4
|
2020-01-15T10:26:14.000Z
|
2020-10-01T18:36:39.000Z
|
big-fish/big-fish-scripts/merge_df_features.py
|
Henley13/paper_translation_factories_2020
|
77558ed70467cf91062abf62e46c794bfbc08e4a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Merge the dataframes returned from each experience during the features
computation step.
"""
import os
import argparse
import time
import datetime
import sys
import bigfish.classification as classification
import pandas as pd
from utils import Logger
from loader import (get_experiences, generate_filename_base)
if __name__ == "__main__":
print()
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--output_directory",
help="Path of the output directory.",
type=str,
default="/Users/arthur/output/2019_racha")
parser.add_argument("--log_directory",
help="Path of the log directory.",
type=str,
default="/Users/arthur/output/2019_racha/log")
# initialize parameters
args = parser.parse_args()
output_directory = args.output_directory
log_directory = args.log_directory
# input-output directories
dataframe_directory = os.path.join(output_directory, "dataframes")
dataframe_features_directory = os.path.join(output_directory,
"dataframes",
"features")
# check directories exist
if not os.path.isdir(output_directory):
raise ValueError("Directory does not exist: {0}"
.format(output_directory))
if not os.path.isdir(dataframe_directory):
raise ValueError("Directory does not exist: {0}"
.format(dataframe_directory))
if not os.path.isdir(dataframe_features_directory):
raise ValueError("Directory does not exist: {0}"
.format(dataframe_features_directory))
if not os.path.isdir(log_directory):
raise ValueError("Directory does not exist: {0}"
.format(log_directory))
# initialize logging
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M:%S")
log_file = os.path.join(
log_directory, "log" + "_merge_features_dataframe")
sys.stdout = Logger(log_file)
print("Running {0} file...".format(os.path.basename(__file__)), "\n")
start_time = time.time()
print("Output directory: {0}".format(output_directory))
print("Dataframes directory: {0}".format(dataframe_directory))
print("Dataframes features directory: {0}"
.format(dataframe_features_directory))
print("Log directory: {0}".format(log_directory))
print("Log file: {0}".format(log_file.split("/")[-1]))
print("Date: {0}".format(date), "\n")
print("Files are saved with the pattern "
"'gene_author_puromycin_paper_drug_batch_fov' \n")
print("Merging dataframes saved during features computation...")
# initialize dataframe
features_name = classification.get_features_name(
names_features_distance=True,
names_features_intranuclear=True,
names_features_protrusion=True,
names_features_dispersion=True,
names_features_topography=True,
names_features_foci=True,
names_features_area=True)
columns = ["cell"] + features_name
df_features = pd.DataFrame(columns=columns)
nb_dataframes = 0
experiences = get_experiences()
for experience in experiences:
# load dataframe
filename = generate_filename_base(experience)
path = os.path.join(dataframe_features_directory, filename + ".csv")
if not os.path.exists(path):
print("\t", filename, "does not exist")
continue
df_experience = pd.read_csv(path, sep=';', encoding="utf-8")
print("\t", "{0}: {1}".format(filename, df_experience.shape))
# concatenate dataframes
df_features = pd.concat([df_features, df_experience])
nb_dataframes += 1
# save features dataframe
path = os.path.join(dataframe_directory, "features.csv")
df_features.reset_index(drop=True, inplace=True)
df_features.to_csv(path,
sep=';',
header=True,
index=False,
encoding="utf-8")
print()
print("Shape of the final features dataframe: {0}"
.format(df_features.shape))
print("Columns:")
columns_name = df_features.columns
for col in columns_name:
print("\t", col)
print()
print("Done ({0} dataframes)!".format(nb_dataframes), "\n")
# merge and save final dataframe
path = os.path.join(dataframe_directory, "cells.csv")
df_cell = pd.read_csv(path, sep=';', encoding="utf-8")
df = pd.merge(left=df_cell, right=df_features, how='inner', on=["cell"])
path = os.path.join(dataframe_directory, "merged.csv")
df.reset_index(drop=True, inplace=True)
df.to_csv(path, sep=';', header=True, index=False, encoding="utf-8")
print()
print("Shape of the merged dataframe: {0}".format(df.shape))
print("Columns:")
columns_name = df.columns
for col in columns_name:
print("\t", col)
end_time = time.time()
duration = int(round((end_time - start_time) / 60))
print("Duration: {0} minutes.".format(duration))
| 34.827815
| 76
| 0.628256
|
97e0beaa2ece04a63bff34b08cc88701ab658e57
| 4,177
|
py
|
Python
|
git_fortune/database.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
git_fortune/database.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
git_fortune/database.py
|
sirosen/git-fortune
|
69ef3e18506aa67fdc812854f1588828ea4e7448
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
A simple "database" of tips which we can identify by
- ID (number)
- Category (string)
"""
# globals (don't freak out, this is fine)
TIPS_BY_ID = {}
TIPS_BY_CATEGORY = {}
class Tip(object):
_tipid = 0
def __init__(self, tipstr, category="general"):
self.tipid = Tip._tipid
Tip._tipid += 1
self.tipstr = tipstr.strip()
self.category = category
def add_tip(tipstr, **kwargs):
global TIPS_BY_ID
global TIPS_BY_CATEGORY
tip = Tip(tipstr, **kwargs)
TIPS_BY_ID[tip.tipid] = tip
if tip.category not in TIPS_BY_CATEGORY:
TIPS_BY_CATEGORY[tip.category] = []
TIPS_BY_CATEGORY[tip.category].append(tip)
add_tip(
"""
To see changes which are in the staging area, use `git diff --staged`.
""",
category="diff",
)
add_tip(
"""
Modify your last commit before pushing with `git commit --amend`.
""",
category="commit",
)
add_tip(
"""
Use `git commit --verbose` to show a unified diff in your editor below the
commit message you are writing. This can help you write good commit messages!
""",
category="commit",
)
add_tip(
"""
`git log --graph` can show you a tree-like representation of the git history.
Try adding in `--oneline --decorate --all`.
""",
category="log",
)
add_tip(
"""
When resolving a difficult merge conflict, consider using both
`git checkout --ours` and `git checkout --theirs` and copying the versions of
the conflicting files into ours/ and theirs/ directories.
This will let you easily diff and methodically work through conflicts,
file-by-file.
""",
category="merges",
)
add_tip(
"""
Remember that `git pull` is just a `git fetch` followed by a `git merge`.
You can always run these commands yourself.
""",
category="pull",
)
add_tip(
"""
Avoid unnecessary merge commits when you `pull` by using `git pull --rebase`.
This replaces the `git merge` with a `git rebase`.
""",
category="pull",
)
add_tip(
"""
git branches are pointers to commits. Only the commits have ancestors and
descendants -- the branches can move around (and not just forward!)
"""
)
add_tip(
"""
Use `git add --patch` to stage your changes in chunks instead of whole files.
This can also help you review your own work before committing.
""",
category="commit",
)
add_tip(
"""
Want to only commit part of a file? Use `git add --patch` to choose which parts
you want to stage for commit.
""",
category="commit",
)
add_tip(
"""
`git branch --merged` shows you a list of all branches which are merged into
HEAD. It's great for cleanup.
""",
category="merges",
)
add_tip(
"""
Wondering if that refactor really did result in fewer lines of code?
Try `git diff --stat`.
""",
category="diff",
)
add_tip(
"""
Just like `diff`, `git diff` takes `-w` to ignore whitespace changes.
""",
category="diff",
)
add_tip(
"""
Do you ever accidentally push to a shared repo just because it's named
"origin"?
Rename "origin" to "upstream" with `git remote rename origin upstream` and
you'll probably stop making that mistake.
""",
category="remotes",
)
add_tip(
"""
Set up `git tree` by running
`git config --global alias.tree 'log --graph --decorate --all'`
This puts `tree` into your ~/.gitconfig so that you can invoke `git tree` in
any repo.
""",
category="log",
)
add_tip(
"""
Can't remember how you setup remotes in a repo? Use `git remote -v`.
""",
category="remotes",
)
add_tip(
"""
When branches are deleted from remotes, they may still be tracked locally. Use
`git fetch $REMOTE --prune` to clean things up.
""",
category="remotes",
)
add_tip(
"""
Deleting a branch from a remote is doable in a couple of ways, but one of the
more explicit ones is
git push --delete $REMOTE $BRANCH
"""
)
add_tip(
"""
git will automatically pick up on any commands you have with `git-` as a
prefix. If you save a script as `git-foo` and add it to your PATH, then you can
run it with `git foo`.
"""
)
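# Illustrative sketch (not part of the upstream tip list above): one way the
# TIPS_BY_ID / TIPS_BY_CATEGORY tables could be queried. The helper name and
# the use of random.choice are assumptions made only for this example.
def _example_random_tip(category=None):
    """Return one tip string, optionally restricted to a single category."""
    import random
    if category is not None:
        pool = TIPS_BY_CATEGORY.get(category, [])
    else:
        pool = list(TIPS_BY_ID.values())
    if not pool:
        return None
    return random.choice(pool).tipstr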
| 18.900452
| 79
| 0.665071
|
78b6cca69eaa99d5c2692db1f964622ce3194837
| 5,557
|
py
|
Python
|
google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming_v1/services/realms_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming_v1/services/realms_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming_v1/services/realms_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.cloud.gaming_v1.types import realms
class ListRealmsPager:
"""A pager for iterating through ``list_realms`` requests.
This class thinly wraps an initial
:class:`google.cloud.gaming_v1.types.ListRealmsResponse` object, and
provides an ``__iter__`` method to iterate through its
``realms`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListRealms`` requests and continue to iterate
through the ``realms`` field on the
corresponding responses.
All the usual :class:`google.cloud.gaming_v1.types.ListRealmsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., realms.ListRealmsResponse],
request: realms.ListRealmsRequest,
response: realms.ListRealmsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gaming_v1.types.ListRealmsRequest):
The initial request object.
response (google.cloud.gaming_v1.types.ListRealmsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = realms.ListRealmsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[realms.ListRealmsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[realms.Realm]:
for page in self.pages:
yield from page.realms
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
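# Illustrative sketch (not part of the generated client library code): a rough
# outline of how the synchronous pager above is typically consumed. The client
# object and the `parent` value are assumptions for this example; in practice
# RealmsServiceClient.list_realms() constructs and returns the pager.
def _example_collect_realm_names(client, parent):
    pager = client.list_realms(parent=parent)  # assumed to return a ListRealmsPager
    # Iterating the pager transparently fetches additional pages as needed.
    return [realm.name for realm in pager]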
class ListRealmsAsyncPager:
"""A pager for iterating through ``list_realms`` requests.
This class thinly wraps an initial
:class:`google.cloud.gaming_v1.types.ListRealmsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``realms`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListRealms`` requests and continue to iterate
through the ``realms`` field on the
corresponding responses.
All the usual :class:`google.cloud.gaming_v1.types.ListRealmsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[realms.ListRealmsResponse]],
request: realms.ListRealmsRequest,
response: realms.ListRealmsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.gaming_v1.types.ListRealmsRequest):
The initial request object.
response (google.cloud.gaming_v1.types.ListRealmsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = realms.ListRealmsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[realms.ListRealmsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[realms.Realm]:
async def async_generator():
async for page in self.pages:
for response in page.realms:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| 39.411348
| 95
| 0.666367
|
e836b19a158d5d99d3f87d314ec95ba77df130ad
| 9,483
|
py
|
Python
|
tools/webdriver/webdriver/transport.py
|
spao234/wpt
|
4b9447991bcb28f37b45532caf7f8e8747f9ad41
|
[
"BSD-3-Clause"
] | 1
|
2021-12-19T09:30:55.000Z
|
2021-12-19T09:30:55.000Z
|
tools/webdriver/webdriver/transport.py
|
spao234/wpt
|
4b9447991bcb28f37b45532caf7f8e8747f9ad41
|
[
"BSD-3-Clause"
] | 6
|
2021-03-31T20:00:14.000Z
|
2022-03-12T00:50:17.000Z
|
tools/webdriver/webdriver/transport.py
|
spao234/wpt
|
4b9447991bcb28f37b45532caf7f8e8747f9ad41
|
[
"BSD-3-Clause"
] | 1
|
2020-11-09T05:05:06.000Z
|
2020-11-09T05:05:06.000Z
|
import json
import select
from six import text_type, PY3
from six.moves.collections_abc import Mapping
from six.moves.http_client import HTTPConnection
from six.moves.urllib import parse as urlparse
from . import error
"""Implements HTTP transport for the WebDriver wire protocol."""
missing = object()
class ResponseHeaders(Mapping):
"""Read-only dictionary-like API for accessing response headers.
This class:
* Normalizes the header keys it is built with to lowercase (such that
iterating the items will return lowercase header keys).
* Has case-insensitive header lookup.
* Always returns all header values that have the same name, separated by
commas.
It does not ensure header types (e.g. binary vs string).
"""
def __init__(self, items):
self.headers_dict = {}
for key, value in items:
key = key.lower()
if key not in self.headers_dict:
self.headers_dict[key] = []
self.headers_dict[key].append(value)
def __getitem__(self, key):
"""Get all headers of a certain (case-insensitive) name. If there is
more than one, the values are returned comma separated"""
values = self.headers_dict[key.lower()]
if len(values) == 1:
return values[0]
else:
return ", ".join(values)
def get_list(self, key, default=missing):
"""Get all the header values for a particular field name as a list"""
try:
return self.headers_dict[key.lower()]
except KeyError:
if default is not missing:
return default
else:
raise
def __iter__(self):
for item in self.headers_dict:
yield item
def __len__(self):
return len(self.headers_dict)
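# Illustrative sketch (not part of the original wire-protocol module): a small,
# self-contained demonstration of the case-insensitive, multi-value behaviour
# documented in the ResponseHeaders docstring. The header values are made up.
def _example_response_headers():
    headers = ResponseHeaders([
        ("Set-Cookie", "a=1"),
        ("set-cookie", "b=2"),
        ("Content-Type", "application/json"),
    ])
    assert headers["SET-COOKIE"] == "a=1, b=2"            # values joined by ", "
    assert headers.get_list("set-cookie") == ["a=1", "b=2"]
    assert len(headers) == 2                              # two distinct header names
    return headers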
class Response(object):
"""
Describes an HTTP response received from a remote end whose
body has been read and parsed as appropriate.
"""
def __init__(self, status, body, headers):
self.status = status
self.body = body
self.headers = headers
def __repr__(self):
cls_name = self.__class__.__name__
if self.error:
return "<%s status=%s error=%s>" % (cls_name, self.status, repr(self.error))
return "<% status=%s body=%s>" % (cls_name, self.status, json.dumps(self.body))
def __str__(self):
return json.dumps(self.body, indent=2)
@property
def error(self):
if self.status != 200:
return error.from_response(self)
return None
@classmethod
def from_http(cls, http_response, decoder=json.JSONDecoder, **kwargs):
try:
body = json.load(http_response, cls=decoder, **kwargs)
headers = ResponseHeaders(http_response.getheaders())
except ValueError:
raise ValueError("Failed to decode response body as JSON:\n" +
http_response.read())
return cls(http_response.status, body, headers)
class HTTPWireProtocol(object):
"""
Transports messages (commands and responses) over the WebDriver
wire protocol.
Complex objects, such as ``webdriver.Element``, ``webdriver.Frame``,
and ``webdriver.Window`` are by default not marshaled to enable
use of `session.transport.send` in WPT tests::
session = webdriver.Session("127.0.0.1", 4444)
response = transport.send("GET", "element/active", None)
print response.body["value"]
# => {u'element-6066-11e4-a52e-4f735466cecf': u'<uuid>'}
Automatic marshaling is provided by ``webdriver.protocol.Encoder``
and ``webdriver.protocol.Decoder``, which can be passed in to
``HTTPWireProtocol.send`` along with a reference to the current
``webdriver.Session``::
session = webdriver.Session("127.0.0.1", 4444)
response = transport.send("GET", "element/active", None,
encoder=protocol.Encoder, decoder=protocol.Decoder,
session=session)
print response.body["value"]
# => webdriver.Element
"""
def __init__(self, host, port, url_prefix="/"):
"""
Construct interface for communicating with the remote server.
        :param host: Hostname of the remote WebDriver server.
        :param port: Port of the remote WebDriver server.
        :param url_prefix: Optional path prefix prepended to every request URL.
"""
self.host = host
self.port = port
self.url_prefix = url_prefix
self._conn = None
self._last_request_is_blocked = False
def __del__(self):
self.close()
def close(self):
"""Closes the current HTTP connection, if there is one."""
if self._conn:
self._conn.close()
@property
def connection(self):
"""Gets the current HTTP connection, or lazily creates one."""
if not self._conn:
conn_kwargs = {}
if not PY3:
conn_kwargs["strict"] = True
# We are not setting an HTTP timeout other than the default when the
            # connection is created. The send method takes a timeout value if needed.
self._conn = HTTPConnection(self.host, self.port, **conn_kwargs)
return self._conn
def url(self, suffix):
"""
From the relative path to a command end-point,
craft a full URL suitable to be used in a request to the HTTPD.
"""
return urlparse.urljoin(self.url_prefix, suffix)
def send(self,
method,
uri,
body=None,
headers=None,
encoder=json.JSONEncoder,
decoder=json.JSONDecoder,
timeout=None,
**codec_kwargs):
"""
Send a command to the remote.
The request `body` must be JSON serialisable unless a
custom `encoder` has been provided. This means complex
objects such as ``webdriver.Element``, ``webdriver.Frame``,
and `webdriver.Window`` are not automatically made
into JSON. This behaviour is, however, provided by
``webdriver.protocol.Encoder``, should you want it.
        Similarly, the response body is returned au naturel
as plain JSON unless a `decoder` that converts web
element references to ``webdriver.Element`` is provided.
Use ``webdriver.protocol.Decoder`` to achieve this behaviour.
The client will attempt to use persistent HTTP connections.
:param method: `GET`, `POST`, or `DELETE`.
        :param uri: Relative endpoint of the request URL path.
:param body: Body of the request. Defaults to an empty
dictionary if ``method`` is `POST`.
:param headers: Additional dictionary of headers to include
in the request.
:param encoder: JSON encoder class, which defaults to
``json.JSONEncoder`` unless specified.
:param decoder: JSON decoder class, which defaults to
``json.JSONDecoder`` unless specified.
:param codec_kwargs: Surplus arguments passed on to `encoder`
and `decoder` on construction.
:return: Instance of ``webdriver.transport.Response``
describing the HTTP response received from the remote end.
:raises ValueError: If `body` or the response body are not
JSON serialisable.
"""
if body is None and method == "POST":
body = {}
payload = None
if body is not None:
try:
payload = json.dumps(body, cls=encoder, **codec_kwargs)
except ValueError:
raise ValueError("Failed to encode request body as JSON:\n"
"%s" % json.dumps(body, indent=2))
# When the timeout triggers, the TestRunnerManager thread will reuse
        # this connection to check if the WebDriver is alive and we may end up
        # raising an httplib.CannotSendRequest exception if the WebDriver is
# not responding and this httplib.request() call is blocked on the
# runner thread. We use the boolean below to check for that and restart
# the connection in that case.
self._last_request_is_blocked = True
        response = self._request(method, uri, payload, headers, timeout=timeout)
self._last_request_is_blocked = False
return Response.from_http(response, decoder=decoder, **codec_kwargs)
def _request(self, method, uri, payload, headers=None, timeout=None):
if isinstance(payload, text_type):
payload = payload.encode("utf-8")
if headers is None:
headers = {}
headers.update({"Connection": "keep-alive"})
url = self.url(uri)
if self._last_request_is_blocked or self._has_unread_data():
self.close()
self.connection.request(method, url, payload, headers)
# timeout for request has to be set just before calling httplib.getresponse()
        # and the previous value restored just after that, even if an exception is raised
try:
if timeout:
previous_timeout = self._conn.gettimeout()
self._conn.settimeout(timeout)
response = self.connection.getresponse()
finally:
if timeout:
self._conn.settimeout(previous_timeout)
return response
def _has_unread_data(self):
return self._conn and self._conn.sock and select.select([self._conn.sock], [], [], 0)[0]
| 35.516854
| 96
| 0.621322
|
4a19b00ac0b8c2d8e450aa66fa2bcbbafcf0bafb
| 25,852
|
py
|
Python
|
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
build/fbcode_builder/getdeps.py
|
xiaosumay/fizz
|
5fe89e57526a977bd420a8269c860654c8021c54
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
# We don't import cache.create_cache directly as the facebook
# specific import below may monkey patch it, and we want to
# observe the patched version of this function!
import getdeps.cache as cache_module
from getdeps.buildopts import setup_build_options
from getdeps.dyndeps import create_dyn_dep_munger
from getdeps.errors import TransientFailure
from getdeps.load import ManifestLoader
from getdeps.manifest import ManifestParser
from getdeps.platform import HostType
from getdeps.subcmd import SubCmd, add_subcommands, cmd
try:
import getdeps.facebook # noqa: F401
except ImportError:
# we don't ship the facebook specific subdir,
# so allow that to fail silently
pass
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "getdeps"))
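# Illustrative sketch (not part of the original script): why create_cache is
# reached through the module attribute rather than imported directly. The
# attribute lookup below sees any monkey-patched replacement installed by the
# optional facebook-internal import, whereas `from getdeps.cache import
# create_cache` would freeze the original binding at import time.
def _example_late_bound_cache():
    return cache_module.create_cache()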
class UsageError(Exception):
pass
@cmd("validate-manifest", "parse a manifest and validate that it is correct")
class ValidateManifest(SubCmd):
def run(self, args):
try:
ManifestParser(file_name=args.file_name)
print("OK", file=sys.stderr)
return 0
except Exception as exc:
print("ERROR: %s" % str(exc), file=sys.stderr)
return 1
def setup_parser(self, parser):
parser.add_argument("file_name", help="path to the manifest file")
@cmd("show-host-type", "outputs the host type tuple for the host machine")
class ShowHostType(SubCmd):
def run(self, args):
host = HostType()
print("%s" % host.as_tuple_string())
return 0
class ProjectCmdBase(SubCmd):
def run(self, args):
opts = setup_build_options(args)
ctx_gen = opts.get_context_generator(facebook_internal=args.facebook_internal)
if args.test_dependencies:
ctx_gen.set_value_for_all_projects("test", "on")
if args.enable_tests:
ctx_gen.set_value_for_project(args.project, "test", "on")
else:
ctx_gen.set_value_for_project(args.project, "test", "off")
loader = ManifestLoader(opts, ctx_gen)
self.process_project_dir_arguments(args, loader)
manifest = loader.load_manifest(args.project)
self.run_project_cmd(args, loader, manifest)
def process_project_dir_arguments(self, args, loader):
def parse_project_arg(arg, arg_type):
parts = arg.split(":")
if len(parts) == 2:
project, path = parts
elif len(parts) == 1:
project = args.project
path = parts[0]
else:
raise UsageError(
"invalid %s argument; too many ':' characters: %s" % (arg_type, arg)
)
return project, os.path.abspath(path)
for arg in args.src_dir:
project, path = parse_project_arg(arg, "--src-dir")
loader.set_project_src_dir(project, path)
for arg in args.build_dir:
project, path = parse_project_arg(arg, "--build-dir")
loader.set_project_build_dir(project, path)
for arg in args.install_dir:
project, path = parse_project_arg(arg, "--install-dir")
loader.set_project_install_dir(project, path)
def setup_parser(self, parser):
parser.add_argument(
"project",
help=(
"name of the project or path to a manifest "
"file describing the project"
),
)
parser.add_argument(
"--no-tests",
action="store_false",
dest="enable_tests",
default=True,
help="Disable building tests for this project.",
)
parser.add_argument(
"--test-dependencies",
action="store_true",
help="Enable building tests for dependencies as well.",
)
parser.add_argument(
"--src-dir",
default=[],
action="append",
help="Specify a local directory to use for the project source, "
"rather than fetching it.",
)
parser.add_argument(
"--build-dir",
default=[],
action="append",
help="Explicitly specify the build directory to use for the "
"project, instead of the default location in the scratch path. "
"This only affects the project specified, and not its dependencies.",
)
parser.add_argument(
"--install-dir",
default=[],
action="append",
help="Explicitly specify the install directory to use for the "
"project, instead of the default location in the scratch path. "
"This only affects the project specified, and not its dependencies.",
)
self.setup_project_cmd_parser(parser)
def setup_project_cmd_parser(self, parser):
pass
class CachedProject(object):
""" A helper that allows calling the cache logic for a project
from both the build and the fetch code """
def __init__(self, cache, loader, m):
self.m = m
self.inst_dir = loader.get_project_install_dir(m)
self.project_hash = loader.get_project_hash(m)
self.ctx = loader.ctx_gen.get_context(m.name)
self.loader = loader
self.cache = cache
self.cache_file_name = "-".join(
(
m.name,
self.ctx.get("os"),
self.ctx.get("distro") or "none",
self.ctx.get("distro_vers") or "none",
self.project_hash,
"buildcache.tgz",
)
)
def is_cacheable(self):
""" We only cache third party projects """
return self.cache and not self.m.shipit_fbcode_builder
def download(self):
if self.is_cacheable() and not os.path.exists(self.inst_dir):
print("check cache for %s" % self.cache_file_name)
dl_dir = os.path.join(self.loader.build_opts.scratch_dir, "downloads")
if not os.path.exists(dl_dir):
os.makedirs(dl_dir)
try:
target_file_name = os.path.join(dl_dir, self.cache_file_name)
if self.cache.download_to_file(self.cache_file_name, target_file_name):
tf = tarfile.open(target_file_name, "r")
print(
"Extracting %s -> %s..." % (self.cache_file_name, self.inst_dir)
)
tf.extractall(self.inst_dir)
return True
except Exception as exc:
print("%s" % str(exc))
return False
def upload(self):
if self.cache and not self.m.shipit_fbcode_builder:
# We can prepare an archive and stick it in LFS
tempdir = tempfile.mkdtemp()
tarfilename = os.path.join(tempdir, self.cache_file_name)
print("Archiving for cache: %s..." % tarfilename)
tf = tarfile.open(tarfilename, "w:gz")
tf.add(self.inst_dir, arcname=".")
tf.close()
try:
self.cache.upload_from_file(self.cache_file_name, tarfilename)
except Exception as exc:
print(
"Failed to upload to cache (%s), continue anyway" % str(exc),
file=sys.stderr,
)
shutil.rmtree(tempdir)
@cmd("fetch", "fetch the code for a given project")
class FetchCmd(ProjectCmdBase):
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="fetch the transitive deps also",
action="store_true",
default=False,
)
parser.add_argument(
"--host-type",
help=(
"When recursively fetching, fetch deps for "
"this host type rather than the current system"
),
)
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
projects = loader.manifests_in_dependency_order()
else:
projects = [manifest]
cache = cache_module.create_cache()
for m in projects:
cached_project = CachedProject(cache, loader, m)
if cached_project.download():
continue
inst_dir = loader.get_project_install_dir(m)
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
if os.path.exists(built_marker):
with open(built_marker, "r") as f:
built_hash = f.read().strip()
project_hash = loader.get_project_hash(m)
if built_hash == project_hash:
continue
# We need to fetch the sources
fetcher = loader.create_fetcher(m)
fetcher.update()
@cmd("list-deps", "lists the transitive deps for a given project")
class ListDepsCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
for m in loader.manifests_in_dependency_order():
print(m.name)
return 0
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--host-type",
help=(
"Produce the list for the specified host type, "
"rather than that of the current system"
),
)
def clean_dirs(opts):
for d in ["build", "installed", "extracted", "shipit"]:
d = os.path.join(opts.scratch_dir, d)
print("Cleaning %s..." % d)
if os.path.exists(d):
shutil.rmtree(d)
@cmd("clean", "clean up the scratch dir")
class CleanCmd(SubCmd):
def run(self, args):
opts = setup_build_options(args)
clean_dirs(opts)
@cmd("show-inst-dir", "print the installation dir for a given project")
class ShowInstDirCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
manifests = loader.manifests_in_dependency_order()
else:
manifests = [manifest]
for m in manifests:
inst_dir = loader.get_project_install_dir(m)
print(inst_dir)
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="print the transitive deps also",
action="store_true",
default=False,
)
@cmd("show-source-dir", "print the source dir for a given project")
class ShowSourceDirCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.recursive:
manifests = loader.manifests_in_dependency_order()
else:
manifests = [manifest]
for m in manifests:
fetcher = loader.create_fetcher(m)
print(fetcher.get_src_dir())
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--recursive",
help="print the transitive deps also",
action="store_true",
default=False,
)
@cmd("build", "build a given project")
class BuildCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
if args.clean:
clean_dirs(loader.build_opts)
print("Building on %s" % loader.ctx_gen.get_context(args.project))
projects = loader.manifests_in_dependency_order()
cache = cache_module.create_cache()
# Accumulate the install directories so that the build steps
# can find their dep installation
install_dirs = []
for m in projects:
fetcher = loader.create_fetcher(m)
if args.clean:
fetcher.clean()
build_dir = loader.get_project_build_dir(m)
inst_dir = loader.get_project_install_dir(m)
if m == manifest or not args.no_deps:
print("Assessing %s..." % m.name)
project_hash = loader.get_project_hash(m)
ctx = loader.ctx_gen.get_context(m.name)
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
cached_project = CachedProject(cache, loader, m)
reconfigure, sources_changed = self.compute_source_change_status(
cached_project, fetcher, m, built_marker, project_hash
)
if sources_changed or reconfigure or not os.path.exists(built_marker):
if os.path.exists(built_marker):
os.unlink(built_marker)
src_dir = fetcher.get_src_dir()
builder = m.create_builder(
loader.build_opts, src_dir, build_dir, inst_dir, ctx
)
builder.build(install_dirs, reconfigure=reconfigure)
with open(built_marker, "w") as f:
f.write(project_hash)
# Only populate the cache from continuous build runs
if args.schedule_type == "continuous":
cached_project.upload()
install_dirs.append(inst_dir)
def compute_source_change_status(
self, cached_project, fetcher, m, built_marker, project_hash
):
reconfigure = False
sources_changed = False
if not cached_project.download():
check_fetcher = True
if os.path.exists(built_marker):
check_fetcher = False
with open(built_marker, "r") as f:
built_hash = f.read().strip()
if built_hash == project_hash:
if cached_project.is_cacheable():
# We can blindly trust the build status
reconfigure = False
sources_changed = False
else:
# Otherwise, we may have changed the source, so let's
# check in with the fetcher layer
check_fetcher = True
else:
# Some kind of inconsistency with a prior build,
# let's run it again to be sure
os.unlink(built_marker)
reconfigure = True
sources_changed = True
if check_fetcher:
change_status = fetcher.update()
reconfigure = change_status.build_changed()
sources_changed = change_status.sources_changed()
return reconfigure, sources_changed
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--clean",
action="store_true",
default=False,
help=(
"Clean up the build and installation area prior to building, "
"causing the projects to be built from scratch"
),
)
parser.add_argument(
"--no-deps",
action="store_true",
default=False,
help=(
"Only build the named project, not its deps. "
"This is most useful after you've built all of the deps, "
"and helps to avoid waiting for relatively "
"slow up-to-date-ness checks"
),
)
parser.add_argument(
"--schedule-type", help="Indicates how the build was activated"
)
@cmd("fixup-dyn-deps", "Adjusts dynamic dependencies for packaging purposes")
class FixupDeps(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
projects = loader.manifests_in_dependency_order()
# Accumulate the install directories so that the build steps
# can find their dep installation
install_dirs = []
for m in projects:
inst_dir = loader.get_project_install_dir(m)
install_dirs.append(inst_dir)
if m == manifest:
dep_munger = create_dyn_dep_munger(loader.build_opts, install_dirs)
dep_munger.process_deps(args.destdir, args.final_install_prefix)
def setup_project_cmd_parser(self, parser):
parser.add_argument("destdir", help=("Where to copy the fixed up executables"))
parser.add_argument(
"--final-install-prefix", help=("specify the final installation prefix")
)
@cmd("test", "test a given project")
class TestCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
projects = loader.manifests_in_dependency_order()
# Accumulate the install directories so that the test steps
# can find their dep installation
install_dirs = []
for m in projects:
inst_dir = loader.get_project_install_dir(m)
if m == manifest or args.test_dependencies:
built_marker = os.path.join(inst_dir, ".built-by-getdeps")
if not os.path.exists(built_marker):
print("project %s has not been built" % m.name)
# TODO: we could just go ahead and build it here, but I
# want to tackle that as part of adding build-for-test
# support.
return 1
fetcher = loader.create_fetcher(m)
src_dir = fetcher.get_src_dir()
ctx = loader.ctx_gen.get_context(m.name)
build_dir = loader.get_project_build_dir(m)
builder = m.create_builder(
loader.build_opts, src_dir, build_dir, inst_dir, ctx
)
builder.run_tests(
install_dirs,
schedule_type=args.schedule_type,
owner=args.test_owner,
)
install_dirs.append(inst_dir)
def setup_project_cmd_parser(self, parser):
parser.add_argument(
"--schedule-type", help="Indicates how the build was activated"
)
parser.add_argument("--test-owner", help="Owner for testpilot")
@cmd("generate-github-actions", "generate a GitHub actions configuration")
class GenerateGitHubActionsCmd(ProjectCmdBase):
def run_project_cmd(self, args, loader, manifest):
platforms = [
HostType("linux", "ubuntu", "18"),
HostType("darwin", None, None),
HostType("windows", None, None),
]
with open(args.output_file, "w") as out:
# Deliberate line break here because the @ and the generated
# symbols are meaningful to our internal tooling when they
# appear in a single token
out.write("# This file was @")
out.write("generated by getdeps.py\n")
out.write(
"""
name: CI
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
"""
)
for p in platforms:
build_opts = setup_build_options(args, p)
self.write_job_for_platform(out, args, build_opts)
def write_job_for_platform(self, out, args, build_opts):
ctx_gen = build_opts.get_context_generator()
loader = ManifestLoader(build_opts, ctx_gen)
manifest = loader.load_manifest(args.project)
manifest_ctx = loader.ctx_gen.get_context(manifest.name)
# Some projects don't do anything "useful" as a leaf project, only
# as a dep for a leaf project. Check for those here; we don't want
# to waste the effort scheduling them on CI.
# We do this by looking at the builder type in the manifest file
# rather than creating a builder and checking its type because we
# don't know enough to create the full builder instance here.
if manifest.get("build", "builder", ctx=manifest_ctx) == "nop":
return None
if build_opts.is_linux():
job_name = "linux"
runs_on = "ubuntu-18.04"
elif build_opts.is_windows():
# We're targeting the windows-2016 image because it has
# Visual Studio 2017 installed, and at the time of writing,
# the version of boost in the manifests (1.69) is not
# buildable with Visual Studio 2019
job_name = "windows"
runs_on = "windows-2016"
else:
job_name = "mac"
runs_on = "macOS-latest"
out.write(" %s:\n" % job_name)
out.write(" runs-on: %s\n" % runs_on)
out.write(" steps:\n")
out.write(" - uses: actions/checkout@v1\n")
projects = loader.manifests_in_dependency_order()
for m in projects:
if m != manifest:
out.write(" - name: Fetch %s\n" % m.name)
out.write(
" run: python build/fbcode_builder/getdeps.py fetch "
"--no-tests %s\n" % m.name
)
for m in projects:
if m != manifest:
out.write(" - name: Build %s\n" % m.name)
out.write(
" run: python build/fbcode_builder/getdeps.py build "
"--no-tests %s\n" % m.name
)
out.write(" - name: Build %s\n" % manifest.name)
out.write(
" run: python build/fbcode_builder/getdeps.py build --src-dir=. %s\n"
% manifest.name
)
out.write(" - name: Test %s\n" % manifest.name)
out.write(
" run: python build/fbcode_builder/getdeps.py test --src-dir=. %s\n"
% manifest.name
)
def setup_project_cmd_parser(self, parser):
parser.add_argument("--output-file", help="The name of the yaml file")
def get_arg_var_name(args):
for arg in args:
if arg.startswith("--"):
return arg[2:].replace("-", "_")
raise Exception("unable to determine argument variable name from %r" % (args,))
def parse_args():
# We want to allow common arguments to be specified either before or after
# the subcommand name. In order to do this we add them to the main parser
# and to subcommand parsers. In order for this to work, we need to tell
# argparse that the default value is SUPPRESS, so that the default values
# from the subparser arguments won't override values set by the user from
# the main parser. We maintain our own list of desired defaults in the
# common_defaults dictionary, and manually set those if the argument wasn't
# present at all.
common_args = argparse.ArgumentParser(add_help=False)
common_defaults = {}
def add_common_arg(*args, **kwargs):
var_name = get_arg_var_name(args)
default_value = kwargs.pop("default", None)
common_defaults[var_name] = default_value
kwargs["default"] = argparse.SUPPRESS
common_args.add_argument(*args, **kwargs)
add_common_arg("--scratch-path", help="Where to maintain checkouts and build dirs")
add_common_arg(
"--vcvars-path", default=None, help="Path to the vcvarsall.bat on Windows."
)
add_common_arg(
"--install-prefix",
help=(
"Where the final build products will be installed "
"(default is [scratch-path]/installed)"
),
)
add_common_arg(
"--num-jobs",
type=int,
help=(
"Number of concurrent jobs to use while building. "
"(default=number of cpu cores)"
),
)
add_common_arg(
"--use-shipit",
help="use the real ShipIt instead of the simple shipit transformer",
action="store_true",
default=False,
)
add_common_arg(
"--facebook-internal",
help="Setup the build context as an FB internal build",
action="store_true",
default=False,
)
ap = argparse.ArgumentParser(
description="Get and build dependencies and projects", parents=[common_args]
)
sub = ap.add_subparsers(
# metavar suppresses the long and ugly default list of subcommands on a
# single line. We still render the nicer list below where we would
# have shown the nasty one.
metavar="",
title="Available commands",
help="",
)
add_subcommands(sub, common_args)
args = ap.parse_args()
for var_name, default_value in common_defaults.items():
if not hasattr(args, var_name):
setattr(args, var_name, default_value)
return ap, args
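# Illustrative sketch (not part of the original script): a tiny standalone
# demonstration of the argparse.SUPPRESS technique described in parse_args()
# above. The "--verbose" flag and the "build" subcommand are made up for this
# example only.
def _example_suppress_defaults():
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument("--verbose", action="store_true", default=argparse.SUPPRESS)
    ap = argparse.ArgumentParser(parents=[common])
    sub = ap.add_subparsers(dest="cmd")
    sub.add_parser("build", parents=[common])
    args = ap.parse_args(["--verbose", "build"])
    # The subparser's SUPPRESS default does not clobber the value parsed by the
    # main parser; if the flag was absent entirely, fall back to a manual
    # default, mirroring the common_defaults handling above.
    if not hasattr(args, "verbose"):
        args.verbose = False
    return args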
def main():
ap, args = parse_args()
if getattr(args, "func", None) is None:
ap.print_help()
return 0
try:
return args.func(args)
except UsageError as exc:
ap.error(str(exc))
return 1
except TransientFailure as exc:
print("TransientFailure: %s" % str(exc))
# This return code is treated as a retryable transient infrastructure
# error by Facebook's internal CI, rather than eg: a build or code
# related error that needs to be fixed before progress can be made.
return 128
except subprocess.CalledProcessError as exc:
print("%s" % str(exc), file=sys.stderr)
print("!! Failed", file=sys.stderr)
return 1
if __name__ == "__main__":
sys.exit(main())
| 35.077341
| 88
| 0.585255
|
ce416348bbbb763f731938249f740822e0f32cdf
| 6,795
|
py
|
Python
|
omas/machine_mappings/profiles_db_ccfe.py
|
gafusion/omas
|
8e9b725483655db0ccbf9a4f7aa9eba7c6c04864
|
[
"MIT"
] | 20
|
2017-11-07T14:36:21.000Z
|
2021-03-27T19:14:17.000Z
|
omas/machine_mappings/profiles_db_ccfe.py
|
Reksoatr/omas
|
6740fd040d6af59e0aec54f977637b221733bd07
|
[
"MIT"
] | 170
|
2017-11-09T06:40:11.000Z
|
2022-03-29T17:33:26.000Z
|
omas/machine_mappings/profiles_db_ccfe.py
|
Reksoatr/omas
|
6740fd040d6af59e0aec54f977637b221733bd07
|
[
"MIT"
] | 6
|
2017-11-22T14:44:54.000Z
|
2022-01-10T19:52:47.000Z
|
import os
import numpy as np
from inspect import unwrap
from omas import *
from omas.omas_utils import printd, printe, unumpy
from omas.machine_mappings._common import *
__all__ = []
__regression_arguments__ = {'__all__': __all__}
"""
Connects to tokamak-profiledb.ccfe.ac.uk and generates an ODS with core profiles and zero-D parameters
options:
tok : string : 'jet' or 'd3d'
db : string : '98' or '08' (1998 or 2008 database)
ngrid : int : nrho grid output into ods
Data sources:
    profiles-db; more information: tokamak-profiledb.ccfe.ac.uk
example usage:
ods = ODS()
ods.open('profiles_db_ccfe', pulse=81499, options={'tok':'d3d', 'db':'08'})
"""
@machine_mapping_function(__regression_arguments__, pulse=77557, tok="d3d", db='98', ngrid=201)
def profile_db_to_ODS(ods, pulse, tok, db, ngrid):
ods['dataset_description.ids_properties.comment'] = f'Comment for {tok}'
if ods is None:
ods = ODS()
if tok == 'DIII-D':
tok = 'd3d'
elif tok == 'JET':
tok = 'jet'
# fmt: off
available_in_database = {
'd3d': [69627, 69648, 71378, 71384, 77557, 77559, 78106, 78109, 78281, 78283, 78316, 78328, 81321, 81329, 81499, 81507, 82183,
82188, 82205, 82788, 84682, 87031, 89943, 90105, 90108, 90117, 90118, 92664, 95989, 98549, 98775, 98777, 99251, 99411,
99696, 103818, 104276, 106919, 106956, 111203, 111221, 111239, 118334, 118341, 118348, 118446],
'jet': [19649, 19691, 26087, 26095, 32745, 33131, 33140, 33465, 34340, 35156, 35171, 35174, 37379, 37718, 37728, 37944, 38285,
38287, 38407, 38415, 40542, 40847, 42762, 42794, 42976, 42982, 42997, 43002, 43134, 43452, 46123, 46664, 49687, 50844,
51599, 51976, 52009, 52014, 52015, 52022, 52024, 52025, 52096, 52979, 53028, 53030, 53212, 53299, 53501, 53521, 53532,
53537, 53550, 55935, 57987, 58159, 58323, 60927, 60931, 60933]
}
# fmt: on
if tok not in available_in_database:
print(f"tokamak not in database see: {available_in_database.keys()}")
if pulse not in available_in_database[tok]:
print(f"Shot {pulse} not in {tok} database, available pulses = {available_in_database[tok]}")
zero_d_locations = {
"['ZEROD']['RGEO']": 'summary.boundary.geometric_axis_r.value',
"['ZEROD']['AMIN']": 'summary.boundary.minor_radius.value',
"['ZEROD']['DWTOT']": 'summary.global_quantities.denergy_thermal_dt.value',
# 'hl_mode': 'summary.global_quantities.h_mode.value',
"['ZEROD']['IP']": 'summary.global_quantities.ip.value',
"['ZEROD']['ZEFF']": 'summary.volume_average.zeff.value',
"['ZEROD']['TAUTH']": 'summary.global_quantities.tau_energy.value',
"['ZEROD']['BT']": 'summary.global_quantities.b0.value',
"['ZEROD']['KAPPA']": 'summary.boundary.elongation.value',
"['ZEROD']['DELTA']": 'summary.boundary.triangularity_upper.value',
"['ZEROD']['DELTA']": 'summary.boundary.triangularity_lower.value',
"['ZEROD']['NEL']": 'summary.line_average.n_e.value',
"['ZEROD']['PINJ']": 'summary.heating_current_drive.power_launched_nbi.value',
"['ZEROD']['PECH']": 'summary.heating_current_drive.power_launched_ec.value',
"['ZEROD']['POHM']": 'summary.global_quantities.power_ohm.value',
"['ZEROD']['PICRH']": 'summary.heating_current_drive.power_launched_ic.value',
"['ZEROD']['PLTH']": 'summary.global_quantities.power_loss.value',
"['ZEROD']['WTH']": 'summary.global_quantities.energy_thermal.value',
}
one_d_locations = {
# 'zeff': 'core_profiles.profiles_1d[0].zeff',
"['TWOD']['VOLUME']": 'core_profiles.profiles_1d[0].grid.volume',
"['TWOD']['VROT']": 'core_profiles.profiles_1d[0].rotation_frequency_tor_sonic', # this is wrong
"['TWOD']['Q']": 'core_profiles.profiles_1d[0].q',
"['TWOD']['TI']": 'core_profiles.profiles_1d[0].t_i_average',
"['TWOD']['TE']": 'core_profiles.profiles_1d[0].electrons.temperature',
"['TWOD']['NE']": 'core_profiles.profiles_1d[0].electrons.density_thermal',
"['TWOD']['CURTOT']": 'core_profiles.profiles_1d[0].j_tor',
"['TWOD']['QNBII']": 'q_nbi_i',
"['TWOD']['QNBIE']": 'q_nbi_e',
"['TWOD']['SNBII']": 's_nbi_se',
"['TWOD']['QOHM']": 'q_ohm_e',
}
mds_tree = mdstree(server='tokamak-profiledb.ccfe.ac.uk:8000', pulse=int(pulse), treename=f'pr{db}_{tok}')
ods['summary.global_quantities.h_mode.value'] = [True]
ods['dataset_description.data_entry.machine'] = tok
ods['dataset_description.data_entry.pulse'] = int(mds_tree['ZEROD']['SHOT'].data())
ods['summary.time'] = mds_tree['ZEROD']['TIME'].data()
for mds_location, ods_location in zero_d_locations.items():
try:
ods[ods_location] = np.array(eval(f"mds_tree{mds_location}").data())
except Exception as _excp:
printe(repr(_excp))
ods[ods_location] = [0.0]
heating_idx_dict = {'nbi': 2, 'ec': 3, 'lh': 4, 'ic': 5, 'fusion': 6, 'ohm': 7}
ion_elec = {'i': 'total_ion_energy', 'e': 'electrons.energy', 'se': 'electrons.particles'}
rho_init = np.linspace(0, 1, len(mds_tree['TWOD']['VOLUME'].data()[0]))
if ngrid > 0:
rho_tor_norm = np.linspace(0, 1, ngrid)
else:
rho_tor_norm = rho_init
ods['core_profiles.profiles_1d[0].grid.rho_tor_norm'] = rho_tor_norm
for mds_location, ods_location in one_d_locations.items():
try:
if '.' not in ods_location:
name = ods_location.split(sep='_')[1]
ods[f'core_sources.source.+.identifier.index'] = heating_idx_dict[name]
ods[f'core_sources.source.-1.identifier.name'] = name
ods[f'core_sources.source.-1.profiles_1d[0].grid.rho_tor_norm'] = rho_tor_norm
ods[f'core_sources.source.-1.profiles_1d[0].grid.volume'] = np.interp(
x=rho_tor_norm, xp=rho_init, fp=mds_tree['TWOD']['VOLUME'].data()[0]
)
ods[f"core_sources.source.-1.profiles_1d[0].{ion_elec[ods_location.split(sep='_')[-1]]}"] = np.interp(
x=rho_tor_norm, xp=rho_init, fp=eval(f"mds_tree{mds_location}").data()[0]
)
else:
ods[ods_location] = np.interp(x=rho_tor_norm, xp=rho_init, fp=eval(f"mds_tree{mds_location}").data()[0])
if 'rotation_frequency_tor_sonic' in ods_location:
ods[ods_location] /= 2 * np.pi
except Exception as e:
printe(repr(e))
ods[ods_location] = np.zeros(ngrid)
# =====================
if __name__ == '__main__':
test_machine_mapping_functions(__all__, globals(), locals())
| 47.517483
| 134
| 0.621781
|
395bf59ac440fa7a3f2977319552b01b682ee49e
| 4,921
|
py
|
Python
|
Download.py
|
Morasiu/VideoDownloader
|
626a1a9f83218066834f0aae6fa37e57aa777e71
|
[
"MIT"
] | null | null | null |
Download.py
|
Morasiu/VideoDownloader
|
626a1a9f83218066834f0aae6fa37e57aa777e71
|
[
"MIT"
] | null | null | null |
Download.py
|
Morasiu/VideoDownloader
|
626a1a9f83218066834f0aae6fa37e57aa777e71
|
[
"MIT"
] | null | null | null |
from EpisodeType import EpisodeType
from urllib.parse import urlparse
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from requests import get
from tqdm import tqdm
from tkinter import *
import urllib3, os, time
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
__version__ = "0.1.0"
class WbijamDownloader:
def __init__(self, download_path, should_download_fillers = False):
self.download_path = download_path
self.should_download_fillers = should_download_fillers
def download_episode(self, driver, e):
mp4up_url = None
print(f"Downloading {e['name']}")
driver.get(e["link"])
video_providers = driver.find_element_by_class_name("lista").find_elements_by_class_name("lista_hover")
for provider in video_providers:
provider_name = provider.find_elements_by_tag_name("td")[2].text
if provider_name == "mp4up":
mp4up_url = provider.find_elements_by_tag_name("td")[4].find_element_by_class_name("odtwarzacz_link").get_attribute("rel")
break
if mp4up_url is None:
raise ValueError("mp4up video not found.")
main_url = urlparse(driver.current_url)
player_url = f"{main_url.scheme}://{main_url.hostname}/odtwarzacz-{mp4up_url}.html"
driver.get(player_url)
frame = driver.find_element_by_xpath("/html/body/div[2]/div/center/iframe")
driver.switch_to.frame(frame)
video = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_tag_name("video"))
time.sleep(2)
video_url = video.find_element_by_tag_name("source").get_attribute("src")
print(f"Found video at: {video_url}")
file_name = self.format_file_name(e["name"])
full_file_name = self.download_path + file_name
r = get(video_url, stream=True, verify=False)
file_size = int(r.headers['Content-Length'])
chunk_size=1024
num_bars = int(file_size / chunk_size)
with open(full_file_name, 'wb') as fp:
for chunk in tqdm(
r.iter_content(chunk_size=chunk_size)
, total= num_bars
, unit = 'KB'
, desc = file_name
, leave = True # progressbar stays
):
fp.write(chunk)
print("Done")
def format_file_name(self, file_name):
file_name = file_name \
.replace('.', '') \
.replace(":", '') \
.replace(' ', '_') \
.replace('\'', '') \
.replace('!', '') \
.replace('?', '') \
.replace('"', '')
file_name += ".mp4"
return file_name
def get_episodes(self, driver):
table = driver.find_element_by_class_name("lista")
table_position = table.find_element_by_tag_name("tbody").find_elements_by_tag_name("tr")
episodes = []
for e in table_position:
a = e.find_element_by_tag_name("td").find_element_by_tag_name("a")
if e.find_elements_by_tag_name("td")[1].text == "filler":
episode_type = EpisodeType.Filler
else:
episode_type = EpisodeType.Normal
episode = {
"name": a.text,
"link": a.get_property("href"),
"type": episode_type
}
episodes.append(episode)
if not self.should_download_fillers:
print("Ignoring fillers.")
episodes = [e for e in episodes if e["type"] != EpisodeType.Filler]
episodes.reverse()
print(f"Found episodes: {len(episodes)}")
return episodes
    def filter_episodes(self, episodes):
files = os.listdir(self.download_path)
episodes = [e for e in episodes if self.format_file_name(e["name"]) not in files]
return episodes
def start_downloading(self, url: str):
print("======================")
print(f"Video downloader {__version__}")
print("======================")
if not os.path.exists(self.download_path):
print(f"Download directory ({self.download_path})not found. Creating new one.")
os.mkdir(self.download_path)
driver = webdriver.Chrome()
driver.get(url=url)
episodes = self.get_episodes(driver)
        episodes = self.filter_episodes(episodes)
print(f"Episodes remained to download: {len(episodes)}")
for e in episodes:
self.download_episode(driver, e)
if __name__ == "__main__":
downloader = WbijamDownloader("D:/Download/Attack on Titans/", should_download_fillers=False)
downloader.start_downloading("https://snk.wbijam.pl/pierwsza_seria.html")
| 39.368
| 138
| 0.595001
|
f8db80526c3a50ddcf8e079cfc580628d6382177
| 584
|
py
|
Python
|
2019/day04/day04_part2.py
|
boffman/adventofcode
|
077e727b9b050c1fc5cb99ed7fbd64c5a69d9605
|
[
"MIT"
] | null | null | null |
2019/day04/day04_part2.py
|
boffman/adventofcode
|
077e727b9b050c1fc5cb99ed7fbd64c5a69d9605
|
[
"MIT"
] | null | null | null |
2019/day04/day04_part2.py
|
boffman/adventofcode
|
077e727b9b050c1fc5cb99ed7fbd64c5a69d9605
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
def is_valid(input):
sinp = str(input)
slen = len(sinp)
if slen != 6:
return False
dupes = defaultdict(int)
for ix in range(slen-1):
if sinp[ix] > sinp[ix+1]:
return False
if sinp[ix] == sinp[ix+1]:
dupes[sinp[ix]] += 1
if dupes:
for v in dupes.values():
if v == 1:
return True
return False
num_valid = 0
ix = 372037
while ix <= 905157:
if is_valid(ix):
num_valid += 1
ix += 1
print(num_valid)
| 22.461538
| 36
| 0.508562
|
be5307f2a6c8481a46d9bfaeea66fd2c6d364951
| 837
|
py
|
Python
|
src/map_renderer/offscreen_window.py
|
XiaoJake/range-mcl
|
b6ff92c7299ff3d01f1a8e41c11bc0191746084d
|
[
"MIT"
] | 141
|
2021-03-31T02:05:03.000Z
|
2022-03-24T18:14:18.000Z
|
src/map_renderer/offscreen_window.py
|
JasonSun623/range-mcl
|
bd3cc2b9efa1172fcf547938501e3a7e0b687c50
|
[
"MIT"
] | 6
|
2021-06-18T03:19:19.000Z
|
2022-01-23T09:58:39.000Z
|
src/map_renderer/offscreen_window.py
|
JasonSun623/range-mcl
|
bd3cc2b9efa1172fcf547938501e3a7e0b687c50
|
[
"MIT"
] | 48
|
2021-04-05T09:06:34.000Z
|
2022-03-24T03:08:38.000Z
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# Brief: Representation of an off-screen Window using glfw.
import glfw
class OffscreenWindow:
""" Representation of an off-screen Window using glfw. """
def __init__(self, init_glfw: bool = True, show: bool = False):
if init_glfw:
if not glfw.init(): raise RuntimeError("Unable to initialize glfw.")
# See https://www.glfw.org/docs/latest/context.html#context_offscreen
if not show: glfw.window_hint(glfw.VISIBLE, glfw.FALSE)
    self._window = glfw.create_window(640, 480, "test", None, None)
    if not self._window:
      glfw.terminate()
      raise RuntimeError("Unable to create window.")
    glfw.make_context_current(self._window)
@property
def glfw_window(self):
return self._window
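# Illustrative sketch (not part of the original module): a minimal outline of
# how OffscreenWindow might be used to obtain a hidden OpenGL context for
# headless rendering. The explicit destroy/terminate cleanup is an assumption
# about how a caller would shut things down.
def _example_offscreen_context():
  window = OffscreenWindow(init_glfw=True, show=False)
  # ... issue OpenGL calls against the now-current context here ...
  glfw.destroy_window(window.glfw_window)
  glfw.terminate()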
| 31
| 74
| 0.702509
|
2abcbfa3c74e769f820f22b5d37acc97bfa9f30d
| 26,988
|
py
|
Python
|
venv/lib/python3.7/site-packages/sklearn/utils/tests/test_extmath.py
|
sohiai/ur5
|
4101e0a0ec219dfdffda40650dd0867d00d821a8
|
[
"MIT"
] | 1
|
2020-01-31T19:20:20.000Z
|
2020-01-31T19:20:20.000Z
|
venv/lib/python3.7/site-packages/sklearn/utils/tests/test_extmath.py
|
sohiai/ur5
|
4101e0a0ec219dfdffda40650dd0867d00d821a8
|
[
"MIT"
] | 23
|
2020-03-13T18:03:04.000Z
|
2020-05-16T13:10:33.000Z
|
venv/lib/python3.7/site-packages/sklearn/utils/tests/test_extmath.py
|
sohiai/ur5
|
4101e0a0ec219dfdffda40650dd0867d00d821a8
|
[
"MIT"
] | 3
|
2020-02-22T18:55:36.000Z
|
2020-05-09T01:54:11.000Z
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <denis-alexander.engemann@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from scipy.special import expit
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.extmath import safe_min
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.datasets import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert density(X_) == density(X)
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
V = V.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
check_randomized_svd_low_rank(dtype)
@pytest.mark.parametrize('dtype',
(np.float32, np.float64))
def test_row_norms(dtype):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.01
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.1
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert X.shape == (n_samples, n_features)
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert np.abs(error_2 - error_20) > 100
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert 15 > np.abs(error_2 - error)
def test_randomized_svd_sparse_warnings():
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
for cls in (sparse.lil_matrix, sparse.dok_matrix):
X = cls(X)
assert_warns_message(
sparse.SparseEfficiencyWarning,
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(cls.__name__),
randomized_svd, X, n_components, n_iter=1,
power_iteration_normalizer='none')
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert u_based
assert not v_based
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert u_based
assert not v_based
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(expit(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from https://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = np.full(X1.shape[1], X1.shape[0], dtype=np.int32)
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)
X = np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]])
X_nan = np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]])
X_means, X_variances, X_count = _incremental_mean_and_var(
X, old_means, old_variances, old_sample_count)
X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
X_nan, old_means, old_variances, old_sample_count)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
# Naive one pass var: >tol (=1063)
assert np.abs(np_var(A) - one_pass_var(A)).max() > tol
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert n == A.shape[0]
# the mean is also slightly unstable
assert np.abs(A.mean(axis=0) - mean).max() > 1e-6
assert np.abs(np_var(A) - var).max() > tol
# Robust implementation: <tol (177)
mean, var = A0[0, :], np.zeros(n_features)
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert tol > np.abs(np_var(A) - var).max()
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = np.full(batch.shape[1], batch.shape[0],
dtype=np.int32)
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
def test_safe_min():
msg = ("safe_min is deprecated in version 0.22 and will be removed "
"in version 0.24.")
with pytest.warns(FutureWarning, match=msg):
safe_min(np.ones(10))
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("B_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d(A_array_constr, B_array_constr):
rng = np.random.RandomState(0)
A = rng.random_sample((30, 10))
B = rng.random_sample((10, 20))
expected = np.dot(A, B)
A = A_array_constr(A)
B = B_array_constr(B)
actual = safe_sparse_dot(A, B, dense_output=True)
assert_allclose(actual, expected)
def test_safe_sparse_dot_nd():
rng = np.random.RandomState(0)
# dense ND / sparse
A = rng.random_sample((2, 3, 4, 5, 6))
B = rng.random_sample((6, 7))
expected = np.dot(A, B)
B = sparse.csr_matrix(B)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# sparse / dense ND
A = rng.random_sample((2, 3))
B = rng.random_sample((4, 5, 3, 6))
expected = np.dot(A, B)
A = sparse.csr_matrix(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
@pytest.mark.parametrize("A_array_constr", [np.array, sparse.csr_matrix],
ids=["dense", "sparse"])
def test_safe_sparse_dot_2d_1d(A_array_constr):
rng = np.random.RandomState(0)
B = rng.random_sample((10))
# 2D @ 1D
A = rng.random_sample((30, 10))
expected = np.dot(A, B)
A = A_array_constr(A)
actual = safe_sparse_dot(A, B)
assert_allclose(actual, expected)
# 1D @ 2D
A = rng.random_sample((10, 30))
expected = np.dot(B, A)
A = A_array_constr(A)
actual = safe_sparse_dot(B, A)
assert_allclose(actual, expected)
@pytest.mark.parametrize("dense_output", [True, False])
def test_safe_sparse_dot_dense_output(dense_output):
rng = np.random.RandomState(0)
A = sparse.random(30, 10, density=0.1, random_state=rng)
B = sparse.random(10, 20, density=0.1, random_state=rng)
expected = A.dot(B)
actual = safe_sparse_dot(A, B, dense_output=dense_output)
assert sparse.issparse(actual) == (not dense_output)
if dense_output:
expected = expected.toarray()
assert_allclose_dense_sparse(actual, expected)
| 37.327801
| 90
| 0.629317
|
5e08900e8835b33b23f0bca24a4e42802ac8ff4d
| 1,483
|
py
|
Python
|
decoy/stub_store.py
|
mcous/decoy
|
0d5345ecbd3cacee795ac644530048a282c2ba20
|
[
"MIT"
] | 8
|
2020-11-30T23:35:54.000Z
|
2022-01-18T17:53:15.000Z
|
decoy/stub_store.py
|
mcous/decoy
|
0d5345ecbd3cacee795ac644530048a282c2ba20
|
[
"MIT"
] | 96
|
2020-12-02T17:01:41.000Z
|
2022-03-27T05:10:46.000Z
|
decoy/stub_store.py
|
mcous/decoy
|
0d5345ecbd3cacee795ac644530048a282c2ba20
|
[
"MIT"
] | null | null | null |
"""Stub creation and storage."""
from typing import Any, Callable, List, NamedTuple, Optional
from .spy_calls import SpyCall, WhenRehearsal, match_call
class StubBehavior(NamedTuple):
"""A recorded stub behavior."""
return_value: Optional[Any] = None
error: Optional[Exception] = None
action: Optional[Callable[..., Any]] = None
once: bool = False
class StubEntry(NamedTuple):
"""An entry in the StubStore for later behavior lookup."""
rehearsal: WhenRehearsal
behavior: StubBehavior
class StubStore:
"""Stored stub behaviors."""
def __init__(self) -> None:
"""Initialize a StubStore with an empty stubbings list."""
self._stubs: List[StubEntry] = []
def add(self, rehearsal: WhenRehearsal, behavior: StubBehavior) -> None:
"""Create and add a new StubBehavior to the store."""
self._stubs.append(StubEntry(rehearsal=rehearsal, behavior=behavior))
def get_by_call(self, call: SpyCall) -> StubBehavior:
"""Get the latest StubBehavior matching this call."""
reversed_indices = range(len(self._stubs) - 1, -1, -1)
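        # Walk the stored stubs from newest to oldest so the most recently
        # configured matching behavior wins; a `once` behavior is popped
        # (consumed) the first time it matches.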
for i in reversed_indices:
stub = self._stubs[i]
if match_call(call, stub.rehearsal):
if stub.behavior.once:
self._stubs.pop(i)
return stub.behavior
return StubBehavior()
def clear(self) -> None:
"""Remove all stored Stubs."""
self._stubs.clear()
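# Minimal sketch of the intended flow (assumed, since rehearsal/call construction
# lives in decoy.spy_calls and is not shown in this file):
#
#   store = StubStore()
#   store.add(rehearsal, StubBehavior(return_value=42, once=True))
#   store.get_by_call(call)  # -> StubBehavior(return_value=42, once=True), then consumed
#   store.get_by_call(call)  # -> default StubBehavior() afterwards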
| 28.519231
| 77
| 0.63857
|
1e69eac7f702c6e7a7154f325a4420a2931378a8
| 62,278
|
py
|
Python
|
python/mxnet/ndarray/sparse.py
|
harshp8l/incubator-mxnet
|
119897d927cfce49d6cad97e474a16d2beaf8091
|
[
"Apache-2.0"
] | 1
|
2019-01-17T12:51:59.000Z
|
2019-01-17T12:51:59.000Z
|
python/mxnet/ndarray/sparse.py
|
hcxiong/incubator-mxnet
|
f5ba2678f077b58d31d31029f680f93b313e1cea
|
[
"Apache-2.0"
] | 14
|
2018-10-15T16:38:27.000Z
|
2018-12-12T00:11:03.000Z
|
python/mxnet/ndarray/sparse.py
|
harshp8l/incubator-mxnet
|
119897d927cfce49d6cad97e474a16d2beaf8091
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import, too-many-lines
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
from __builtin__ import sum as py_sum
except ImportError:
from builtins import slice as py_slice
from builtins import sum as py_sum
import ctypes
import warnings
import operator
from array import array as native_array
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray",
"add", "subtract", "multiply", "divide"]
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array_buf, mx_real_t, integer_types
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context, current_context
from . import _internal
from . import op
try:
from .gen_sparse import retain as gs_retain # pylint: disable=redefined-builtin
except ImportError:
gs_retain = None
from ._internal import _set_ndarray_class
from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
from .ndarray import _ufunc_helper
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
_STORAGE_AUX_TYPES = {
'row_sparse': [np.int64],
'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl
class BaseSparseNDArray(NDArray):
"""The base class of an NDArray stored in a sparse storage format.
See CSRNDArray and RowSparseNDArray for more details.
"""
def __repr__(self):
"""Returns a string representation of the sparse array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
# The data content is not displayed since the array usually has big shape
return '\n<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return subtract(self, other)
def __mul__(self, other):
return multiply(self, other)
def __div__(self, other):
return divide(self, other)
def __iadd__(self, other):
raise NotImplementedError()
def __isub__(self, other):
raise NotImplementedError()
def __imul__(self, other):
raise NotImplementedError()
def __idiv__(self, other):
raise NotImplementedError()
def __itruediv__(self, other):
raise NotImplementedError()
def _sync_copyfrom(self, source_array):
raise NotImplementedError()
def _at(self, idx):
raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)
def _slice(self, start, stop):
raise NotSupportedForSparseNDArray(self._slice, None, start, stop)
def reshape(self, *shape, **kwargs):
raise NotSupportedForSparseNDArray(self.reshape, None, shape)
@property
def size(self):
# the `size` for a sparse ndarray is ambiguous, hence disabled.
raise NotImplementedError()
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
@property
def _num_aux(self):
"""The number of aux data used to help store the sparse ndarray.
"""
return len(_STORAGE_AUX_TYPES[self.stype])
@property
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
def asnumpy(self):
"""Return a dense ``numpy.ndarray`` object with value copied from this array
"""
return self.tostype('default').asnumpy()
def astype(self, dtype, copy=True):
"""Return a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
Parameters
----------
other : NDArray or CSRNDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray or RowSparseNDArray
The copied array.
"""
# pylint: disable= no-member, protected-access
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
True, self.dtype, self._aux_types))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
# pylint: enable= no-member, protected-access
def check_format(self, full_check=True):
"""Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
"""A sparse representation of 2D NDArray in the Compressed Sparse Row format.
A CSRNDArray represents an NDArray as three separate arrays: `data`,
`indptr` and `indices`. It uses the CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored
in ``data[indptr[i]:indptr[i+1]]``.
The column indices for a given row are expected to be sorted in ascending order.
Duplicate column entries for the same row are not allowed.
Example
-------
>>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
>>> a = a.tostype('csr')
>>> a.data.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> a.indices.asnumpy()
array([1, 0, 2])
>>> a.indptr.asnumpy()
array([0, 1, 2, 2, 3])
See Also
--------
csr_matrix: Several ways to construct a CSRNDArray
"""
def __reduce__(self):
return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a newly created NDArray based on the indexing key.
Parameters
----------
key : int or slice
Indexing key.
Examples
--------
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> a = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
>>> a.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> a[1:2].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[1].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[-1].asnumpy()
array([[ 4., 5., 6.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(key, int):
if key == -1:
begin = self.shape[0] - 1
else:
begin = key
return op.slice(self, begin=begin, end=begin+1)
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
begin = key.start if key.start else 0
end = key.stop if key.stop else self.shape[0]
return op.slice(self, begin=begin, end=end)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
raise ValueError('Undefined behaviour for {}'.format(key))
# pylint: enable= no-member, protected-access
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or CSRNDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.sparse.zeros('csr', (3,3))
>>> src.asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> # assign CSRNDArray with same storage type
>>> x = mx.nd.ones((3,3)).tostype('csr')
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> # assign NDArray to CSRNDArray
>>> x[:] = mx.nd.ones((3,3)) * 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly CSRNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for CSRNDArray is not ' \
'implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to CSRNDArray is " \
"not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
# TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
# avoid the temporary copy
warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise Exception('CSRNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the CSRNDArray.
This generates a deep copy of the column indices of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indices array.
"""
return self._aux_data(1)
@property
def indptr(self):
"""A deep copy NDArray of the indptr array of the CSRNDArray.
This generates a deep copy of the `indptr` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indptr array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the CSRNDArray.
This generates a deep copy of the `data` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@indptr.setter
def indptr(self, indptr):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or CSRNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'row_sparse':
raise ValueError("cast_storage from csr to row_sparse is not supported")
return op.cast_storage(self, stype=stype)
# pylint: enable= no-member, protected-access
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``CSRNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or CSRNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray
The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
"""
if isinstance(other, Context):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype in ('default', 'csr'):
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscipy(self):
"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
if not spsp:
raise ImportError("scipy is not available. \
Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
"""A sparse representation of a set of NDArray row slices at given indices.
A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
`indices`. The number of dimensions has to be at least 2.
- data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
- indices: a 1-D int64 NDArray with shape [D0] with values sorted in ascending order.
The `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
>>> dense.asnumpy()
array([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 4., 0., 5.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> rsp = dense.tostype('row_sparse')
>>> rsp.indices.asnumpy()
array([0, 2], dtype=int64)
>>> rsp.data.asnumpy()
array([[ 1., 2., 3.],
[ 4., 0., 5.]], dtype=float32)
A RowSparseNDArray is typically used to represent non-zero row slices of a large NDArray
of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
RowSparseNDArray is used principally in the definition of gradients for operations
that have sparse gradients (e.g. sparse dot and sparse embedding).
See Also
--------
row_sparse_array: Several ways to construct a RowSparseNDArray
"""
def __reduce__(self):
return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
>>> x[:].asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
if isinstance(key, int):
raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise Exception('RowSparseNDArray only supports [:] for __getitem__')
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
raise ValueError('Undefined behaviour for {}'.format(key))
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
>>> src.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign RowSparseNDArray with same storage type
>>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign NDArray to RowSparseNDArray
>>> x[:] = mx.nd.ones((3,3))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if not self.writable:
raise ValueError('Failed to assign to a readonly RowSparseNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for RowSparseNDArray ' \
                                 'is not implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._set_value(float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise TypeError('RowSparseNDArray only supports [:] for assignment')
# pylint: enable= no-member, protected-access
@property
def indices(self):
"""A deep copy NDArray of the indices array of the RowSparseNDArray.
This generates a deep copy of the row indices of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's indices array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the RowSparseNDArray.
This generates a deep copy of the `data` of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
# pylint: enable= no-member, protected-access
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype in ('default', 'row_sparse'):
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def retain(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`retain`.
The arguments are the same as for :py:func:`retain`, with
this array as data.
"""
if not gs_retain:
raise ImportError("gen_sparse could not be imported")
return gs_retain(*args, **kwargs)
def _prepare_src_array(source_array, dtype):
"""Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
"""
if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
            raise TypeError('values must be an array-like object')
return source_array
def _prepare_default_dtype(src_array, dtype):
"""Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."""
if dtype is None:
if isinstance(src_array, (NDArray, np.ndarray)):
dtype = src_array.dtype
elif spsp and isinstance(src_array, spsp.csr.csr_matrix):
dtype = src_array.dtype
else:
dtype = mx_real_t
return dtype
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
    """Creates a `CSRNDArray`, a 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
stores the offset into ``data`` of the first non-zero element number of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
        using three separate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
scipy.sparse.coo_matrix, tuple of int or tuple of array_like
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
"""
# construct a csr matrix from (M, N) or (data, indices, indptr)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
                if spsp is None:
                    raise ImportError("scipy is not available. "
                                      "Please check if the scipy python bindings are installed.")
                coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr')
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
# pylint: disable= no-member, protected-access
storage_type = 'csr'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
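    # Infer the shape when not provided: number of rows from len(indptr) - 1 and
    # number of columns from the largest stored column index + 1.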
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
# pylint: enable= no-member, protected-access
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
        The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
        shape is inferred from the indices and data arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array.
Returns
-------
RowSparseNDArray
An `RowSparseNDArray` with the `row_sparse` storage representation.
Example
-------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
"""
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# data, indices, indptr
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
            # construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse')
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ctx = current_context() if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
if stype == _STORAGE_TYPE_UNDEFINED:
stype = _storage_type(handle)
if stype == _STORAGE_TYPE_DEFAULT:
return NDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_CSR:
return CSRNDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_ROW_SPARSE:
return RowSparseNDArray(handle, writable=writable)
else:
raise Exception("unknown storage type: %s"%stype)
_set_ndarray_class(_ndarray_cls)
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
        then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be added.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a+b).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c+d).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_add,
operator.add,
_internal._plus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None)
# pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be subtracted.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
        broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a-b).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c-d).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_sub,
operator.sub,
_internal._minus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
None)
# pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array to be multiplied.
rhs : scalar or mxnet.ndarray.sparse.array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3)).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(3)
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> z = z.reshape((1, 3))
>>> z.asnumpy()
array([[ 0., 1., 2.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_mul,
operator.mul,
_internal._mul_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
# pylint: enable= no-member, protected-access
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.sparse.array
First array in division.
rhs : scalar or mxnet.ndarray.sparse.array
Second array in division.
        If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = (mx.nd.ones((2,3))*6).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1)) + 1
>>> z = mx.nd.arange(3) + 1
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.sparse.divide(x,y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
    >>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> z = z.reshape((1,3))
>>> z.asnumpy()
array([[ 1., 2., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_div,
operator.truediv,
_internal._div_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
None)
# pylint: enable= no-member, protected-access
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= no-member, protected-access
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
if stype in ('row_sparse', 'csr'):
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
# pylint: enable= no-member, protected-access
def empty(stype, shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = current_context()
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype in ('csr', 'row_sparse'):
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
raise Exception("unknown stype : " + str(stype))
def array(source_array, ctx=None, dtype=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
ctx = current_context() if ctx is None else ctx
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
# prepare dtype and ctx based on source_array, if not provided
dtype = _prepare_default_dtype(source_array, dtype)
# if both dtype and ctx are different from source_array, we cannot copy directly
if source_array.dtype != dtype and source_array.context != ctx:
arr = empty(source_array.stype, source_array.shape, dtype=dtype)
arr[:] = source_array
arr = arr.as_in_context(ctx)
else:
arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
arr[:] = source_array
return arr
elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
dtype = _prepare_default_dtype(source_array, dtype)
return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
dtype=dtype, ctx=ctx)
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
| 38.020757
| 100
| 0.586467
|
3f60150b202369d70de19a21c4bed48f8e20de08
| 394
|
py
|
Python
|
AlphaCodeServer/contest/migrations/0004_auto_20200501_0830.py
|
RoranBlackfire/Alpha-Code
|
d2dcae2bf6f6b705ad577c92172f143e213a6c3d
|
[
"MIT"
] | null | null | null |
AlphaCodeServer/contest/migrations/0004_auto_20200501_0830.py
|
RoranBlackfire/Alpha-Code
|
d2dcae2bf6f6b705ad577c92172f143e213a6c3d
|
[
"MIT"
] | null | null | null |
AlphaCodeServer/contest/migrations/0004_auto_20200501_0830.py
|
RoranBlackfire/Alpha-Code
|
d2dcae2bf6f6b705ad577c92172f143e213a6c3d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-05-01 08:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contest', '0003_auto_20200501_0828'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='rank',
field=models.IntegerField(default=9999999),
),
]
| 20.736842
| 55
| 0.606599
|
4e4e5233c38431377e0749dbb39e3205b877dc9d
| 6,501
|
py
|
Python
|
keras/models/sharpness_aware_minimization.py
|
RakeshJarupula/keras
|
2ac6638e91d5aff77c22b45e9c8c84fb05a9e477
|
[
"Apache-2.0"
] | null | null | null |
keras/models/sharpness_aware_minimization.py
|
RakeshJarupula/keras
|
2ac6638e91d5aff77c22b45e9c8c84fb05a9e477
|
[
"Apache-2.0"
] | null | null | null |
keras/models/sharpness_aware_minimization.py
|
RakeshJarupula/keras
|
2ac6638e91d5aff77c22b45e9c8c84fb05a9e477
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sharpness Aware Minimization implementation."""
import copy
from keras.engine import data_adapter
from keras.layers import deserialize as deserialize_layer
from keras.models import Model
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-classes-have-attributes
@generic_utils.register_keras_serializable()
@keras_export("keras.models.experimental.SharpnessAwareMinimization", v1=[])
class SharpnessAwareMinimization(Model):
"""Sharpness aware minimization (SAM) training flow.
Sharpness-aware minimization (SAM) is a technique that improves the model
  generalization and provides robustness to label noise. Mini-batch splitting has
  been shown to improve SAM's performance, and users can control how mini-batches
  are split by setting the `num_batch_splits` argument.
Args:
model: `tf.keras.Model` instance. The inner model that does the
forward-backward pass.
rho: float, defaults to 0.05. The gradients scaling factor.
num_batch_splits: int, defaults to None. The number of mini batches to
split into from each data batch. If None, batches are not split into
sub-batches.
name: string, defaults to None. The name of the SAM model.
Reference:
[Pierre Foret et al., 2020](https://arxiv.org/abs/2010.01412)
"""
def __init__(self, model, rho=0.05, num_batch_splits=None, name=None):
super().__init__(name=name)
self.model = model
self.rho = rho
self.num_batch_splits = num_batch_splits
def train_step(self, data):
"""The logic of one SAM training step.
Args:
data: A nested structure of `Tensor`s. It should be of structure
(x, y, sample_weight) or (x, y).
Returns:
A dict mapping metric names to running average values.
"""
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
if self.num_batch_splits is not None:
x_split = tf.split(x, self.num_batch_splits)
y_split = tf.split(y, self.num_batch_splits)
else:
x_split = [x]
y_split = [y]
gradients_all_batches = []
pred_all_batches = []
for (x_batch, y_batch) in zip(x_split, y_split):
epsilon_w_cache = []
with tf.GradientTape() as tape:
pred = self.model(x_batch)
loss = self.compiled_loss(y_batch, pred)
pred_all_batches.append(pred)
trainable_variables = self.model.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
gradients_order2_norm = self._gradients_order2_norm(gradients)
scale = self.rho / (gradients_order2_norm + 1e-12)
for (gradient, variable) in zip(gradients, trainable_variables):
epsilon_w = gradient * scale
self._distributed_apply_epsilon_w(variable, epsilon_w,
tf.distribute.get_strategy())
epsilon_w_cache.append(epsilon_w)
with tf.GradientTape() as tape:
pred = self(x_batch)
loss = self.compiled_loss(y_batch, pred)
gradients = tape.gradient(loss, trainable_variables)
if len(gradients_all_batches) == 0:
for gradient in gradients:
gradients_all_batches.append([gradient])
else:
for (gradient, gradient_all_batches) in zip(gradients,
gradients_all_batches):
gradient_all_batches.append(gradient)
for (variable, epsilon_w) in zip(trainable_variables, epsilon_w_cache):
# Restore the variable to its original value before `apply_gradients()`.
self._distributed_apply_epsilon_w(variable, -epsilon_w,
tf.distribute.get_strategy())
gradients = []
for gradient_all_batches in gradients_all_batches:
gradients.append(tf.reduce_sum(gradient_all_batches, axis=0))
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
pred = tf.concat(pred_all_batches, axis=0)
self.compiled_metrics.update_state(y, pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def call(self, inputs):
"""Forward pass of SAM.
SAM delegates the forward pass call to the wrapped model.
Args:
inputs: Tensor. The model inputs.
Returns:
A Tensor, the outputs of the wrapped model for given `inputs`.
"""
return self.model(inputs)
def get_config(self):
config = super().get_config()
config.update({
"model": generic_utils.serialize_keras_object(self.model),
"rho": self.rho,
})
return config
@classmethod
def from_config(cls, config, custom_objects=None):
# Avoid mutating the input dict.
config = copy.deepcopy(config)
model = deserialize_layer(
config.pop("model"), custom_objects=custom_objects)
config["model"] = model
return super().from_config(config, custom_objects)
def _distributed_apply_epsilon_w(self, var, epsilon_w, strategy):
# Helper function to apply epsilon_w on model variables.
if isinstance(tf.distribute.get_strategy(),
(tf.distribute.experimental.ParameterServerStrategy,
tf.distribute.experimental.CentralStorageStrategy)):
# Under PSS and CSS, the AggregatingVariable has to be kept in sync.
def distribute_apply(strategy, var, epsilon_w):
strategy.extended.update(
var, lambda x, y: x.assign_add(y), args=(epsilon_w,), group=False)
tf.__internal__.distribute.interim.maybe_merge_call(
distribute_apply, tf.distribute.get_strategy(), var, epsilon_w)
else:
var.assign_add(epsilon_w)
def _gradients_order2_norm(self, gradients):
norm = tf.norm(
tf.stack([tf.norm(grad) for grad in gradients if grad is not None]))
return norm
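# Illustrative usage of SharpnessAwareMinimization (kept as comments; the inner
# model, optimizer and data below are placeholder assumptions, not part of the
# Keras source):
#
#     import tensorflow as tf
#
#     inner = tf.keras.Sequential([tf.keras.layers.Dense(10, activation="softmax")])
#     sam_model = SharpnessAwareMinimization(inner, rho=0.05)
#     sam_model.compile(optimizer="sgd",
#                       loss="sparse_categorical_crossentropy",
#                       metrics=["accuracy"])
#     # x_train and y_train are assumed to be user-provided arrays:
#     # sam_model.fit(x_train, y_train, epochs=1)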
| 37.796512
| 80
| 0.690817
|
d1e7e70dd2ac946fbfec36e47b3a4e65092f1eb8
| 3,925
|
py
|
Python
|
Code/all-starter-code/bases.py
|
stark276/CS-1.3-Core-Data-Structure
|
dabf3b1a3301a2edbf0ae084c8febdcf48145aee
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/bases.py
|
stark276/CS-1.3-Core-Data-Structure
|
dabf3b1a3301a2edbf0ae084c8febdcf48145aee
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/bases.py
|
stark276/CS-1.3-Core-Data-Structure
|
dabf3b1a3301a2edbf0ae084c8febdcf48145aee
|
[
"MIT"
] | null | null | null |
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
"""Decode given digits in given base to number in base 10.
digits: str -- string representation of number (in given base)
base: int -- base of given number
return: int -- integer representation of number (in base 10)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# TODO: Decode digits from binary (base 2)
# ...
# TODO: Decode digits from hexadecimal (base 16)
# ...
# TODO: Decode digits from any base (2 up to 36)
# ...
decoded_value = 0
digits = digits[::-1] # Reverse the list/string because index starts left to right, and we need to start right to left
for i in range(len(digits)):
if digits[i].isalpha(): # Checks to see if the digit is a character instead of numerical
digit = string.ascii_lowercase.index(digits[i].lower()) + 10 # Can add 10 b/c starts at 0, in alphabetical order
else:
digit = int(digits[i])
decoded_value += digit * (base ** i)
return decoded_value
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
# Handle unsigned numbers only for now
assert number >= 0, 'number is negative: {}'.format(number)
encoded_string = ''
while number > 0:
remainder = number % base # this finds remainder
number -= remainder
number = number // base
        if remainder > 9: # For bases above 10: digits stop at 9, so letters represent 10 and up
remainder = string.ascii_lowercase[remainder-10] # again because starts at 0
encoded_string += str(remainder)
return ''.join(reversed(encoded_string)) # To reverse the encoded string and return as string, not list
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
# TODO: Convert digits from base 2 to base 16 (and vice versa)
# ...
# TODO: Convert digits from base 2 to base 10 (and vice versa)
# ...
# TODO: Convert digits from base 10 to base 16 (and vice versa)
# ...
# TODO: Convert digits from any base to any base (2 up to 36)
# ...
decoded = decode(digits, base1)
converted = encode(decoded, base2)
return converted
def main():
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:]
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print('{} in base {} is {} in base {}'.format(digits, base1, result, base2))
else:
print('Usage: {} digits base1 base2'.format(sys.argv[0]))
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
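# Quick sanity examples for the helpers above (comments only; values chosen
# arbitrarily for illustration):
#
#     >>> decode('ff', 16)       # hexadecimal 'ff' -> base 10
#     255
#     >>> encode(255, 2)         # base 10 -> binary digits
#     '11111111'
#     >>> convert('ff', 16, 2)   # hexadecimal -> binary via base 10
#     '11111111'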
| 37.380952
| 124
| 0.649172
|
314a041a7eb19b1b5e2e65a3ff0cd5ff1c9aaab7
| 492
|
py
|
Python
|
alurareceita/apps/receitas/urls.py
|
Jefferson472/apredendo-django
|
c01817d93493f588c1a3462c6e153cbbc3230508
|
[
"MIT"
] | null | null | null |
alurareceita/apps/receitas/urls.py
|
Jefferson472/apredendo-django
|
c01817d93493f588c1a3462c6e153cbbc3230508
|
[
"MIT"
] | null | null | null |
alurareceita/apps/receitas/urls.py
|
Jefferson472/apredendo-django
|
c01817d93493f588c1a3462c6e153cbbc3230508
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *
urlpatterns = [
path('', index, name='index'),
path('<int:receita_id>', receita, name='receita'),
path('busca', buscar, name='buscar'),
path('cria_receita', cria_receita, name='cria_receita'),
path('deleta/<int:receita_id>', deleta_receita, name='deleta_receita'),
path('edita_receita/<int:receita_id>', edita_receita, name='edita_receita'),
path('atualiza_receita', atualiza_receita, name='atualiza_receita')
]
| 35.142857
| 80
| 0.697154
|
5231b15ab9e05c410736b7478773f9f1d0148853
| 7,408
|
py
|
Python
|
xlsxwriter/test/worksheet/test_cond_format08.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/worksheet/test_cond_format08.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/worksheet/test_cond_format08.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write('A1', 10)
worksheet.write('A2', 20)
worksheet.write('A3', 30)
worksheet.write('A4', 40)
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'yesterday',
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'today',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'tomorrow',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'last 7 days',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'last week',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'this week',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
# Test erroneous legacy criteria.
'criteria': 'continue week',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'last month',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
'criteria': 'this month',
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'time_period',
# Test erroneous legacy criteria.
'criteria': 'continue month',
'format': None,
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="timePeriod" priority="1" timePeriod="yesterday">
<formula>FLOOR(A1,1)=TODAY()-1</formula>
</cfRule>
<cfRule type="timePeriod" priority="2" timePeriod="today">
<formula>FLOOR(A1,1)=TODAY()</formula>
</cfRule>
<cfRule type="timePeriod" priority="3" timePeriod="tomorrow">
<formula>FLOOR(A1,1)=TODAY()+1</formula>
</cfRule>
<cfRule type="timePeriod" priority="4" timePeriod="last7Days">
<formula>AND(TODAY()-FLOOR(A1,1)<=6,FLOOR(A1,1)<=TODAY())</formula>
</cfRule>
<cfRule type="timePeriod" priority="5" timePeriod="lastWeek">
<formula>AND(TODAY()-ROUNDDOWN(A1,0)>=(WEEKDAY(TODAY())),TODAY()-ROUNDDOWN(A1,0)<(WEEKDAY(TODAY())+7))</formula>
</cfRule>
<cfRule type="timePeriod" priority="6" timePeriod="thisWeek">
<formula>AND(TODAY()-ROUNDDOWN(A1,0)<=WEEKDAY(TODAY())-1,ROUNDDOWN(A1,0)-TODAY()<=7-WEEKDAY(TODAY()))</formula>
</cfRule>
<cfRule type="timePeriod" priority="7" timePeriod="nextWeek">
<formula>AND(ROUNDDOWN(A1,0)-TODAY()>(7-WEEKDAY(TODAY())),ROUNDDOWN(A1,0)-TODAY()<(15-WEEKDAY(TODAY())))</formula>
</cfRule>
<cfRule type="timePeriod" priority="8" timePeriod="lastMonth">
<formula>AND(MONTH(A1)=MONTH(TODAY())-1,OR(YEAR(A1)=YEAR(TODAY()),AND(MONTH(A1)=1,YEAR(A1)=YEAR(TODAY())-1)))</formula>
</cfRule>
<cfRule type="timePeriod" priority="9" timePeriod="thisMonth">
<formula>AND(MONTH(A1)=MONTH(TODAY()),YEAR(A1)=YEAR(TODAY()))</formula>
</cfRule>
<cfRule type="timePeriod" priority="10" timePeriod="nextMonth">
<formula>AND(MONTH(A1)=MONTH(TODAY())+1,OR(YEAR(A1)=YEAR(TODAY()),AND(MONTH(A1)=12,YEAR(A1)=YEAR(TODAY())+1)))</formula>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| 44.626506
| 171
| 0.401863
|
4b2cf451c7a9002eba2e505aa00f559d8218a11b
| 9,328
|
py
|
Python
|
pydatastructs/miscellaneous_data_structures/queue.py
|
Aimaanhasan/pydatastructs
|
be07fdf45cde1a37f004b920cb2b4bc1e294c12b
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T11:24:43.000Z
|
2020-04-13T11:24:43.000Z
|
pydatastructs/miscellaneous_data_structures/queue.py
|
Aimaanhasan/pydatastructs
|
be07fdf45cde1a37f004b920cb2b4bc1e294c12b
|
[
"BSD-3-Clause"
] | null | null | null |
pydatastructs/miscellaneous_data_structures/queue.py
|
Aimaanhasan/pydatastructs
|
be07fdf45cde1a37f004b920cb2b4bc1e294c12b
|
[
"BSD-3-Clause"
] | null | null | null |
from pydatastructs.linear_data_structures import DynamicOneDimensionalArray, SinglyLinkedList
from pydatastructs.utils.misc_util import NoneType, LinkedListNode, _check_type
from pydatastructs.trees.heaps import BinaryHeap, BinomialHeap
from copy import deepcopy as dc
__all__ = [
'Queue',
'PriorityQueue'
]
class Queue(object):
"""Representation of queue data structure.
Parameters
==========
implementation : str
Implementation to be used for queue.
By default, 'array'
items : list/tuple
Optional, by default, None
        The initial items in the queue.
dtype : A valid python type
Optional, by default NoneType if item
is None.
Examples
========
>>> from pydatastructs import Queue
>>> q = Queue()
>>> q.append(1)
>>> q.append(2)
>>> q.append(3)
>>> q.popleft()
1
>>> len(q)
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type)
"""
def __new__(cls, implementation='array', **kwargs):
if implementation == 'array':
return ArrayQueue(
kwargs.get('items', None),
kwargs.get('dtype', int))
elif implementation == 'linked_list':
return LinkedListQueue(
kwargs.get('items', None)
)
raise NotImplementedError(
"%s hasn't been implemented yet."%(implementation))
def append(self, *args, **kwargs):
raise NotImplementedError(
"This is an abstract method.")
def popleft(self, *args, **kwargs):
raise NotImplementedError(
"This is an abstract method.")
@property
def is_empty(self):
raise NotImplementedError(
"This is an abstract method.")
class ArrayQueue(Queue):
__slots__ = ['front']
def __new__(cls, items=None, dtype=NoneType):
if items is None:
items = DynamicOneDimensionalArray(dtype, 0)
else:
dtype = type(items[0])
items = DynamicOneDimensionalArray(dtype, items)
obj = object.__new__(cls)
obj.items, obj.front = items, -1
if items.size == 0:
obj.front = -1
else:
obj.front = 0
return obj
def append(self, x):
if self.is_empty:
self.front = 0
self.items._dtype = type(x)
self.items.append(x)
def popleft(self):
if self.is_empty:
raise IndexError("Queue is empty.")
return_value = dc(self.items[self.front])
front_temp = self.front
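        # Note: when removing this element takes the dynamic array below its
        # load factor, the underlying storage is resized and its contents shift
        # to index 0, so the front pointer is reset; otherwise it advances.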
if self.front == self.rear:
self.front = -1
else:
if (self.items._num - 1)/self.items._size < \
self.items._load_factor:
self.front = 0
else:
self.front += 1
self.items.delete(front_temp)
return return_value
@property
def rear(self):
return self.items._last_pos_filled
@property
def is_empty(self):
return self.__len__() == 0
def __len__(self):
return self.items._num
def __str__(self):
_data = []
for i in range(self.front, self.rear + 1):
_data.append(self.items._data[i])
return str(_data)
class LinkedListQueue(Queue):
__slots__ = ['queue']
def __new__(cls, items=None):
obj = object.__new__(cls)
obj.queue = SinglyLinkedList()
if items is None:
pass
elif type(items) in (list, tuple):
for x in items:
obj.append(x)
else:
raise TypeError("Expected type: list/tuple")
return obj
def append(self, x):
self.queue.append(x)
def popleft(self):
if self.is_empty:
raise IndexError("Queue is empty.")
return_value = self.queue.pop_left()
return return_value
@property
def is_empty(self):
return self.size == 0
@property
def front(self):
return self.queue.head
@property
def rear(self):
return self.queue.tail
@property
def size(self):
return self.queue.size
def __len__(self):
return self.size
def __str__(self):
return str(self.queue)
class PriorityQueue(object):
"""
Represents the concept of priority queue.
Parameters
==========
implementation: str
The implementation which is to be
used for supporting operations
of priority queue.
The following implementations are supported,
'linked_list' -> Linked list implementation.
'binary_heap' -> Binary heap implementation.
'binomial_heap' -> Binomial heap implementation.
            Doesn't support custom comparators; the minimum
            key's data is extracted in every pop.
Optional, by default, 'binary_heap' implementation
is used.
comp: function
The comparator to be used while comparing priorities.
Must return a bool object.
By default, `lambda u, v: u < v` is used to compare
priorities i.e., minimum priority elements are extracted
by pop operation.
Examples
========
>>> from pydatastructs import PriorityQueue
>>> pq = PriorityQueue()
>>> pq.push(1, 2)
>>> pq.push(2, 3)
>>> pq.pop()
1
>>> pq2 = PriorityQueue(comp=lambda u, v: u > v)
>>> pq2.push(1, 2)
>>> pq2.push(2, 3)
>>> pq2.pop()
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Priority_queue
"""
def __new__(cls, implementation='binary_heap', **kwargs):
comp = kwargs.get("comp", lambda u, v: u < v)
if implementation == 'linked_list':
return LinkedListPriorityQueue(comp)
elif implementation == 'binary_heap':
return BinaryHeapPriorityQueue(comp)
elif implementation == 'binomial_heap':
return BinomialHeapPriorityQueue()
else:
raise NotImplementedError(
"%s implementation is not currently supported "
"by priority queue.")
def push(self, value, priority):
"""
Pushes the value to the priority queue
according to the given priority.
value
Value to be pushed.
priority
Priority to be given to the value.
"""
raise NotImplementedError(
"This is an abstract method.")
def pop(self):
"""
Pops out the value from the priority queue.
"""
raise NotImplementedError(
"This is an abstract method.")
@property
def peek(self):
"""
Returns the pointer to the value which will be
popped out by `pop` method.
"""
raise NotImplementedError(
"This is an abstract method.")
@property
def is_empty(self):
"""
Checks if the priority queue is empty.
"""
raise NotImplementedError(
"This is an abstract method.")
class LinkedListPriorityQueue(PriorityQueue):
__slots__ = ['items', 'comp']
def __new__(cls, comp):
obj = object.__new__(cls)
obj.items = SinglyLinkedList()
obj.comp = comp
return obj
def push(self, value, priority):
self.items.append(priority, value)
def pop(self):
_, max_i = self._find_peek(return_index=True)
pop_val = self.items.extract(max_i)
return pop_val.data
def _find_peek(self, return_index=False):
if self.is_empty:
raise IndexError("Priority queue is empty.")
walk = self.items.head
i, max_i, max_p = 0, 0, walk
while walk is not None:
if self.comp(walk.key, max_p.key):
max_i = i
max_p = walk
i += 1
walk = walk.next
if return_index:
return max_p, max_i
return max_p
@property
def peek(self):
return self._find_peek()
@property
def is_empty(self):
return self.items.size == 0
class BinaryHeapPriorityQueue(PriorityQueue):
__slots__ = ['items']
def __new__(cls, comp):
obj = object.__new__(cls)
obj.items = BinaryHeap()
obj.items._comp = comp
return obj
def push(self, value, priority):
self.items.insert(priority, value)
def pop(self):
node = self.items.extract()
return node.data
@property
def peek(self):
if self.items.is_empty:
raise IndexError("Priority queue is empty.")
return self.items.heap[0]
@property
def is_empty(self):
return self.items.is_empty
class BinomialHeapPriorityQueue(PriorityQueue):
__slots__ = ['items']
def __new__(cls):
obj = object.__new__(cls)
obj.items = BinomialHeap()
return obj
def push(self, value, priority):
self.items.insert(priority, value)
def pop(self):
node = self.items.find_minimum()
self.items.delete_minimum()
return node.data
@property
def peek(self):
return self.items.find_minimum()
@property
def is_empty(self):
return self.items.is_empty
| 25.416894
| 93
| 0.5744
|
a32108cbf7bb2a3c537acb3c3884dcb04616d0b5
| 8,872
|
py
|
Python
|
nxted/pycheck.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 8
|
2015-02-16T23:11:30.000Z
|
2021-01-16T00:15:25.000Z
|
nxted/pycheck.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 3
|
2016-07-16T20:47:48.000Z
|
2021-03-25T21:27:11.000Z
|
nxted/pycheck.py
|
xlcteam/nxtIDE
|
659ace68b060682aeeb6d789d88a7b6899a56f9a
|
[
"MIT"
] | 3
|
2015-08-30T18:23:51.000Z
|
2016-10-28T15:07:59.000Z
|
#!/usr/bin/env python
import sys, re, os
import compiler.ast
from compiler.visitor import ASTVisitor
from compiler import parse, walk
from compiler.consts import *
class Visitor(ASTVisitor):
def __init__(self, stream=sys.stdout, parent=None, debug=False):
self.parent = parent
self.v = lambda tree, visitor=self: walk(tree, visitor)
self.stream = stream
self.strcode = ""
self.debug = debug
self.indents = 0
self.ids = {}
self.ids['global'] = ['abs', 'str', 'ord', 'True', 'False', 'robot',
'pygame', 'list', 'range', 'RoboException',
'None', 'int', 'float', 'zip', 'arange',
'sin', 'array', 'resize', 'pi', 'RoboThread',
'__clock__', 'len']
self.ids['__fn__'] = []
self.ids[''] = []
self.fn_types = {}
self.fn_type_regex = re.compile(":param \((.*?)\)")
self.var_types = {}
self.var_types['global'] = {}
self.fn = ""
ASTVisitor.__init__(self)
def addId(self, name):
if self.fn == "":
self.ids['global'].append(name)
else:
self.ids[self.fn].append(name)
def addVarType(self, name, type):
if self.fn == "":
self.var_types['global'][name] = type
else:
if not self.var_types.has_key(self.fn):
self.var_types[self.fn] = {}
self.var_types[self.fn][name] = type
def __str__(self):
return self.strcode
def DEDENT(self):
self.indents -=1
self.NEWLINE()
def INDENT(self):
self.indents += 1
self.NEWLINE()
def NEWLINE(self):
self.write('\n')
self.write(' ' * 4 * self.indents )
def write(self, data):
if self.stream:
self.stream.write(data)
self.strcode += data
def visitAssName(self, node):
self.addId(node.name)
def visitAssign(self, node):
for i in range(len(node.nodes)):
n = node.nodes[i]
self.v(n)
if isinstance(node.expr, compiler.ast.Const) and \
isinstance(node.nodes[0], compiler.ast.AssName):
self.addVarType(node.nodes[0].name, type(node.expr.value))
self.v(node.expr)
def visitAugAssign(self, node):
self.v(node.node)
self.v(node.expr)
def visitCallFunc(self, node):
if not isinstance(node.node, compiler.ast.Getattr):
if not (node.node.name in self.ids['global']):
#print node.node.name
pass
self.v(node.node)
for i in range(len(node.args)):
#print "\t", node.args[i]
#help(node.args[i])
if isinstance(node.args[i], compiler.ast.Name):
self.visitName(node.args[i])
if isinstance(node.args[i], compiler.ast.Const) and \
not isinstance(node.node, compiler.ast.Getattr) and \
self.fn_types.has_key(node.node.name):
if self.fn_types[node.node.name] == []:
continue
if not self.istype(self.fn_types[node.node.name][i],
node.args[i].value):
fn_type = self.fn_types[node.node.name][i]
fn_name = node.node.name
var_type = type(node.args[i].value)
var_value = node.args[i].value
raise ValueError("%s expects %s not %s" %
(fn_name, fn_type, var_type.__name__),
*self.parent.first_match(fn_name, i, var_value))
if isinstance(node.args[i], compiler.ast.Name) and \
not isinstance(node.node, compiler.ast.Getattr) and \
self.fn_types.has_key(node.node.name):
if self.fn_types[node.node.name] == []:
continue
if not self.var_types.has_key(self.fn):
continue
if not self.var_types[self.fn].has_key(node.args[i].name):
continue
if not self.istype(self.fn_types[node.node.name][i],
self.var_types[self.fn][node.args[i].name]):
fn_type = self.fn_types[node.node.name][i]
fn_name = node.node.name
var_type = self.var_types[self.fn][node.args[i].name]
var_name = node.args[i].name
# self.parent.first_match(fn_name, i, var_name)
raise ValueError("%s expects %s not %s" %
(fn_name, fn_type, var_type.__name__),
*self.parent.first_match(fn_name, i, var_name))
self.v(node.args[i])
def visitCompare(self, node):
self.v(node.expr)
for operator, operand in node.ops:
self.v(operand)
def visitFunction(self, node):
self.ids[node.name] = []
self.fn = "__fn__"
self.addId(node.name)
if isinstance(node.doc, str):
self.fn_types[node.name] = self.parseDoc(node.doc)
self.fn = node.name
for x in node.argnames[:]:
self.addId(x)
self.v(node.code)
self.fn = ""
def visitIf(self, node):
(c, b) = node.tests[0]
self.v(c)
self.v(b)
for c, b in node.tests[1:]:
self.v(c)
self.v(b)
if node.else_:
self.v(node.else_)
def visitKeyword(self, node):
self.v(node.expr)
def visitModule(self, node):
self.v(node.node)
def visitName(self, node):
defined = node.name in self.ids[self.fn] \
or node.name in self.ids['__fn__'] \
or node.name in self.ids['global']
if not defined:
raise NameError("Name '%s' is not defined" % (node.name),
self.parent.first_occur(node.name))
def visitStmt(self, node):
for n in node.nodes:
self.v(n)
def visitWhile(self, node):
self.v(node.test)
self.v(node.body)
if node.else_:
self.v(node.else_)
def visitImport(self, node):
for name in node.asList()[0]:
self.addId(name[0])
def parseDoc(self, s):
return self.fn_type_regex.findall(s)
def istype(self, t, val):
r = {'int': int, 'str': str}
return isinstance(val, r[t])
class PyCheck():
def __init__(self):
self.src = None
self.visitor = Visitor(parent = self)
def check(self, filename = None, string = None):
if string != None:
self.src = string
else:
self.src = open(filename).read()
self.visitor.v(parse(self.src))
return True
def first_occur(self, name):
line = 1
for x in self.src.split('\n'):
if name in x:
return line
line += 1
return line
def first_match(self, fn_name, narg, val):
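        # Find the first source line calling `fn_name` with `val` as argument
        # number `narg`: the repeated '.*?,' groups skip the preceding
        # arguments, and the result is the line number plus the character span
        # of the matched value.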
regex = '.*?%s\(%s\s*(%s)\).*?' % \
(fn_name, '.*?,'*narg, val)
match = re.search(regex, self.src)
line = self.src.count(os.linesep, 0, match.start()) + 1
return [line, (match.start(1), match.end(1)) ]
__lfix__ = "#lfixed"
def loopFix(s, fn):
fix = __lfix__
i = re.sub("([\t ]*)while\s(.*):\n", "\\1while \\2:\n\\1 %s%s\n" % \
(fn,fix), s)
i = re.sub("([\t ]*)for\s(.*):\n", "\\1for \\2:\n\\1 %s%s\n" % \
(fn, fix), i)
return i
def defFix(s):
if "def main" not in s:
out = ['def main():']
for line in s.split('\n'):
out.append(' '*4 + line)
s = '\n'.join(out)
return s
def realLine(s, l):
s = s.split('\n')
fixes = []
for line in s:
if __lfix__ in line:
fixes.append(1)
else:
fixes.append(0)
return l - sum(fixes[:l])
if __name__ == "__main__":
check = PyCheck()
check.check("../nxtemu/api.py")
check.check("files/test1.py")
#check.check("etest.py")
#print vi.ids
#string = loopFix(open("etest.py").read(), "tester()")
#print string
print loopFix("""def main():
while 1:
TextOut(0, LCD_LINE1, "socialny defekt")
Wait(2000)""", "t()")
print loopFix(open("files/whiletester.py").read(), "tester()")
print defFix("""OnFwd(OUT_A, 100)\nWait(2000)""")
#print realLine(string, 30)
| 27.638629
| 80
| 0.491885
|
965d7091520c20f6374c83cbbd8045a6f0602308
| 1,143
|
py
|
Python
|
decodeshapes.py
|
aycock/mh
|
72cc39619a6978a923a30c47652b06444239b884
|
[
"MIT"
] | null | null | null |
decodeshapes.py
|
aycock/mh
|
72cc39619a6978a923a30c47652b06444239b884
|
[
"MIT"
] | null | null | null |
decodeshapes.py
|
aycock/mh
|
72cc39619a6978a923a30c47652b06444239b884
|
[
"MIT"
] | null | null | null |
# Python < 3
# see LICENSE file for licensing information
# Shape table reconstruction using turtle graphics based on info from the
# Applesoft BASIC Programming Reference Manual.
# Takes binary memory dump from emulator as input. For Mystery House, dump
# $5700-$5a00 and $5500-$5700.
import sys
import turtle
DOTSIZE = 5
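# Shape-table byte layout (per the Applesoft reference cited above): bits 0-2
# (A) and bits 3-5 (B) are 3-bit plot vectors, and bits 6-7 (C) form a 2-bit
# vector drawn only when non-zero. Within a vector, bit 2 is the "plot a dot"
# flag and bits 0-1 give the direction (0=up, 1=right, 2=down, 3=left), which
# plot() below maps onto turtle headings.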
def plot(x):
plot = x >> 2
if plot:
turtle.dot(DOTSIZE)
dir = [ 90, 0, 270, 180 ][x & 3]
turtle.setheading(dir)
turtle.fd(DOTSIZE)
def shape(data):
turtle.reset()
turtle.ht()
turtle.pu()
turtle.tracer(0, 0)
i = 0
while True:
byte = data[i]
i += 1
if byte == 0:
break
A = byte & 7
B = (byte >> 3) & 7
C = (byte >> 6) & 7
plot(A)
plot(B)
if C != 0:
plot(C)
turtle.update()
def table(data):
n = data[0]
print n, 'shapes in shape table'
for i in range(1, n+1):
print 'shape definition', i
index = (data[i * 2 + 1] << 8) | data[i * 2]
shape(data[index:])
raw_input()
if __name__ == '__main__':
if len(sys.argv) != 2:
        print 'usage: python', sys.argv[0], '<memorydump.bin>'
exit()
f = open(sys.argv[1], 'rb')
data = bytearray(f.read())
    f.close()
table(data)
| 16.808824
| 75
| 0.621172
|
fe4a13e20e6d4043a1f462271ece5e39a66becc3
| 138
|
py
|
Python
|
src/concurrency/__init__.py
|
technicaltitch/django-concurrency
|
9a289dc007b1cdf609b7dfb77a6d2868abc8097f
|
[
"MIT"
] | null | null | null |
src/concurrency/__init__.py
|
technicaltitch/django-concurrency
|
9a289dc007b1cdf609b7dfb77a6d2868abc8097f
|
[
"MIT"
] | null | null | null |
src/concurrency/__init__.py
|
technicaltitch/django-concurrency
|
9a289dc007b1cdf609b7dfb77a6d2868abc8097f
|
[
"MIT"
] | null | null | null |
__author__ = 'sax'
default_app_config = 'concurrency.apps.ConcurrencyConfig'
VERSION = __version__ = "2.1a0"
NAME = 'django-concurrency'
| 23
| 57
| 0.768116
|
139221ad3457a6198031eb603002ff19ddae16b9
| 11,095
|
py
|
Python
|
synapse/app/federation_sender.py
|
cgarwood82/synapse
|
e2cce15af16cd85d5379e8d961680028bfc9e754
|
[
"Apache-2.0"
] | 1
|
2020-03-05T12:58:46.000Z
|
2020-03-05T12:58:46.000Z
|
synapse/app/federation_sender.py
|
cgarwood82/synapse
|
e2cce15af16cd85d5379e8d961680028bfc9e754
|
[
"Apache-2.0"
] | null | null | null |
synapse/app/federation_sender.py
|
cgarwood82/synapse
|
e2cce15af16cd85d5379e8d961680028bfc9e754
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from twisted.internet import defer, reactor
from twisted.web.resource import NoResource
import synapse
from synapse import events
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.replication.tcp.streams._base import ReceiptsStream
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.federation_sender")
class FederationSenderSlaveStore(
SlavedDeviceInboxStore,
SlavedTransactionStore,
SlavedReceiptsStore,
SlavedEventStore,
SlavedRegistrationStore,
SlavedDeviceStore,
SlavedPresenceStore,
):
def __init__(self, db_conn, hs):
super(FederationSenderSlaveStore, self).__init__(db_conn, hs)
# We pull out the current federation stream position now so that we
# always have a known value for the federation position in memory so
# that we don't have to bounce via a deferred once when we start the
# replication streams.
self.federation_out_pos_startup = self._get_federation_out_pos(db_conn)
def _get_federation_out_pos(self, db_conn):
sql = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?"
sql = self.database_engine.convert_param_style(sql)
txn = db_conn.cursor()
txn.execute(sql, ("federation",))
rows = txn.fetchall()
txn.close()
return rows[0][0] if rows else -1
class FederationSenderServer(HomeServer):
DATASTORE_CLASS = FederationSenderSlaveStore
def _listen_http(self, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
site_tag = listener_config.get("tag", port)
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
)
logger.info("Synapse federation_sender now listening on port %d", port)
def start_listening(self, listeners):
for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
_base.listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix", password="rabbithole", globals={"hs": self}
),
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener["bind_addresses"], listener["port"])
else:
logger.warning("Unrecognized listener type: %s", listener["type"])
self.get_tcp_replication().start_replication(self)
def build_tcp_replication(self):
return FederationSenderReplicationHandler(self)
class FederationSenderReplicationHandler(ReplicationClientHandler):
def __init__(self, hs):
super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore())
self.send_handler = FederationSenderHandler(hs, self)
@defer.inlineCallbacks
def on_rdata(self, stream_name, token, rows):
yield super(FederationSenderReplicationHandler, self).on_rdata(
stream_name, token, rows
)
self.send_handler.process_replication_rows(stream_name, token, rows)
def get_streams_to_replicate(self):
args = super(
FederationSenderReplicationHandler, self
).get_streams_to_replicate()
args.update(self.send_handler.stream_positions())
return args
def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse federation sender", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
assert config.worker_app == "synapse.app.federation_sender"
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
if config.send_federation:
sys.stderr.write(
"\nThe send_federation must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``send_federation: false`` to the main config"
"\n"
)
sys.exit(1)
    # Force federation sending to start since it will be disabled in the main config
config.send_federation = True
ss = FederationSenderServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
setup_logging(ss, config, use_worker_options=True)
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
)
_base.start_worker_reactor("synapse-federation-sender", config)
class FederationSenderHandler(object):
"""Processes the replication stream and forwards the appropriate entries
to the federation sender.
"""
def __init__(self, hs, replication_client):
self.store = hs.get_datastore()
self._is_mine_id = hs.is_mine_id
self.federation_sender = hs.get_federation_sender()
self.replication_client = replication_client
self.federation_position = self.store.federation_out_pos_startup
self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
self._last_ack = self.federation_position
self._room_serials = {}
self._room_typing = {}
def on_start(self):
# There may be some events that are persisted but haven't been sent,
# so send them now.
self.federation_sender.notify_new_events(
self.store.get_room_max_stream_ordering()
)
def stream_positions(self):
return {"federation": self.federation_position}
def process_replication_rows(self, stream_name, token, rows):
# The federation stream contains things that we want to send out, e.g.
# presence, typing, etc.
if stream_name == "federation":
send_queue.process_rows_for_federation(self.federation_sender, rows)
run_in_background(self.update_token, token)
# We also need to poke the federation sender when new events happen
elif stream_name == "events":
self.federation_sender.notify_new_events(token)
# ... and when new receipts happen
elif stream_name == ReceiptsStream.NAME:
run_as_background_process(
"process_receipts_for_federation", self._on_new_receipts, rows
)
@defer.inlineCallbacks
def _on_new_receipts(self, rows):
"""
Args:
rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
new receipts to be processed
"""
for receipt in rows:
# we only want to send on receipts for our own users
if not self._is_mine_id(receipt.user_id):
continue
receipt_info = ReadReceipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
receipt.data,
)
yield self.federation_sender.send_read_receipt(receipt_info)
@defer.inlineCallbacks
def update_token(self, token):
try:
self.federation_position = token
# We linearize here to ensure we don't have races updating the token
with (yield self._fed_position_linearizer.queue(None)):
if self._last_ack < self.federation_position:
yield self.store.update_federation_out_pos(
"federation", self.federation_position
)
# We ACK this token over replication so that the master can drop
# its in memory queues
self.replication_client.send_federation_ack(
self.federation_position
)
self._last_ack = self.federation_position
except Exception:
logger.exception("Error updating federation stream position")
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])
| 37.107023
| 86
| 0.666877
|
0ca1477bed55aa59161c4a30087de2a03251a696
| 12,232
|
py
|
Python
|
predictionaglorithmimplementation.py
|
keemsisi/keras-unemployment-prediction
|
78a57a67be6e30baa0aad688a417a7c320222026
|
[
"MIT"
] | 3
|
2018-12-31T02:03:48.000Z
|
2021-02-02T07:59:59.000Z
|
predictionaglorithmimplementation.py
|
keemsisi/keras-unemployment-prediction
|
78a57a67be6e30baa0aad688a417a7c320222026
|
[
"MIT"
] | null | null | null |
predictionaglorithmimplementation.py
|
keemsisi/keras-unemployment-prediction
|
78a57a67be6e30baa0aad688a417a7c320222026
|
[
"MIT"
] | null | null | null |
from keras import models
import numpy as np
import h5py
from keras.layers import Dense, Dropout
from numpy import typename
from keras import activations
from keras import Sequential
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
import pandas as pd
from keras.engine.input_layer import Input
import keras
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import seaborn as sn
from keras.layers.advanced_activations import PReLU
import datetime as dt
from keras import Sequential
from sklearn.model_selection import train_test_split
import h5py
import pandas as pd
import tensorflow as tf
import keras
from sklearn.preprocessing import MinMaxScaler
import seaborn as sn
from keras.layers.advanced_activations import PReLU
import datetime as dtt
from keras.models import model_from_json
import tkinter
from keras import backend as K
from keras.models import load_model
seed_value= 30
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
# 4. Set `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.set_random_seed(seed_value)
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# creating the keras model
# input_layer = keras.engine.input_layer.Input(shape = (32))
# model save counter
count = 0
datasets = pd.read_csv('dataset_new.csv')
# reshaping the network layer to have a shape of (1,None)
x_input_layer = datasets.Year.values.reshape(20, 1)
y_output = datasets.drop(['Year'], axis=1)
model = Sequential()
# split the data
x_train, x_test, y_train, y_test = train_test_split(x_input_layer, y_output, test_size=0.30)
# scaling the feature
scalerX = MinMaxScaler()
x_train = scalerX.fit_transform(x_train)
x_test = scalerX.transform(x_test)
# scaling the feature
scalerY = MinMaxScaler()
y_train = scalerY.fit_transform(y_train)
y_test = scalerY.transform(y_test)
"------------------MAIN PROGRAM GOES HERE-------------------------"
# Seed value
def train_model():
# the input_shape = (1,) ==> Each set of the input to be passed into the network
    # the main neural network model for the prediction
    # the root mean squared error metric
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true)))
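    # Worked example (editor's note, not in the original script): for predictions
    # y_pred = [2., 4.] against targets y_true = [1., 3.], the custom metric above
    # computes sqrt(((2-1)**2 + (4-3)**2) / 2) = sqrt(1.0) = 1.0.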
model.add(Dense(40, kernel_initializer='uniform', input_shape=(1,), activation="relu", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
model.add(Dense(40, activation="relu", kernel_initializer="uniform", bias_initializer="zeros"))
    # adding the output dense layer with 3 units
model.add(Dense(units=3, activation="linear", kernel_initializer="uniform", bias_initializer="zeros"))
    # compiling the model and setting the parameters used to train it
model.compile(loss=['mse'], metrics=['mse', 'mae', 'mape', 'cosine', 'accuracy', 'squared_hinge', 'cosine','msle',root_mean_squared_error],
optimizer=keras.optimizers.Adam
(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
model.fit(x_train, y_train, batch_size=10, epochs=3000, verbose=1)
model.summary()
model.save_weights("network_weights.h5".format(dt.datetime.now()))
model.save("model.h5")
# getting the metrics results from here...
print("----------METRICS EVALUATION RESULTS---------------------------")
print("-----------------------MEAN SQUARED ERROR---------------------")
print(model.history.history['mean_squared_error'])
print("------------------------MEAN SQUARED LOGARITHMIC ERROR------------------")
print(model.history.history['mean_squared_logarithmic_error'])
print("------------------------TRAINING ACCURACY------------------")
print(model.history.history['acc'])
print("------------------------MEAN ABSOLUTE ERROR-------------------")
print(model.history.history['mean_absolute_error'])
print("------------------------MEAN ABSOLUTE PERCENTAGE ERROR----------------")
print(model.history.history['mean_absolute_percentage_error'])
print("------------------------SQAURED HINGE-----------------------------------")
print(model.history.history['squared_hinge'])
print("-----------------------CONSINE PROXIMITY--------------------------")
print(model.history.history['cosine_proximity_1'])
print("<---------------- ROOT MEAN SQUARED ERROR ----------------->")
# print(model.history.history['root_mean_squared_error'])
def model_initialise():
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
global model
model = load_model('model.h5');
print(model)
#
# def predict_unemployment_rates(features_values):
# features_scaler = MinMaxScaler()
# predict_input = features_scaler.fit_transform(features_values);
# print("Yes it was clicked")
# print(scalerY.inverse_transform( model.predict(predict_input)) )
def predict_unemployment_rate(features_values) :
global predicted_result
predicted_result = ""
count = 0 ;
rate_string = ""
# predicted = model.predict(features)
# print("PREDICTION == ", predicted)
features_scaler = MinMaxScaler()
predict_input = features_scaler.fit_transform(features_values);
print("Yes it was clicked")
predicted = scalerY.inverse_transform(model.predict(predict_input))
print(scalerY.inverse_transform( model.predict(predict_input)) )
for preval in predicted :
for rate in preval:
rate_string +="{0}".format(rate) +"\t"
result ="{0}\t{1}".format(features_values[count][0] , rate_string) +"\n\n"
rate_string = "";
predicted_result +=result
count += 1
# print(predicted_result) ;
prediction_results.config(text= predicted_result)
print("THIS IS THE PREDICTED RESULT ====> ",predicted_result)
def evaluate_prediction_model ():
model.evaluate(x_test, y_test)
def display_graph ():
# ploting the history of the losses and the accuracy
from matplotlib import pylab as plt
plt.figure(figsize=(15, 10))
plt.xlabel('Losses', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
# plt.plot(model.history.epoch, np.array(model.history.history['acc']),label='Train Accuracy')
plt.plot(model.history.history['loss'], model.history.history['acc'], label='Losses and Accuracy')
plt.legend()
# ploting the history of the losses and the accuracy
from matplotlib import pylab as plt
plt.figure(figsize=(15, 10))
plt.xlabel('Losses', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
# plt.plot(model.history.epoch, np.array(model.history.history['acc']),label='Train Accuracy')
plt.scatter(model.history.history['loss'], model.history.history['acc'], label='Losses and Accuracy')
plt.legend()
# ploting the history of the losses and the accuracy
from matplotlib import pylab as plt
plt.figure(figsize=(15, 10))
plt.xlabel('Losses', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.plot(model.history.history['loss'], model.history.history['acc'], label='Losses and Accuracy')
plt.legend()
scalerY.inverse_transform(model.predict(x_train))
scalerX.inverse_transform(x_train)
# ploting the history of the losses and the accuracy
from matplotlib import pylab as plt
plt.figure(figsize=(15, 10))
plt.title("Accuracy Vs Epoch of the Neural Network", fontsize=20)
plt.xlabel('Epoch', fontsize=20)
plt.ylabel('Accuracy', fontsize=20)
plt.plot(model.history.epoch, model.history.history['acc'])
plt.legend()
sn.jointplot(x=model.history.epoch, y=model.history.history['loss'])
def load_network_weights():
    # load the weights of the network
model.load_weights("network_weights2018-12-04 16:43:26.388594.h5");
print("Predict X_Train" , scalerY.inverse_transform(model.predict(x_train)) )
print("Predict X_Test" , scalerY.inverse_transform(model.predict(x_test)) )
print( "Evaluate X_Train and Y_Train",model.evaluate(x_train, y_train))
print( "Evaluate X_test and Y_test",model.evaluate(x_test, y_test))
unemployemnt_rate = 0 ;
unemployemnt_rate_of_females = 0 ;
unemployemnt_rate_of_male = 0 ;
root = tkinter.Tk()
frame = tkinter.Frame(root)
label_notice = tkinter.Label(root , text = "Please enter a single year or multiple years separated with a comma delimiter")
label_notice.pack()
input_value = tkinter.StringVar()
entry = tkinter.Entry(root , width = 200 , textvariable = input_value, font ="Helvetical 44 bold" )
entry.pack()
value_label = tkinter.Label(root , text = "" , width = 200) ;
value_label.pack()
label_enter = tkinter.Label(root, fg="dark green", text=" Enter Year : ")
label_enter.pack()
#
label_message = tkinter.Label(root , text = "Message : ")
label_message.pack()
label_enter.pack()
# predic_btn.pack()
root.title("UNEMPLOYMENT PREDICTION IN NIGERIA")
def validate_and_predict():
try:
features = [[float(val)] for val in input_value.get().split(",")]
print([[value] for value in input_value.get().split(",")])
print(features)
for ft in features :
if (ft[0] // 1000) > 0 :
print(ft[0] / 1000)
print("value =>", ft, " accepted")
label_message.config(text="Prediction was successful...", fg="green");
continue
else :
label_message.config(text="The year to predict should not be less than 1000", fg="red");
                return
        # predict if no error occurs
predict_unemployment_rate(features)
except Exception:
print("Error occured while casting value to number")
label_message.config(text = "Wrong input characters...please enter correct values", fg = "red");
btn = tkinter.Button(root , width = 200 , text = "Predict", command = validate_and_predict )
btn.pack()
label_result = tkinter.Label(root , text = "---------Results-------")
label_result.pack()
label_result = tkinter.Label(root , text = "Unemployment Rate : {0} | Unemployment Rate Of Male : {1} | Unemployment Rate Of Female : {2}".format(unemployemnt_rate ,unemployemnt_rate_of_male, unemployemnt_rate_of_females))
label_result.pack()
prediction_results = tkinter.Label(root , text = "")
prediction_results.pack()
#display the GUI for the User to input the prediction value
if __name__ == "__main__" :
train_model()
model_initialise()
# load_network_weights()
root.geometry('600x600')
root.mainloop()
| 33.512329
| 222
| 0.693754
|
7df6b71d2c7ed3d28ec3022e28c31051dc5a74b6
| 893
|
py
|
Python
|
python-examples/python-heatmap-chapt-3/analysis_OLD.py
|
jhudsl/Reproducibility_in_Cancer_Informatics
|
2aacc8b04ac9d367962c0141ef9a2b2acb24aefa
|
[
"MIT"
] | null | null | null |
python-examples/python-heatmap-chapt-3/analysis_OLD.py
|
jhudsl/Reproducibility_in_Cancer_Informatics
|
2aacc8b04ac9d367962c0141ef9a2b2acb24aefa
|
[
"MIT"
] | 71
|
2021-09-30T12:24:21.000Z
|
2022-03-23T17:23:14.000Z
|
python-examples/python-heatmap-chapt-3/analysis_OLD.py
|
jhudsl/Reproducibility_in_Cancer_Informatics
|
2aacc8b04ac9d367962c0141ef9a2b2acb24aefa
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import pyrefinebio
import statistics
import pandas as pd
import numpy as np
import seaborn as sns; sns.set_theme(color_codes=True)
import IPython
df1=pd.read_csv('a/file/path/only/I/have/SRP070849.tsv', sep='\t')
mdf=pd.read_csv('a/file/path/only/I/have/SRP070849_metadata.tsv', sep='\t')
df1["Gene"]
# per-gene variance across samples; keep only the top 10% most variable rows
df1['calc'] = df1.var(axis=1, skipna=True)
filter_num = df1.calc.quantile([0.90]).values
df2 = df1[df1.calc > float(filter_num)]
# drop the helper variance column from the filtered frame
df2 = df2.drop(['calc'], axis=1)
# groups = mdf.pop('refinebio_title')
# df2
# type(df2)
refinebio_title = mdf.pop('refinebio_title')
keys = dict(zip(refinebio_title.unique(), "rbg"))
# map each sample's group label to a colour and align the colour keys with df2's columns
keys_df = pd.DataFrame(refinebio_title.map(keys))
keys_df = keys_df.set_index(df2.columns)
heatmap = sns.clustermap(df2, cmap ="mako", col_colors = keys_df, dendrogram_ratio = (0, .2), cbar_pos = (-.1, .2, .03, .5))
heatmap.savefig('heatmap.png')
| 35.72
| 124
| 0.733483
|
c892950fe1144054eca14a8980934199b8b094b2
| 18,891
|
py
|
Python
|
kolibri/auth/test/test_api.py
|
rtibbles/kolibri
|
7efdf0497738c793f281013f9913f8ecc1a55f10
|
[
"MIT"
] | null | null | null |
kolibri/auth/test/test_api.py
|
rtibbles/kolibri
|
7efdf0497738c793f281013f9913f8ecc1a55f10
|
[
"MIT"
] | 7
|
2016-06-23T16:01:02.000Z
|
2018-12-01T22:15:13.000Z
|
kolibri/auth/test/test_api.py
|
MingDai/kolibri
|
e4719b7d41a40e0cc9fc4150bc137017643fea62
|
[
"MIT"
] | 1
|
2021-06-01T23:15:26.000Z
|
2021-06-01T23:15:26.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
import collections
import factory
import sys
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase as BaseTestCase
from django.contrib.sessions.models import Session
from .. import models
DUMMY_PASSWORD = "password"
# A weird hack because of http://bugs.python.org/issue17866
if sys.version_info >= (3,):
class APITestCase(BaseTestCase):
def assertItemsEqual(self, *args, **kwargs):
self.assertCountEqual(*args, **kwargs)
else:
class APITestCase(BaseTestCase):
pass
class FacilityFactory(factory.DjangoModelFactory):
class Meta:
model = models.Facility
name = factory.Sequence(lambda n: "Rock N' Roll High School #%d" % n)
class ClassroomFactory(factory.DjangoModelFactory):
class Meta:
model = models.Classroom
name = factory.Sequence(lambda n: "Basic Rock Theory #%d" % n)
class LearnerGroupFactory(factory.DjangoModelFactory):
class Meta:
model = models.LearnerGroup
name = factory.Sequence(lambda n: "Group #%d" % n)
class FacilityUserFactory(factory.DjangoModelFactory):
class Meta:
model = models.FacilityUser
facility = factory.SubFactory(FacilityFactory)
username = factory.Sequence(lambda n: 'user%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class DeviceOwnerFactory(factory.DjangoModelFactory):
class Meta:
model = models.DeviceOwner
username = factory.Sequence(lambda n: 'deviceowner%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class LearnerGroupAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(3)]
self.learner_groups = []
for classroom in self.classrooms:
self.learner_groups += [LearnerGroupFactory.create(parent=classroom) for _ in range(5)]
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_learnergroup_list(self):
response = self.client.get(reverse('learnergroup-list'), format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
('user_ids', [member.id for member in group.get_members()])
)) for group in self.learner_groups]
# assertItemsEqual does not deal well with embedded objects, as it does
# not do a deepEqual, so check each individual list of user_ids
for i, group in enumerate(response.data):
self.assertItemsEqual(group.pop('user_ids'), expected[i].pop('user_ids'))
self.assertItemsEqual(response.data, expected)
def test_learnergroup_detail(self):
response = self.client.get(reverse('learnergroup-detail', kwargs={'pk': self.learner_groups[0].id}), format='json')
expected = {
'id': self.learner_groups[0].id,
'name': self.learner_groups[0].name,
'parent': self.learner_groups[0].parent.id,
'user_ids': [member.id for member in self.learner_groups[0].get_members()],
}
self.assertItemsEqual(response.data, expected)
def test_parent_in_queryparam_with_one_id(self):
classroom_id = self.classrooms[0].id
response = self.client.get(reverse('learnergroup-list'), {'parent': classroom_id},
format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
('user_ids', [member.id for member in group.get_members()]),
)) for group in self.learner_groups if group.parent.id == classroom_id]
# assertItemsEqual does not deal well with embedded objects, as it does
# not do a deepEqual, so check each individual list of user_ids
for i, group in enumerate(response.data):
self.assertItemsEqual(group.pop('user_ids'), expected[i].pop('user_ids'))
self.assertItemsEqual(response.data, expected)
class ClassroomAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(10)]
self.learner_group = LearnerGroupFactory.create(parent=self.classrooms[0])
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_classroom_list(self):
response = self.client.get(reverse('classroom-list'), format='json')
expected = [collections.OrderedDict((
('id', classroom.id),
('name', classroom.name),
('parent', classroom.parent.id),
('learner_count', 0),
('coach_count', 0),
('admin_count', 0),
)) for classroom in self.classrooms]
self.assertItemsEqual(response.data, expected)
def test_classroom_detail(self):
response = self.client.get(reverse('classroom-detail', kwargs={'pk': self.classrooms[0].id}), format='json')
expected = {
'id': self.classrooms[0].id,
'name': self.classrooms[0].name,
'parent': self.classrooms[0].parent.id,
'learner_count': 0,
'coach_count': 0,
'admin_count': 0,
}
self.assertDictEqual(response.data, expected)
class FacilityAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility1 = FacilityFactory.create()
self.facility2 = FacilityFactory.create()
self.user1 = FacilityUserFactory.create(facility=self.facility1)
self.user2 = FacilityUserFactory.create(facility=self.facility2)
def test_sanity(self):
self.assertTrue(self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1))
def test_facility_user_can_get_detail(self):
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
response = self.client.get(reverse('facility-detail', kwargs={'pk': self.facility1.pk}),
format='json')
# .assertDictContainsSubset checks that the first argument is a subset of the second argument
self.assertDictContainsSubset({
'name': self.facility1.name,
}, dict(response.data))
def test_device_admin_can_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 1)
def test_facility_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_anonymous_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_device_admin_can_update_facility(self):
old_facility_name = self.facility1.name
new_facility_name = "Renamed Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, old_facility_name)
response = self.client.put(reverse('facility-detail', kwargs={"pk": self.facility1.id}), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, new_facility_name)
def test_device_admin_can_delete_facility(self):
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 1)
response = self.client.delete(reverse('facility-detail', kwargs={"pk": self.facility1.id}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 0)
class UserCreationTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_creating_device_owner_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
response = self.client.post(reverse('deviceowner-list'), {"username": new_username, "password": new_password}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.DeviceOwner.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.DeviceOwner.objects.get(username=new_username).check_password(bad_password))
def test_creating_facility_user_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.FacilityUser.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.FacilityUser.objects.get(username=new_username).check_password(bad_password))
def test_creating_same_facility_user_throws_400_error(self):
new_username = "goliath"
new_password = "davidsucks"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_creating_same_device_owner_throws_400_error(self):
new_username = "goliath"
new_password = "davidsucks"
data = {"username": new_username, "password": new_password}
response = self.client.post(reverse('deviceowner-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(reverse('deviceowner-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UserUpdateTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.user = FacilityUserFactory.create(facility=self.facility)
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_user_update_info(self):
self.client.patch(reverse('facilityuser-detail', kwargs={'pk': self.user.pk}), {'username': 'foo'}, format="json")
self.user.refresh_from_db()
self.assertEqual(self.user.username, "foo")
def test_user_update_password(self):
new_password = 'baz'
self.client.patch(reverse('facilityuser-detail', kwargs={'pk': self.user.pk}), {'password': new_password}, format="json")
self.client.logout()
response = self.client.login(username=self.user.username, password=new_password, facility=self.facility)
self.assertTrue(response)
def test_device_owner_update_info(self):
self.client.patch(reverse('deviceowner-detail', kwargs={'pk': self.device_owner.pk}), {'username': 'foo'}, format="json")
self.device_owner.refresh_from_db()
self.assertEqual(self.device_owner.username, "foo")
def test_device_owner_update_password(self):
new_password = 'baz'
self.client.patch(reverse('deviceowner-detail', kwargs={'pk': self.device_owner.pk}), {'password': new_password}, format="json")
self.client.logout()
response = self.client.login(username=self.device_owner.username, password=new_password)
self.assertTrue(response)
class LoginLogoutTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.user = FacilityUserFactory.create(facility=self.facility)
self.admin = FacilityUserFactory.create(facility=self.facility, password="bar")
self.facility.add_admin(self.admin)
self.cr = ClassroomFactory.create(parent=self.facility)
self.cr.add_coach(self.admin)
def test_login_and_logout_device_owner(self):
self.client.post(reverse('session-list'), data={"username": self.device_owner.username, "password": DUMMY_PASSWORD})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_login_and_logout_facility_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": DUMMY_PASSWORD, "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_incorrect_credentials_does_not_log_in_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": "foo", "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 0)
def test_session_return_admin_and_coach_kind(self):
self.client.post(reverse('session-list'), data={"username": self.admin.username, "password": "bar", "facility": self.facility.id})
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertTrue(response.data['kind'][0], 'admin')
self.assertTrue(response.data['kind'][1], 'coach')
def test_session_return_anon_kind(self):
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertTrue(response.data['kind'][0], 'anonymous')
class AnonSignUpTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
def test_anon_sign_up_creates_user(self):
response = self.client.post(reverse('signup-list'), data={"username": "user", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.FacilityUser.objects.all())
def test_anon_sign_up_returns_user(self):
full_name = "Bob Lee"
response = self.client.post(reverse('signup-list'), data={"full_name": full_name, "username": "user", "password": DUMMY_PASSWORD})
self.assertEqual(response.data['username'], 'user')
self.assertEqual(response.data['full_name'], full_name)
def test_create_user_with_same_username_fails(self):
FacilityUserFactory.create(username='bob')
response = self.client.post(reverse('signup-list'), data={"username": "bob", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(len(models.FacilityUser.objects.all()), 1)
def test_create_bad_username_fails(self):
response = self.client.post(reverse('signup-list'), data={"username": "(***)", "password": DUMMY_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(models.FacilityUser.objects.all())
def test_sign_up_also_logs_in_user(self):
self.assertFalse(Session.objects.all())
self.client.post(reverse('signup-list'), data={"username": "user", "password": DUMMY_PASSWORD})
self.assertTrue(Session.objects.all())
class FacilityDatasetAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.admin = FacilityUserFactory.create(facility=self.facility)
self.user = FacilityUserFactory.create(facility=self.facility)
self.facility.add_admin(self.admin)
def test_return_dataset_that_user_is_an_admin_for(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), len(models.FacilityDataset.objects.all()))
self.assertEqual(self.admin.dataset_id, response.data[0]['id'])
def test_return_all_datasets_for_device_owner(self):
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), len(models.FacilityDataset.objects.all()))
def test_return_dataset_for_facility_user(self):
self.client.login(username=self.user.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse('facilitydataset-list'))
self.assertEqual(len(response.data), len(models.FacilityDataset.objects.all()))
| 47.825316
| 146
| 0.70118
|
95ada14c27bc684f0a9517617607d3fab4a27ee1
| 5,153
|
py
|
Python
|
pytchat/processors/html_archiver.py
|
mandark321/pytchat
|
39d99ad4afdf9387e207c3b646642c27dcd0a365
|
[
"MIT"
] | null | null | null |
pytchat/processors/html_archiver.py
|
mandark321/pytchat
|
39d99ad4afdf9387e207c3b646642c27dcd0a365
|
[
"MIT"
] | null | null | null |
pytchat/processors/html_archiver.py
|
mandark321/pytchat
|
39d99ad4afdf9387e207c3b646642c27dcd0a365
|
[
"MIT"
] | null | null | null |
import httpx
import os
import re
import time
from base64 import standard_b64encode
from httpx import NetworkError, ReadTimeout
from .chat_processor import ChatProcessor
from .default.processor import DefaultProcessor
from ..exceptions import UnknownConnectionError
PATTERN = re.compile(r"(.*)\(([0-9]+)\)$")
fmt_headers = ['datetime', 'elapsed', 'authorName',
'message', 'superchat', 'type', 'authorChannel']
HEADER_HTML = '''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
'''
TABLE_CSS = '''
table.css {
border-collapse: collapse;
}
table.css thead{
border-collapse: collapse;
border: 1px solid #000
}
table.css tr td{
padding: 0.3em;
border: 1px solid #000
}
table.css th{
padding: 0.3em;
border: 1px solid #000
}
'''
class HTMLArchiver(ChatProcessor):
'''
HTMLArchiver saves chat data as HTML table format.
'''
def __init__(self, save_path, callback=None):
super().__init__()
self.save_path = self._checkpath(save_path)
self.processor = DefaultProcessor()
        self.emoji_table = {}  # table for custom emojis. key: emoji_id, value: base64 encoded image binary.
self.header = [HEADER_HTML]
self.body = ['<body>\n', '<table class="css">\n', self._parse_table_header(fmt_headers)]
self.callback = callback
def _checkpath(self, filepath):
splitter = os.path.splitext(os.path.basename(filepath))
body = splitter[0]
extention = splitter[1]
newpath = filepath
counter = 1
while os.path.exists(newpath):
match = re.search(PATTERN, body)
if match:
counter = int(match[2]) + 1
num_with_bracket = f'({str(counter)})'
body = f'{match[1]}{num_with_bracket}'
else:
body = f'{body}({str(counter)})'
newpath = os.path.join(os.path.dirname(filepath), body + extention)
return newpath
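    # Illustrative behaviour of _checkpath (editor's sketch, not part of the original
    # module): if "archive.html" already exists, the next save goes to
    # "archive(1).html", then "archive(2).html", and so on, so earlier archives are
    # never overwritten.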
def process(self, chat_components: list):
"""
        Render each chat item in chat_components into HTML table rows and
        append them to the in-memory body buffer; nothing is returned here,
        and the file itself is only written out by finalize().
"""
if chat_components is None or len(chat_components) == 0:
return
for c in self.processor.process(chat_components).items:
self.body.extend(
self._parse_html_line((
c.datetime,
c.elapsedTime,
c.author.name,
self._parse_message(c.messageEx),
c.amountString,
c.author.type,
c.author.channelId)
)
)
if self.callback:
self.callback(None, 1)
def _parse_html_line(self, raw_line):
return ''.join(('<tr>',
''.join(''.join(('<td>', cell, '</td>')) for cell in raw_line),
'</tr>\n'))
def _parse_table_header(self, raw_line):
return ''.join(('<thead><tr>',
''.join(''.join(('<th>', cell, '</th>')) for cell in raw_line),
'</tr></thead>\n'))
def _parse_message(self, message_items: list) -> str:
return ''.join(''.join(('<span class="', self._set_emoji_table(item), '" title="', item['txt'], '"></span>'))
if type(item) is dict else item
for item in message_items)
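    # Editor's sketch of _parse_message (assumed message shape, derived from the keys
    # accessed above): an input like ["hello ", {"id": "emj1", "txt": ":wave:", "url": "..."}]
    # becomes 'hello <span class="emj1" title=":wave:"></span>', while the emoji image
    # itself is fetched once and cached in self.emoji_table keyed by its id.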
def _encode_img(self, url):
err = None
for _ in range(5):
try:
resp = httpx.get(url, timeout=30)
break
except (NetworkError, ReadTimeout) as e:
print("Network Error. retrying...")
err = e
time.sleep(3)
else:
raise UnknownConnectionError(str(err))
return standard_b64encode(resp.content).decode()
def _set_emoji_table(self, item: dict):
emoji_id = item['id']
if emoji_id not in self.emoji_table:
self.emoji_table.setdefault(emoji_id, self._encode_img(item['url']))
return emoji_id
def _stylecode(self, name, code, width, height):
return ''.join((".", name, " { display: inline-block; background-image: url(data:image/png;base64,",
code, "); background-repeat: no-repeat; width: ",
str(width), "; height: ", str(height), ";}"))
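    # Editor's example of the CSS emitted by _stylecode (hypothetical class name and
    # truncated base64 payload): _stylecode("emj1", "iVBORw0...", 24, 24) yields
    # '.emj1 { display: inline-block; background-image: url(data:image/png;base64,iVBORw0...); background-repeat: no-repeat; width: 24; height: 24;}'
    # Note that width/height are written without units, exactly as the code above does.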
def _create_styles(self):
return '\n'.join(('<style type="text/css">',
TABLE_CSS,
'\n'.join(self._stylecode(key, self.emoji_table[key], 24, 24)
for key in self.emoji_table.keys()),
'</style>\n'))
def finalize(self):
self.header.extend([self._create_styles(), '</head>\n'])
self.body.extend(['</table>\n</body>\n</html>'])
with open(self.save_path, mode='a', encoding='utf-8') as f:
f.writelines(self.header)
f.writelines(self.body)
| 33.032051
| 117
| 0.540074
|
f7f3713d8e21b003b4dc406f37eadf5be5e080e3
| 7,933
|
py
|
Python
|
documentstore_migracao/utils/xylose_converter.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | null | null | null |
documentstore_migracao/utils/xylose_converter.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | 14
|
2019-03-13T12:19:12.000Z
|
2019-03-19T17:37:08.000Z
|
documentstore_migracao/utils/xylose_converter.py
|
joffilyfe/document-store-migracao
|
b5125b7aedec56f0e8787900bdfd124aaf65e3e3
|
[
"BSD-2-Clause"
] | 3
|
2019-03-12T18:55:55.000Z
|
2019-03-20T18:38:02.000Z
|
import logging
from typing import List
from datetime import datetime
from documentstore_migracao.utils import scielo_ids_generator
from xylose.scielodocument import Journal, Issue
logger = logging.getLogger(__name__)
def date_to_datetime(date: str) -> datetime:
"""Transforma datas no formato ISO em objetos datetime"""
return datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
def parse_date(date: str) -> str:
"""Traduz datas em formato simples ano-mes-dia, ano-mes para
o formato iso utilizado durantr a persistência do Kernel"""
_date = None
try:
_date = (
datetime.strptime(date, "%Y-%m-%d").isoformat(timespec="microseconds") + "Z"
)
except ValueError:
try:
_date = (
datetime.strptime(date, "%Y-%m").isoformat(timespec="microseconds")
+ "Z"
)
except ValueError:
_date = (
datetime.strptime(date, "%Y").isoformat(timespec="microseconds") + "Z"
)
return _date
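# Worked examples for parse_date (editor's note, not in the original module):
#   parse_date("2019-03-13") -> "2019-03-13T00:00:00.000000Z"
#   parse_date("2019-03")    -> "2019-03-01T00:00:00.000000Z"
#   parse_date("2019")       -> "2019-01-01T00:00:00.000000Z"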
def set_metadata(date: str, data: any) -> List[List]:
"""Retorna a estrutura básica de um `campo` de metadata
no formato do Kernel"""
return [[date, data]]
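# Example (editor's note, not in the original module):
#   set_metadata("2019-03-13T00:00:00.000000Z", "Acta Amazonica")
#   -> [["2019-03-13T00:00:00.000000Z", "Acta Amazonica"]]
# i.e. a single-entry list of [date, value] pairs, the shape used for Kernel metadata fields.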
def journal_to_kernel(journal):
"""Transforma um objeto Journal (xylose) para o formato
de dados equivalente ao persistido pelo Kernel em um banco
mongodb"""
    # TODO: Will anything come from xylose to populate the metrics field?
_id = journal.scielo_issn
if not _id:
raise ValueError("É preciso que o periódico possua um id")
_creation_date = parse_date(journal.creation_date)
_metadata = {}
_bundle = {
"_id": _id,
"id": _id,
"created": _creation_date,
"updated": _creation_date,
"items": [],
"metadata": _metadata,
}
if journal.mission:
_mission = [
{"language": lang, "value": value}
for lang, value in journal.mission.items()
]
_metadata["mission"] = set_metadata(_creation_date, _mission)
if journal.title:
_metadata["title"] = set_metadata(_creation_date, journal.title)
if journal.abbreviated_iso_title:
_metadata["title_iso"] = set_metadata(
_creation_date, journal.abbreviated_iso_title
)
if journal.abbreviated_title:
_metadata["short_title"] = set_metadata(
_creation_date, journal.abbreviated_title
)
_metadata["acronym"] = set_metadata(_creation_date, journal.acronym)
if journal.scielo_issn:
_metadata["scielo_issn"] = set_metadata(_creation_date, journal.scielo_issn)
if journal.print_issn:
_metadata["print_issn"] = set_metadata(_creation_date, journal.print_issn)
if journal.electronic_issn:
_metadata["electronic_issn"] = set_metadata(
_creation_date, journal.electronic_issn
)
if journal.status_history:
_metadata["status"] = []
for status in journal.status_history:
_status = {"status": status[1]}
if status[2]:
_status["reason"] = status[2]
            # TODO: We need to check whether these dates are authoritative
_metadata["status"].append([parse_date(status[0]), _status])
if journal.subject_areas:
_metadata["subject_areas"] = set_metadata(
_creation_date, [area.upper() for area in journal.subject_areas]
)
if journal.sponsors:
_sponsors = [{"name": sponsor} for sponsor in journal.sponsors]
_metadata["sponsors"] = set_metadata(_creation_date, _sponsors)
if journal.wos_subject_areas:
_metadata["subject_categories"] = set_metadata(
_creation_date, journal.wos_subject_areas
)
if journal.submission_url:
_metadata["online_submission_url"] = set_metadata(
_creation_date, journal.submission_url
)
if journal.next_title:
_next_journal = {"name": journal.next_title}
_metadata["next_journal"] = set_metadata(_creation_date, _next_journal)
if journal.previous_title:
_previous_journal = {"name": journal.previous_title}
_metadata["previous_journal"] = set_metadata(_creation_date, _previous_journal)
_contact = {}
if journal.editor_email:
_contact["email"] = journal.editor_email
if journal.editor_address:
_contact["address"] = journal.editor_address
if _contact:
_metadata["contact"] = set_metadata(_creation_date, _contact)
return _bundle
def get_journal_issn_in_issue(issue) -> str:
"""Retorna o ISSN ID de um periódico na
perspectiva da issue"""
return issue.data.get("issue").get("v35")[0]["_"]
def issue_to_kernel(issue):
"""Transforma um objeto Issue (xylose) para o formato
de dados equivalente ao persistido pelo Kernel em um banco
mongodb"""
issn_id = issue.data["issue"]["v35"][0]["_"]
_creation_date = parse_date(issue.publication_date)
_metadata = {}
_bundle = {
"created": _creation_date,
"updated": _creation_date,
"items": [],
"metadata": _metadata,
}
_year = str(date_to_datetime(_creation_date).year)
_month = str(date_to_datetime(_creation_date).month)
_metadata["publication_year"] = set_metadata(_creation_date, _year)
if issue.volume:
_metadata["volume"] = set_metadata(_creation_date, issue.volume)
if issue.number:
_metadata["number"] = set_metadata(_creation_date, issue.number)
_supplement = None
    if issue.type == "supplement":
_supplement = "0"
if issue.supplement_volume:
_supplement = issue.supplement_volume
elif issue.supplement_number:
_supplement = issue.supplement_number
_metadata["supplement"] = set_metadata(_creation_date, _supplement)
if issue.titles:
_titles = [
{"language": lang, "value": value} for lang, value in issue.titles.items()
]
_metadata["titles"] = set_metadata(_creation_date, _titles)
publication_months = {}
if issue.start_month and issue.end_month:
publication_months["range"] = (int(issue.start_month), int(issue.end_month))
elif _month:
publication_months["month"] = int(_month)
_metadata["publication_months"] = set_metadata(_creation_date, publication_months)
_id = scielo_ids_generator.issue_id(
issn_id, _year, issue.volume, issue.number, _supplement
)
_bundle["_id"] = _id
_bundle["id"] = _id
return _bundle
def get_journal_issns_from_issue(issue: Issue) -> List[str]:
"""Busca por todos os issns de periódico disponíveis em uma
issue. Os ISSNs podem estar nos dois campos v35 e v435 com
ou sem repetição"""
issns = [get_journal_issn_in_issue(issue)]
if not "v435" in issue.data["issue"]:
return issns
issns.extend([issn["_"] for issn in issue.data["issue"]["v435"]])
return list(set(issns))
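# Editor's sketch (hypothetical record, not real data): for an issue whose record has
# v35 = [{"_": "0001-3765"}] and v435 = [{"_": "0001-3765"}, {"_": "1678-2690"}],
# get_journal_issns_from_issue returns the deduplicated list
# ["0001-3765", "1678-2690"] (order not guaranteed, since a set is used).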
def find_documents_bundles(journal: dict, issues: List[Issue]):
"""Busca o id de todos os fascículos associados ao periódico. Um id
é encontrado quando pelo menos um ISSN relacionado ao fascículo também
está presente no periódico.
"""
issues_ids = []
journal_issns = []
journal_issn_fields = ["electronic_issn", "print_issn", "scielo_issn"]
_metadata = journal["metadata"]
for field in journal_issn_fields:
try:
journal_issns.append(_metadata[field][0][-1])
except (KeyError, IndexError):
pass
journal_issns = list(set(journal_issns))
for issue in issues:
issue_issns = get_journal_issns_from_issue(issue)
has_matched_issns = list(
filter(lambda issn: issn in journal_issns, issue_issns)
)
if has_matched_issns:
issues_ids.append(issue_to_kernel(issue).get("id"))
return issues_ids
| 30.163498
| 88
| 0.651708
|
1ab87cd7ac4ac9cebb01cbd4ea33b5e3828e8fd8
| 4,174
|
py
|
Python
|
backend/api/tests/test_serializers.py
|
jenhaoyang/Disfactory
|
40f9e14044f0e457489164745095f24cd1e923dd
|
[
"MIT"
] | null | null | null |
backend/api/tests/test_serializers.py
|
jenhaoyang/Disfactory
|
40f9e14044f0e457489164745095f24cd1e923dd
|
[
"MIT"
] | null | null | null |
backend/api/tests/test_serializers.py
|
jenhaoyang/Disfactory
|
40f9e14044f0e457489164745095f24cd1e923dd
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from django.test import TestCase
from ..serializers import FactorySerializer, ImageSerializer
from ..models import Factory, ReportRecord, Image
class FactorySerializersTestCase(TestCase):
def setUp(self):
self.im1 = Image.objects.create(image_path="https://i.imgur.com/RxArJUc.png")
self.im2 = Image.objects.create(image_path="https://imgur.dcard.tw/BB2L2LT.jpg")
self.request_body = {
"name": "a new factory",
"type": "2-3",
"images": [self.im1.id, self.im2.id],
"other": "這個工廠實在太臭啦,趕緊檢舉吧",
"lat": 23.234,
"lng": 120.1,
"nickname": "路過的家庭主婦",
"contact": "07-7533967",
}
def test_factory_serializer_correct_report_date(self):
factory = Factory(
name="test factory",
lat=23,
lng=121,
landcode="000120324",
factory_type="2-1",
status="A",
status_time=datetime.now()
)
factory.save()
# created first time, w/o any ReportRecord
# should have null reported_at
serialized_factory = FactorySerializer(factory)
self.assertEqual(serialized_factory.data["type"], factory.factory_type)
self.assertIsNone(serialized_factory.data["reported_at"])
report_record1 = ReportRecord.objects.create(
factory=factory,
action_type="post_image",
action_body={},
contact="0800-092000",
others="猴~被我拍到了吧",
created_at=factory.created_at + timedelta(seconds=1)
)
im1 = Image.objects.create(
image_path="https://i.imgur.com/RxArJUc.png",
factory=factory,
report_record=report_record1
)
report_record2 = ReportRecord.objects.create(
factory=factory,
action_type="post_image",
action_body={},
contact="07-7533967",
others="昨天在這裡辦演唱會,但旁邊居然在蓋工廠。不錄了不錄了!",
created_at=factory.created_at + timedelta(days=1),
)
im2 = Image.objects.create(
image_path="https://imgur.dcard.tw/BB2L2LT.jpg",
factory=factory,
report_record=report_record2,
)
report_record_latest = ReportRecord.objects.create(
factory=factory,
action_type="PUT",
action_body={"status": "D"},
contact="02-2392-0371",
others="已呈報",
created_at=factory.created_at + timedelta(days=2)
) # this one should be the `reported_at` of serialized factory
factory.refresh_from_db()
serialized_factory = FactorySerializer(factory)
self.assertEqual(
serialized_factory.data["reported_at"],
report_record_latest.created_at,
)
self.assertCountEqual(serialized_factory.data["images"], [
ImageSerializer(im1).data,
ImageSerializer(im2).data,
])
def test_factory_serializer_validate_body(self):
serializer = FactorySerializer(data=self.request_body)
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.errors, {})
def test_factory_serializer_validate_body_with_wrong_lat(self):
wrong_request_body = self.request_body.copy()
wrong_request_body["lat"] = 70
serializer = FactorySerializer(data=wrong_request_body)
self.assertFalse(serializer.is_valid())
self.assertIn("lat", serializer.errors)
def test_factory_serializer_validate_body_with_wrong_lng(self):
wrong_request_body = self.request_body.copy()
wrong_request_body["lng"] = -10
serializer = FactorySerializer(data=wrong_request_body)
self.assertFalse(serializer.is_valid())
self.assertIn("lng", serializer.errors)
class ImageSerializersTestCase(TestCase):
    def test_image_serializer_correct_url(self):
img = Image(image_path="https://imgur.com/qwer")
serialized_img = ImageSerializer(img)
self.assertEqual(serialized_img.data['url'], img.image_path)
| 36.295652
| 88
| 0.623622
|
1bdc58645e3ee563e389e5c6472b4ef2d8470073
| 3,263
|
py
|
Python
|
mr_reports/utils.py
|
hlongmore/django-mr_reports
|
249bdd57ec6fb122cd538ad60432ab899d0f7c72
|
[
"BSD-3-Clause"
] | null | null | null |
mr_reports/utils.py
|
hlongmore/django-mr_reports
|
249bdd57ec6fb122cd538ad60432ab899d0f7c72
|
[
"BSD-3-Clause"
] | null | null | null |
mr_reports/utils.py
|
hlongmore/django-mr_reports
|
249bdd57ec6fb122cd538ad60432ab899d0f7c72
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Re-usable generic functions, and handling special tasks like emailing reports
"""
import datetime
import re
from django.test.client import Client
from django.core.mail import EmailMultiAlternatives
from django.http import QueryDict, HttpRequest
from django.db import transaction
from django.utils import timezone
from models import Report, Subscription, Parameter
from views import render_report
from django.conf import settings
# Keep all DB access for a given Subscription within one txn to potentially allow
# multiple concurrent Subscription runners.
@transaction.atomic
def execute_subscription(sched_id, force_run=False, today=None):
"""Handles creating the report PDF and sending the email.
    (A future optimization could re-use the PDF if multiple Subscriptions of
the same report are running at the same time.)
'today' defaults to current day, but you can set different dates for testing.
    This accepts the ID instead of the object itself in order to handle concurrency issues.
(It would seem to make sense to put this method with the Subscription model, however it leads to
some circular imports so it was cleaner to break it out into a utility function)."""
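    # Editor's note on the locking below (not part of the original module): because the
    # row is fetched with select_for_update() inside the @transaction.atomic block, a
    # concurrent runner calling execute_subscription() for the same id blocks until
    # this transaction commits, and then re-reads the freshly saved record before
    # deciding whether it should send again.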
#Locks record until this function completes
sched_obj = Subscription.objects.select_for_update().get(pk=sched_id)
#check whether we should send
if not force_run:
if not sched_obj.should_send(today=today):
return False
sched_obj.last_scheduled_run = timezone.localtime(timezone.now())
    # both settings must be present in order to render and email the PDF
    if not (getattr(settings, 'MR_REPORTS_WKHTMLTOPDF_PATH', '') and getattr(settings, 'BASE_PATH', '')):
sched_obj.last_run_succeeded = False
sched_obj.save()
raise ValueError("PDF generation not available. Please add and set 'MR_REPORTS_WKHTMLTOPDF_PATH', and 'BASE_PATH' in your settings.py file.")
#Generate PDF
mock_request = HttpRequest()
mock_request.method = 'GET'
if sched_obj.report_parameters:
mock_request.GET = QueryDict(sched_obj.report_parameters.lstrip('?'))
else:
#If the report has parameters and none are provided, provide dummy GET data
if Parameter.objects.filter(dataset__report=sched_obj.report):
mock_request.GET = QueryDict('use_defaults')
response = render_report(mock_request, report_id=sched_obj.report.pk, format='pdf')
#Send email
full_url = settings.BASE_PATH.rstrip('/') + sched_obj.report.get_absolute_url()
message = """\
Greetings,<br><br>
This is a snapshot of the report '%s'.<br><br>
Go here to view the realtime version of the report and/or change your subscription: <br>
<a href="%s">%s</a>
<br><br>
""" % (sched_obj.report.title, full_url, full_url)
message += sched_obj.email_body_extra
subject = 'Scheduled Report - ' + sched_obj.email_subject
text_content = re.sub(r'<[^>]+>','',message)
html_content = message
msg = EmailMultiAlternatives(subject, text_content, sched_obj.send_to.email, [sched_obj.send_to.email])
msg.attach_alternative(html_content, "text/html")
msg.attach(sched_obj.report.filename()+'.pdf', response.content, response['Content-Type'])
msg.send()
sched_obj.last_run_succeeded = True
sched_obj.save()
return True
| 40.283951
| 149
| 0.734907
|
f0471f036aa5c4b43b1b73a7d3a215337b4f980c
| 8,953
|
py
|
Python
|
forecast/views/view_forecast/forecast_summary.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 3
|
2020-01-05T16:46:42.000Z
|
2021-08-02T08:08:39.000Z
|
forecast/views/view_forecast/forecast_summary.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 30
|
2019-11-28T15:16:35.000Z
|
2021-08-16T14:49:58.000Z
|
forecast/views/view_forecast/forecast_summary.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | null | null | null |
from django.http import Http404
from django.http.response import HttpResponseRedirect
from django.shortcuts import reverse
from costcentre.forms import DirectorateCostCentresForm
from forecast.forms import ForecastPeriodForm
from forecast.tables import (
ForecastSubTotalTable,
ForecastWithLinkTable,
)
from forecast.utils.query_fields import (
SHOW_COSTCENTRE,
)
from forecast.views.base import (
CostCentreForecastMixin,
DITForecastMixin,
DirectorateForecastMixin,
ForecastViewPermissionMixin,
ForecastViewTableMixin,
GroupForecastMixin,
)
class ForecastMultiTableMixin(ForecastViewTableMixin):
def class_name(self):
return "wide-table"
def get_tables(self):
"""
Return an array of table instances containing data.
"""
filter_code = ""
self.field_infos.hierarchy_type = self.hierarchy_type
pivot_filter = {}
arg_name = self.field_infos.filter_codes
if arg_name:
filter_code = self.kwargs[arg_name]
pivot_filter = {self.field_infos.filter_selector: f"{filter_code}"}
hierarchy_order_list = self.field_infos.hierarchy_order_list
hierarchy_columns = self.field_infos.hierarchy_columns
hierarchy_data = self.data_model.view_data.subtotal_data(
self.field_infos.hierarchy_sub_total_column,
self.field_infos.hierarchy_sub_total,
hierarchy_columns.keys(),
pivot_filter,
year=self.year,
order_list=hierarchy_order_list,
)
programme_data = self.data_model.view_data.subtotal_data(
self.field_infos.programme_display_sub_total_column,
self.field_infos.programme_sub_total,
self.field_infos.programme_columns.keys(),
pivot_filter,
year=self.year,
order_list=self.field_infos.programme_order_list,
)
expenditure_data = self.data_model.view_data.subtotal_data(
self.field_infos.expenditure_display_sub_total_column,
self.field_infos.expenditure_sub_total,
self.field_infos.expenditure_columns.keys(),
pivot_filter,
year=self.year,
order_list=self.field_infos.expenditure_order_list,
)
# In the project report, exclude rows without a project code.
k = f"{self.field_infos.project_code_field}__isnull"
pivot_filter.update({k: False})
project_data = self.data_model.view_data.subtotal_data(
self.field_infos.project_display_sub_total_column,
self.field_infos.project_sub_total,
self.field_infos.project_columns.keys(),
pivot_filter,
year=self.year,
order_list=self.field_infos.project_order_list,
)
# Build the table structures
if self.field_infos.hierarchy_type == SHOW_COSTCENTRE:
programme_table = ForecastSubTotalTable(
self.field_infos.programme_columns,
programme_data,
**self.table_kwargs,
)
else:
programme_table = ForecastWithLinkTable(
self.field_infos.programme_name_field,
self.field_infos.programme_detail_view,
[self.field_infos.programme_code_field,
self.field_infos.expenditure_type_name_field,
self.period],
filter_code,
self.field_infos.programme_columns,
programme_data,
**self.table_kwargs,
)
programme_table.attrs["caption"] = "Control total report"
programme_table.tag = self.table_tag
expenditure_table = ForecastWithLinkTable(
self.field_infos.budget_category_name_field,
self.field_infos.expenditure_view,
[
self.field_infos.budget_category_id_field,
self.field_infos.budget_type_field,
self.period,
],
filter_code,
self.field_infos.expenditure_columns,
expenditure_data,
**self.table_kwargs,
)
expenditure_table.attrs["caption"] = "Expenditure report"
expenditure_table.tag = self.table_tag
project_table = ForecastWithLinkTable(
self.field_infos.project_name_field,
self.field_infos.project_detail_view,
[self.field_infos.project_code_field, self.period],
filter_code,
self.field_infos.project_columns,
project_data,
**self.table_kwargs,
)
project_table.attrs["caption"] = "Project report"
project_table.tag = self.table_tag
if self.field_infos.hierarchy_type == SHOW_COSTCENTRE:
hierarchy_table = ForecastSubTotalTable(
hierarchy_columns,
hierarchy_data,
**self.table_kwargs,
)
else:
hierarchy_table = ForecastWithLinkTable(
self.field_infos.hierarchy_view_link_column,
self.field_infos.hierarchy_view,
[self.field_infos.hierarchy_view_code, self.period],
"",
hierarchy_columns,
hierarchy_data,
**self.table_kwargs,
)
hierarchy_table.attrs["caption"] = "Forecast hierarchy report"
hierarchy_table.tag = self.table_tag
self.tables = [
hierarchy_table,
programme_table,
expenditure_table,
project_table,
]
return self.tables
class DITView(ForecastViewPermissionMixin, ForecastMultiTableMixin, DITForecastMixin):
template_name = "forecast/view/dit.html"
def post(self, request, *args, **kwargs):
new_period = request.POST.get("selected_period", None,)
return HttpResponseRedirect(
reverse("forecast_dit", kwargs={"period": new_period})
)
class GroupView(
ForecastViewPermissionMixin, ForecastMultiTableMixin, GroupForecastMixin,
):
template_name = "forecast/view/group.html"
def post(self, request, *args, **kwargs):
new_period = request.POST.get("selected_period", None,)
return HttpResponseRedirect(
reverse(
"forecast_group",
kwargs={
"group_code": self.kwargs["group_code"],
"period": new_period,
},
)
)
class DirectorateView(
ForecastViewPermissionMixin, ForecastMultiTableMixin, DirectorateForecastMixin,
):
template_name = "forecast/view/directorate.html"
def post(self, request, *args, **kwargs):
new_period = request.POST.get("selected_period", None,)
return HttpResponseRedirect(
reverse(
"forecast_directorate",
kwargs={
"directorate_code": self.kwargs["directorate_code"],
"period": new_period,
},
)
)
class CostCentreView(
ForecastViewPermissionMixin,
ForecastMultiTableMixin,
CostCentreForecastMixin
):
template_name = "forecast/view/cost_centre.html"
def cost_centres_form(self):
cost_centre_code = self.cost_centre_code
return DirectorateCostCentresForm(
cost_centre_code=cost_centre_code,
year=self.year
)
@property
def cost_centre_code(self):
return self.kwargs['cost_centre_code']
def period_form(self):
return ForecastPeriodForm(selected_period=self.period)
def post(self, request, *args, **kwargs):
# Checking selected_period is needed to find out if we are posting after
# changing the period or changing the cost centre
selected_period = request.POST.get("selected_period", None,)
if selected_period is None:
# Cost centre changed
selected_cost_centre_code = request.POST.get("cost_centre", None,)
if selected_cost_centre_code:
return HttpResponseRedirect(
reverse(
"forecast_cost_centre",
kwargs={
"cost_centre_code": selected_cost_centre_code,
"period": self.period,
},
)
)
else:
raise Http404("Cost Centre not found")
else:
if selected_period != self.period:
return HttpResponseRedirect(
reverse(
"forecast_cost_centre",
kwargs={
"cost_centre_code": self.cost_centre_code,
"period": selected_period,
},
)
)
| 34.171756
| 86
| 0.606501
|
d9093c01e9a6486e0d4936455a093edc8d5319f0
| 2,374
|
py
|
Python
|
data_map.py
|
Rupt/which_is_real
|
84d59afb7161418763a26d2dd30f07ad93664809
|
[
"MIT"
] | 1
|
2021-12-03T06:09:02.000Z
|
2021-12-03T06:09:02.000Z
|
data_map.py
|
Rupt/which_is_real
|
84d59afb7161418763a26d2dd30f07ad93664809
|
[
"MIT"
] | 1
|
2022-01-29T15:21:47.000Z
|
2022-01-29T15:21:47.000Z
|
data_map.py
|
Rupt/which_is_real
|
84d59afb7161418763a26d2dd30f07ad93664809
|
[
"MIT"
] | 1
|
2022-01-29T15:18:40.000Z
|
2022-01-29T15:18:40.000Z
|
"""
Prepare data for the 2d map example.
"""
import os
import numba
import numpy
from PIL import Image
BASE_MAP = "images/mapdraw_poster.png"
FOLDER = "data_map"
def main():
img = imload(BASE_MAP)
unique = numpy.unique(img)
for x in unique:
blobs = find_isolated(img, x)
for i, mask in enumerate(blobs):
imdump(mask, "mask_%d_%d.png" % (x, i))
def find_isolated(img, level):
"""Return a list of image masks for isolated blobs at level."""
select = (img == level).astype(numpy.uint8)
blobs = []
while select.any():
mask = find_a_blob(select)
select[mask] = 0
blobs.append(mask)
return blobs
@numba.njit(numba.bool_[:, :](numba.uint8[:, :]))
def find_a_blob(img):
"""Return a mask for a contiguous blob in img.
Image should contain 1 in allowed regions, 0 in disallowed.
Horrible algorithm, but simple and fit for purpose.
"""
img = img.copy()
height, width = img.shape
def find_seed():
for y in range(height):
for x in range(width):
if img[y, x]:
return y, x
return -1, -1
y, x = find_seed()
if y < 0:
return numpy.zeros(img.shape, numba.bool_)
img = img.copy()
img[y, x] = 2
def update(y, x):
hit = img[y, x] == 1
img[y, x] += hit
return hit
while True:
change = False
for y in range(height):
for x in range(width):
# 0 -> denied
# 1 -> allowed
# 2 -> in our blob
if img[y, x] != 2:
continue
# infect up, down, left, right directions
change |= update(max(0, y - 1), x)
change |= update(min(height - 1, y + 1), x)
change |= update(y, max(0, x - 1))
change |= update(y, min(width - 1, x + 1))
if not change:
break
return img == 2
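# Illustrative only: a tiny, hypothetical input showing what find_a_blob returns.
# With the up/down/left/right infection above, only the blob containing the first 1
# (in row-major order) is kept:
#
#   img = numpy.array([[1, 1, 0],
#                      [0, 0, 0],
#                      [0, 1, 1]], dtype=numpy.uint8)
#   find_a_blob(img)
#   # -> [[ True,  True, False],
#   #     [False, False, False],
#   #     [False, False, False]]
#
# find_isolated(img, 1) would then return two such masks, one per isolated blob.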
# image utilities
def imload(filename):
"""Return an image array loaded from filename."""
return numpy.array(Image.open(filename))
def imdump(img, filename, *, log=True):
"""Write a numpy array as image filename."""
fullpath = os.path.join(FOLDER, filename)
img = Image.fromarray(img)
img.save(fullpath)
if __name__ == "__main__":
main()
| 21.779817
| 67
| 0.538332
|
82cd40dde852df34836dae9c52391c6b167143c0
| 6,223
|
py
|
Python
|
jina/__init__.py
|
Virus2466/jina
|
9ca715bf73558c9a63aeb43205073a4404011a47
|
[
"Apache-2.0"
] | 1
|
2022-02-11T07:19:59.000Z
|
2022-02-11T07:19:59.000Z
|
jina/__init__.py
|
Sangwan5688/jina
|
ecd810543e19f91af80e91df11afb03ff96b1ec6
|
[
"Apache-2.0"
] | null | null | null |
jina/__init__.py
|
Sangwan5688/jina
|
ecd810543e19f91af80e91df11afb03ff96b1ec6
|
[
"Apache-2.0"
] | null | null | null |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
import warnings as _warnings
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
__windows__ = _sys.platform == 'win32'
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, though this seems to have no effect; the variable must still be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
_set_start_method(_start_method.lower())
_warnings.warn(f'multiprocessing start method is set to `{_start_method.lower()}`')
_os.environ.pop('JINA_MP_START_METHOD')
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '2.4.7'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.86'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DISABLE_UVLOOP',
'JINA_FULL_CLI',
'JINA_HUBBLE_REGISTRY',
'JINA_HUB_CACHE_DIR',
'JINA_HUB_ROOT',
'JINA_K8S_USE_TEST_PIP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_MP_START_METHOD',
'JINA_OPTIMIZER_TRIAL_WORKSPACE',
'JINA_POD_NAME',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_VCS_VERSION',
)
__default_host__ = _os.environ.get(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_endpoint__ = '/default'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
'groundtruths',
'groundtruths_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__root_dir__',
'__default_endpoint__',
'__default_executor__',
'__num_args_executor_func__',
'__unset_msg__',
'__windows__',
]
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
JINA_GLOBAL.scipy_installed = None
JINA_GLOBAL.tensorflow_installed = None
JINA_GLOBAL.torch_installed = None
JINA_GLOBAL.dgl_installed = None
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
with parallel plot generators, where the Ubuntu default of ulimit -n 1024 or the OS X
El Capitan default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Client
from jina.clients import Client
# Document
from jina.types.document import Document
from jina.types.arrays.document import DocumentArray
from jina.types.arrays.memmap import DocumentArrayMemmap
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.decorators import requests
# Flow
from jina.flow.base import Flow
from jina.flow.asyncio import AsyncFlow
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend(_names_with_underscore)
| 29.633333
| 122
| 0.712839
|
fca440f5462e933e0fc6a749f2b5e5ccff8ab61a
| 474
|
py
|
Python
|
src/python/WMCore/WMBS/Oracle/Fileset/BulkNewReturn.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/Oracle/Fileset/BulkNewReturn.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/Oracle/Fileset/BulkNewReturn.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_BulkNewReturn_
Oracle implementation of Fileset.BulkNewReturn
"""
__all__ = []
from WMCore.WMBS.MySQL.Fileset.BulkNewReturn import BulkNewReturn as MySQLBulkNewReturn
class BulkNewReturn(MySQLBulkNewReturn):
"""
Does a bulk commit of Fileset, followed by returning their IDs
"""
sql = """INSERT INTO wmbs_fileset (id, name, last_update, open)
VALUES (wmbs_fileset_SEQ.nextval, :NAME, :LAST_UPDATE, :OPEN)"""
| 21.545455
| 87
| 0.71308
|
9603ea94d5a2d93867c135f64e134fee81ec8185
| 376
|
py
|
Python
|
Textbook/Chapter 2/addListElements3.py
|
hunterluepke/Learn-Python-for-Stats-and-Econ
|
d580a8e27ba937fc8401ac6d0714b6488ac8bbb6
|
[
"MIT"
] | 16
|
2019-01-10T18:54:13.000Z
|
2022-01-28T20:07:20.000Z
|
Textbook/Chapter 2/addListElements3.py
|
hunterluepke/Learn-Python-for-Stats-and-Econ
|
d580a8e27ba937fc8401ac6d0714b6488ac8bbb6
|
[
"MIT"
] | null | null | null |
Textbook/Chapter 2/addListElements3.py
|
hunterluepke/Learn-Python-for-Stats-and-Econ
|
d580a8e27ba937fc8401ac6d0714b6488ac8bbb6
|
[
"MIT"
] | 15
|
2019-01-24T17:11:20.000Z
|
2021-12-11T01:53:57.000Z
|
#addListElements3.py
list1 = [5, 4, 9, 10, 3, 5]
list2 = [6, 3, 2, 1, 5, 3]
print("list1 elements:", list1[0], list1[1], list1[2], list1[3], list1[4])
print("list2 elements:", list2[0], list2[1], list2[2], list2[3], list2[4])
list3 = []
j = len(list1)
if j == len(list2):
for i in range(0, len(list2)):
list3.insert(i,list1[i] + list2[i])
print("list3:", list3)
| 26.857143
| 74
| 0.587766
|
ebc4ba93918b2ca0d1859037cf5c2415d582b199
| 1,263
|
py
|
Python
|
main.py
|
tikismoke/mqtt-screen-power
|
9712c6993c2f573dc4e6a28020e337f45e030a1b
|
[
"MIT"
] | null | null | null |
main.py
|
tikismoke/mqtt-screen-power
|
9712c6993c2f573dc4e6a28020e337f45e030a1b
|
[
"MIT"
] | null | null | null |
main.py
|
tikismoke/mqtt-screen-power
|
9712c6993c2f573dc4e6a28020e337f45e030a1b
|
[
"MIT"
] | null | null | null |
import paho.mqtt.client as mqtt
import subprocess
import config
def on_connect(client, userdata, flags, rc):
# The callback for when the client receives a CONNACK response from the server.
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(config.mqtt_topic)
def on_message(client, userdata, msg):
# The callback for when a PUBLISH message is received from the server.
print(msg.topic+" "+str(msg.payload))
if msg.payload.decode() == "on":
subprocess.Popen(config.power_on_command,
shell=True, stdout=subprocess.PIPE)
else:
subprocess.Popen(config.power_off_command,
shell=True, stdout=subprocess.PIPE)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(config.mqtt_username, config.mqtt_password)
client.connect(config.mqtt_host, config.mqtt_port, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
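# For reference, a minimal sketch of the config.py module this script expects.
# The attribute names are the ones read above; every value here is a placeholder,
# not taken from the actual project:
#
#   mqtt_host = "broker.local"
#   mqtt_port = 1883
#   mqtt_username = "user"
#   mqtt_password = "secret"
#   mqtt_topic = "screen/power"
#   power_on_command = "vcgencmd display_power 1"   # hypothetical command
#   power_off_command = "vcgencmd display_power 0"  # hypothetical command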
| 33.236842
| 83
| 0.730008
|
e47d3136f19178ad8c1d1b483f3e3bb0078c93d2
| 1,459
|
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/types_tests/test_dagster_type_decorator.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/types_tests/test_dagster_type_decorator.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/types_tests/test_dagster_type_decorator.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
from collections import namedtuple
from dagster.core.types.decorator import (
as_dagster_type,
dagster_type,
get_runtime_type_on_decorated_klass,
)
def test_dagster_type_decorator():
@dagster_type(name=None)
class Foo(object):
pass
@dagster_type()
class Bar(object):
pass
@dagster_type
class Baaz(object):
pass
assert get_runtime_type_on_decorated_klass(Foo).name == 'Foo'
assert get_runtime_type_on_decorated_klass(Bar).name == 'Bar'
assert get_runtime_type_on_decorated_klass(Baaz).name == 'Baaz'
def test_dagster_type_decorator_name_desc():
@dagster_type(name='DifferentName', description='desc')
class Something(object):
pass
runtime_type = get_runtime_type_on_decorated_klass(Something)
assert runtime_type.name == 'DifferentName'
assert runtime_type.description == 'desc'
def test_make_dagster_type():
OverwriteNameTuple = as_dagster_type(namedtuple('SomeNamedTuple', 'prop'))
runtime_type = get_runtime_type_on_decorated_klass(OverwriteNameTuple)
assert runtime_type.name == 'SomeNamedTuple'
assert OverwriteNameTuple(prop='foo').prop == 'foo'
OverwriteNameTuple = as_dagster_type(namedtuple('SomeNamedTuple', 'prop'), name='OverwriteName')
runtime_type = get_runtime_type_on_decorated_klass(OverwriteNameTuple)
assert runtime_type.name == 'OverwriteName'
assert OverwriteNameTuple(prop='foo').prop == 'foo'
| 30.395833
| 100
| 0.74366
|
dddc76ec356bdd2a48c99998e14dba74880e626f
| 599
|
py
|
Python
|
Mundo 2/File 071.py
|
PedroHenriqueSimoes/Exercicios-Python
|
702a819d508dd7878b88fb676559d899237ac761
|
[
"MIT"
] | 1
|
2020-04-30T21:32:01.000Z
|
2020-04-30T21:32:01.000Z
|
Mundo 2/File 071.py
|
PedroHenriqueSimoes/Exercicios-Python
|
702a819d508dd7878b88fb676559d899237ac761
|
[
"MIT"
] | 1
|
2021-10-05T02:00:04.000Z
|
2021-10-05T02:00:04.000Z
|
Mundo 2/File 071.py
|
PedroHenriqueSimoes/Exercicios-Python
|
702a819d508dd7878b88fb676559d899237ac761
|
[
"MIT"
] | null | null | null |
sed50 = sed20 = sed10 = sed1 = div = 0
print('=' * 40)
print('{:^40}'.format('Caixa Eletrônico'))
print('=' * 40)
vs = (int(input('Quanto deseja sacar? R$: ')))
while True:
sed50 = vs // 50
s = vs % 50
sed20 = s // 20
s = s % 20
sed10 = s // 10
s = s % 10
sed1 = s // 1
s = s % 1
if s == 0:
break
print('=' * 40)
print('Foram precisas ', end='')
if sed50 != 0:
print(f'{sed50} cédulas de R$50')
if sed20 != 0:
print(f'{sed20} cédulas de R$20')
if sed10 != 0:
print(f'{sed10} cédulas de R$10')
if sed1 != 0:
print(f'{sed1} cédulas de R$1')
| 22.185185
| 46
| 0.512521
|
e1df0256d36da8aed1c1a949a8425a529f2ac481
| 151
|
py
|
Python
|
server/featherlight/resolvers/query/network.py
|
FeatherLightApp/FeatherLight-API
|
1543cc4532f9a8ec1bae51374d4e5d88546c8d15
|
[
"MIT"
] | 3
|
2020-06-28T17:30:57.000Z
|
2022-01-25T18:03:38.000Z
|
server/featherlight/resolvers/query/network.py
|
FeatherLightApp/FeatherLight-API
|
1543cc4532f9a8ec1bae51374d4e5d88546c8d15
|
[
"MIT"
] | null | null | null |
server/featherlight/resolvers/query/network.py
|
FeatherLightApp/FeatherLight-API
|
1543cc4532f9a8ec1bae51374d4e5d88546c8d15
|
[
"MIT"
] | 1
|
2021-02-04T07:14:08.000Z
|
2021-02-04T07:14:08.000Z
|
import os
from ariadne import QueryType
QUERY = QueryType()
@QUERY.field("network")
def r_network(*_):
return os.environ.get("NETWORK").upper()
| 15.1
| 44
| 0.715232
|
a93253baddf4796a1d3be3c80ef0f0979cf95d54
| 5,507
|
py
|
Python
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/pbr/core.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/pbr/core.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/pbr/core.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from distutils import core
from distutils import errors
import logging
import os
import sys
import warnings
from pbr import util
if sys.version_info[0] == 3:
string_type = str
integer_types = (int,)
else:
string_type = basestring # flake8: noqa
integer_types = (int, long) # flake8: noqa
def pbr(dist, attr, value):
"""Implements the actual pbr setup() keyword.
When used, this should be the only keyword in your setup() aside from
`setup_requires`.
If given as a string, the value of pbr is assumed to be the relative path
to the setup.cfg file to use. Otherwise, if it evaluates to true, it
simply assumes that pbr should be used, and the default 'setup.cfg' is
used.
This works by reading the setup.cfg file, parsing out the supported
metadata and command options, and using them to rebuild the
`DistributionMetadata` object and set the newly added command options.
The reason for doing things this way is that a custom `Distribution` class
will not play nicely with setup_requires; however, this implementation may
not work well with distributions that do use a `Distribution` subclass.
"""
if not value:
return
if isinstance(value, string_type):
path = os.path.abspath(value)
else:
path = os.path.abspath('setup.cfg')
if not os.path.exists(path):
raise errors.DistutilsFileError(
'The setup.cfg file %s does not exist.' % path)
# Converts the setup.cfg file to setup() arguments
try:
attrs = util.cfg_to_args(path, dist.script_args)
except Exception:
e = sys.exc_info()[1]
# NB: This will output to the console if no explicit logging has
# been set up - but that's fine; this is a fatal distutils error, so
# being pretty isn't the #1 goal... being diagnosable is.
logging.exception('Error parsing')
raise errors.DistutilsSetupError(
'Error parsing %s: %s: %s' % (path, e.__class__.__name__, e))
# Repeat some of the Distribution initialization code with the newly
# provided attrs
if attrs:
# Skips 'options' and 'licence' support which are rarely used; may
# add back in later if demanded
for key, val in attrs.items():
if hasattr(dist.metadata, 'set_' + key):
getattr(dist.metadata, 'set_' + key)(val)
elif hasattr(dist.metadata, key):
setattr(dist.metadata, key, val)
elif hasattr(dist, key):
setattr(dist, key, val)
else:
msg = 'Unknown distribution option: %s' % repr(key)
warnings.warn(msg)
# Re-finalize the underlying Distribution
try:
super(dist.__class__, dist).finalize_options()
except TypeError:
# If dist is not declared as a new-style class (with object as
# a subclass) then super() will not work on it. This is the case
# for Python 2. In that case, fall back to doing this the ugly way
dist.__class__.__bases__[-1].finalize_options(dist)
# This bit comes out of distribute/setuptools
if isinstance(dist.metadata.version, integer_types + (float,)):
# Some people apparently take "version number" too literally :)
dist.metadata.version = str(dist.metadata.version)
| 40.792593
| 78
| 0.692936
|
fc7a8af7c59b377c12cea6acdabace89366fc01a
| 7,969
|
py
|
Python
|
drivers/sdcard/sdcard.py
|
agners/micropython
|
49167722a5595385f58e3ada647d96eae87eb8c9
|
[
"MIT"
] | 4
|
2016-12-09T17:45:32.000Z
|
2019-05-02T19:59:59.000Z
|
drivers/sdcard/sdcard.py
|
agners/micropython
|
49167722a5595385f58e3ada647d96eae87eb8c9
|
[
"MIT"
] | null | null | null |
drivers/sdcard/sdcard.py
|
agners/micropython
|
49167722a5595385f58e3ada647d96eae87eb8c9
|
[
"MIT"
] | 3
|
2018-07-21T15:57:16.000Z
|
2019-11-29T03:02:35.000Z
|
"""
Micro Python driver for SD cards using SPI bus.
Requires an SPI bus and a CS pin. Provides readblocks and writeblocks
methods so the device can be mounted as a filesystem.
Example usage on pyboard:
import pyb, sdcard, os
sd = sdcard.SDCard(pyb.SPI(1), pyb.Pin.board.X5)
pyb.mount(sd, '/sd2')
os.listdir('/')
Example usage on ESP8266:
import machine, sdcard, os
sd = sdcard.SDCard(machine.SPI(0), machine.Pin(15))
os.umount()
os.VfsFat(sd, "")
os.listdir()
"""
import time
_CMD_TIMEOUT = const(100)
_R1_IDLE_STATE = const(1 << 0)
#R1_ERASE_RESET = const(1 << 1)
_R1_ILLEGAL_COMMAND = const(1 << 2)
#R1_COM_CRC_ERROR = const(1 << 3)
#R1_ERASE_SEQUENCE_ERROR = const(1 << 4)
#R1_ADDRESS_ERROR = const(1 << 5)
#R1_PARAMETER_ERROR = const(1 << 6)
_TOKEN_CMD25 = const(0xfc)
_TOKEN_STOP_TRAN = const(0xfd)
_TOKEN_DATA = const(0xfe)
class SDCard:
def __init__(self, spi, cs):
self.spi = spi
self.cs = cs
self.cmdbuf = bytearray(6)
self.dummybuf = bytearray(512)
for i in range(512):
self.dummybuf[i] = 0xff
self.dummybuf_memoryview = memoryview(self.dummybuf)
# initialise the card
self.init_card()
def init_spi(self, baudrate):
try:
master = self.spi.MASTER
except AttributeError:
# on ESP8266
self.spi.init(baudrate=baudrate, phase=0, polarity=0)
else:
# on pyboard
self.spi.init(master, baudrate=baudrate, phase=0, polarity=0)
def init_card(self):
# init CS pin
self.cs.init(self.cs.OUT, value=1)
# init SPI bus; use low data rate for initialisation
self.init_spi(100000)
# clock card at least 100 cycles with cs high
for i in range(16):
self.spi.write(b'\xff')
# CMD0: init card; should return _R1_IDLE_STATE (allow 5 attempts)
for _ in range(5):
if self.cmd(0, 0, 0x95) == _R1_IDLE_STATE:
break
else:
raise OSError("no SD card")
# CMD8: determine card version
r = self.cmd(8, 0x01aa, 0x87, 4)
if r == _R1_IDLE_STATE:
self.init_card_v2()
elif r == (_R1_IDLE_STATE | _R1_ILLEGAL_COMMAND):
self.init_card_v1()
else:
raise OSError("couldn't determine SD card version")
# get the number of sectors
# CMD9: response R2 (R1 byte + 16-byte block read)
if self.cmd(9, 0, 0, 0, False) != 0:
raise OSError("no response from SD card")
csd = bytearray(16)
self.readinto(csd)
if csd[0] & 0xc0 != 0x40:
raise OSError("SD card CSD format not supported")
self.sectors = ((csd[8] << 8 | csd[9]) + 1) * 1024
#print('sectors', self.sectors)
# CMD16: set block length to 512 bytes
if self.cmd(16, 512, 0) != 0:
raise OSError("can't set 512 block size")
# set to high data rate now that it's initialised
self.init_spi(1320000)
def init_card_v1(self):
for i in range(_CMD_TIMEOUT):
self.cmd(55, 0, 0)
if self.cmd(41, 0, 0) == 0:
self.cdv = 512
#print("[SDCard] v1 card")
return
raise OSError("timeout waiting for v1 card")
def init_card_v2(self):
for i in range(_CMD_TIMEOUT):
time.sleep_ms(50)
self.cmd(58, 0, 0, 4)
self.cmd(55, 0, 0)
if self.cmd(41, 0x40000000, 0) == 0:
self.cmd(58, 0, 0, 4)
self.cdv = 1
#print("[SDCard] v2 card")
return
raise OSError("timeout waiting for v2 card")
def cmd(self, cmd, arg, crc, final=0, release=True):
self.cs.low()
# create and send the command
buf = self.cmdbuf
buf[0] = 0x40 | cmd
buf[1] = arg >> 24
buf[2] = arg >> 16
buf[3] = arg >> 8
buf[4] = arg
buf[5] = crc
self.spi.write(buf)
# wait for the response (response[7] == 0)
for i in range(_CMD_TIMEOUT):
response = self.spi.read(1, 0xff)[0]
if not (response & 0x80):
# this could be a big-endian integer that we are getting here
for j in range(final):
self.spi.write(b'\xff')
if release:
self.cs.high()
self.spi.write(b'\xff')
return response
# timeout
self.cs.high()
self.spi.write(b'\xff')
return -1
def cmd_nodata(self, cmd):
self.spi.write(cmd)
self.spi.read(1, 0xff) # ignore stuff byte
for _ in range(_CMD_TIMEOUT):
if self.spi.read(1, 0xff)[0] == 0xff:
self.cs.high()
self.spi.write(b'\xff')
return 0 # OK
self.cs.high()
self.spi.write(b'\xff')
return 1 # timeout
def readinto(self, buf):
self.cs.low()
# read until start byte (0xfe)
while self.spi.read(1, 0xff)[0] != 0xfe:
pass
# read data
mv = self.dummybuf_memoryview[:len(buf)]
self.spi.write_readinto(mv, buf)
# read checksum
self.spi.write(b'\xff')
self.spi.write(b'\xff')
self.cs.high()
self.spi.write(b'\xff')
def write(self, token, buf):
self.cs.low()
# send: start of block, data, checksum
self.spi.read(1, token)
self.spi.write(buf)
self.spi.write(b'\xff')
self.spi.write(b'\xff')
# check the response
if (self.spi.read(1, 0xff)[0] & 0x1f) != 0x05:
self.cs.high()
self.spi.write(b'\xff')
return
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0:
pass
self.cs.high()
self.spi.write(b'\xff')
def write_token(self, token):
self.cs.low()
self.spi.read(1, token)
self.spi.write(b'\xff')
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0x00:
pass
self.cs.high()
self.spi.write(b'\xff')
def count(self):
return self.sectors
def readblocks(self, block_num, buf):
nblocks, err = divmod(len(buf), 512)
assert nblocks and not err, 'Buffer length is invalid'
if nblocks == 1:
# CMD17: set read address for single block
if self.cmd(17, block_num * self.cdv, 0) != 0:
return 1
# receive the data
self.readinto(buf)
else:
# CMD18: set read address for multiple blocks
if self.cmd(18, block_num * self.cdv, 0) != 0:
return 1
offset = 0
mv = memoryview(buf)
while nblocks:
self.readinto(mv[offset : offset + 512])
offset += 512
nblocks -= 1
return self.cmd_nodata(b'\x0c') # cmd 12
return 0
def writeblocks(self, block_num, buf):
nblocks, err = divmod(len(buf), 512)
assert nblocks and not err, 'Buffer length is invalid'
if nblocks == 1:
# CMD24: set write address for single block
if self.cmd(24, block_num * self.cdv, 0) != 0:
return 1
# send the data
self.write(_TOKEN_DATA, buf)
else:
# CMD25: set write address for first block
if self.cmd(25, block_num * self.cdv, 0) != 0:
return 1
# send the data
offset = 0
mv = memoryview(buf)
while nblocks:
self.write(_TOKEN_CMD25, mv[offset : offset + 512])
offset += 512
nblocks -= 1
self.write_token(_TOKEN_STOP_TRAN)
return 0
| 29.297794
| 77
| 0.527544
|
1162c4868396f3d5eeb7a0d6ad38e1686a8269ef
| 145
|
py
|
Python
|
scripts/load_bleurt.py
|
MaratSaidov/artificial-text-detection
|
74b2100294232ec361db84fdc3a24fdeba1fce49
|
[
"MIT"
] | 12
|
2021-11-15T08:59:46.000Z
|
2022-03-06T15:42:24.000Z
|
scripts/load_bleurt.py
|
MaratSaidov/artificial-text-detection
|
74b2100294232ec361db84fdc3a24fdeba1fce49
|
[
"MIT"
] | 2
|
2021-11-14T15:50:00.000Z
|
2021-11-20T12:17:29.000Z
|
scripts/load_bleurt.py
|
MaratSaidov/artificial-text-detection
|
74b2100294232ec361db84fdc3a24fdeba1fce49
|
[
"MIT"
] | null | null | null |
from datasets import load_metric
bleurt = load_metric("bleurt", "BLEURT-20")
print(bleurt)
# $HOME/.cache/huggingface/metrics/bleurt/BLEURT-20
| 20.714286
| 51
| 0.772414
|