hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace2edd1c07b5d73f8f6f34b356323d385080674 | 56,755 | py | Python | mayan/apps/common/apps.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | 1 | 2020-07-15T02:56:02.000Z | 2020-07-15T02:56:02.000Z | mayan/apps/common/apps.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | null | null | null | mayan/apps/common/apps.py | kyper999/mayan-edms | ca7b8301a1f68548e8e718d42a728a500d67286e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import timedelta
import logging
from kombu import Exchange, Queue
from django import apps
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from mayan.celery import app
from rest_api.classes import APIEndPoint
from .classes import Package
from .handlers import (
user_locale_profile_session_config, user_locale_profile_create
)
from .links import (
link_about, link_current_user_details, link_current_user_edit,
link_current_user_locale_profile_details,
link_current_user_locale_profile_edit, link_filters, link_license,
link_packages_licenses, link_setup, link_tools
)
from .literals import DELETE_STALE_UPLOADS_INTERVAL
from .menus import menu_facet, menu_main, menu_secondary, menu_tools
from .settings import setting_auto_logging
from .tasks import task_delete_stale_uploads # NOQA - Force task registration
logger = logging.getLogger(__name__)
class MayanAppConfig(apps.AppConfig):
    """
    Base AppConfig for Mayan apps. On ready(), auto-mounts the app's
    ``<app>.urls`` module into the project-wide ``urlpatterns``.

    Class attributes:
        app_url: URL prefix override. ``None`` (default) mounts the app
            under its own name; an empty string mounts it at the root URL;
            any other string is used verbatim as the prefix.
        app_namespace: URL namespace override; defaults to the app name.
    """
    app_url = None
    app_namespace = None

    def ready(self):
        # Imported lazily: mayan.urls itself imports app modules, so a
        # top-level import would be circular.
        from mayan.urls import urlpatterns

        if self.app_url:
            top_url = '{}/'.format(self.app_url)
        elif self.app_url is not None:
            # app_url == '' explicitly requests mounting at the root URL.
            top_url = ''
        else:
            # app_url is None: default to the app's own name as prefix.
            top_url = '{}/'.format(self.name)

        try:
            urlpatterns += (
                url(
                    r'^{}'.format(top_url),
                    include(
                        '{}.urls'.format(self.name),
                        namespace=self.app_namespace or self.name
                    )
                ),
            )
        except ImportError as exception:
            logger.debug(
                'App %s doesn\'t have URLs defined. Exception: %s', self.name,
                exception
            )
            # An app without a urls module is fine; any other ImportError
            # (e.g. a broken import inside the urls module) must propagate.
            # str() instead of the Python 2-only unicode() builtin: the
            # message tested is ASCII, so behavior on Python 2 is unchanged
            # while avoiding a NameError on Python 3.
            if 'No module named urls' not in str(exception):
                raise exception
class CommonApp(MayanAppConfig):
app_url = ''
name = 'common'
test = True
verbose_name = _('Common')
def ready(self):
super(CommonApp, self).ready()
APIEndPoint(app=self, version_string='1')
Package(label='Django', license_text='''
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='PyYAML', license_text='''
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
''')
Package(label='Celery', license_text='''
Copyright (c) 2015 Ask Solem & contributors. All rights reserved.
Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
Copyright (c) 2009, 2010, 2011, 2012 Ask Solem, and individual contributors. All rights reserved.
Celery is licensed under The BSD License (3 Clause, also known as
the new BSD license). The license is an OSI approved Open Source
license and is GPL-compatible(1).
The license text can also be found here:
http://www.opensource.org/licenses/BSD-3-Clause
License
=======
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Ask Solem, nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Documentation License
=====================
The documentation portion of Celery (the rendered contents of the
"docs" directory of a software distribution or checkout) is supplied
under the Creative Commons Attribution-Noncommercial-Share Alike 3.0
United States License as described by
http://creativecommons.org/licenses/by-nc-sa/3.0/us/
Footnotes
=========
(1) A GPL-compatible license makes it possible to
combine Celery with other software that is released
under the GPL, it does not mean that we're distributing
Celery under the GPL license. The BSD license, unlike the GPL,
let you distribute a modified version without making your
changes open source.
''')
Package(label='cssmin', license_text='''
`cssmin.py` - A Python port of the YUI CSS compressor.
Copyright (c) 2010 Zachary Voase
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
This software contains portions of the YUI CSS Compressor, notably some regular
expressions for reducing the size of CSS. The YUI Compressor source code can be
found at <http://github.com/yui/yuicompressor>, and is licensed as follows:
> YUI Compressor Copyright License Agreement (BSD License)
>
> Copyright (c) 2009, Yahoo! Inc.
> All rights reserved.
>
> Redistribution and use of this software in source and binary forms,
> with or without modification, are permitted provided that the following
> conditions are met:
>
> * Redistributions of source code must retain the above
> copyright notice, this list of conditions and the
> following disclaimer.
>
> * Redistributions in binary form must reproduce the above
> copyright notice, this list of conditions and the
> following disclaimer in the documentation and/or other
> materials provided with the distribution.
>
> * Neither the name of Yahoo! Inc. nor the names of its
> contributors may be used to endorse or promote products
> derived from this software without specific prior
> written permission of Yahoo! Inc.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
> DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
> FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
> DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
> SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
> OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='django-autoadmin', license_text='''
The MIT License (MIT)
Copyright (c) 2014 Roberto Rosario
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
''')
Package(label='django-celery', license_text='''
Copyright (c) 2012-2013 GoPivotal, Inc. All Rights Reserved.
Copyright (c) 2009-2012 Ask Solem. All Rights Reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of Ask Solem nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='django-colorful', license_text='''
copyright (c) 2010 SIMON CHARETTE
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
''')
Package(label='django-compressor', license_text='''
django_compressor
-----------------
Copyright (c) 2009-2014 Django Compressor authors (see AUTHORS file)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
django_compressor contains code from Andreas Pelme's django-compress
--------------------------------------------------------------------
Copyright (c) 2008 Andreas Pelme <andreas@pelme.se>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
rjsmin.py (License-information from the file)
---------------------------------------------
Copyright 2006, 2007, 2008, 2009, 2010, 2011
André Malo or his licensors, as applicable
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
utils.cache.cached_property extracted from Celery
-------------------------------------------
Copyright (c) 2009-2011, Ask Solem and contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of Ask Solem nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
utils.FormattableString
-----------------------
Copyright (c) 2010 by Florent Xicluna.
Some rights reserved.
Redistribution and use in source and binary forms of the software as well
as documentation, with or without modification, are permitted provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
''')
Package(label='django-cors-headers', license_text='''
Copyright 2013 Otto Yiu and other contributors
http://ottoyiu.com
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''')
Package(label='django-filetransfers', license_text='''
Copyright (c) Waldemar Kornewald, Thomas Wanschik, and all contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of All Buttons Pressed nor
the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='django-pure-pagination', license_text='''
Copyright (c) James Pacileo and contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='django-model-util', license_text='''
Copyright (c) 2009-2015, Carl Meyer and contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the author nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='django-qsstats-magic', license_text='''
Copyright (c) 2010, Matt Croydon, Mikhail Korobov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the tastypie nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MATT CROYDON BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='Django Suit', license_text='''
Django Suit
-----------
Django Suit by is licensed under a
Creative Commons Attribution-NonCommercial 3.0 Unported License
See online version of this license here:
http://creativecommons.org/licenses/by-nc/3.0/
License
-------
THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE
CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE
IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
1. Definitions
a. "Adaptation" means a work based upon the Work, or upon the Work and other
pre-existing works, such as a translation, adaptation, derivative work,
arrangement of music or other alterations of a literary or artistic work, or
phonogram or performance and includes cinematographic adaptations or any
other form in which the Work may be recast, transformed, or adapted
including in any form recognizably derived from the original, except that a
work that constitutes a Collection will not be considered an Adaptation for
the purpose of this License. For the avoidance of doubt, where the Work is a
musical work, performance or phonogram, the synchronization of the Work in
timed-relation with a moving image ("synching") will be considered an
Adaptation for the purpose of this License.
b. "Collection" means a collection of literary or artistic works, such as
encyclopedias and anthologies, or performances, phonograms or broadcasts, or
other works or subject matter other than works listed in Section 1(f) below,
which, by reason of the selection and arrangement of their contents,
constitute intellectual creations, in which the Work is included in its
entirety in unmodified form along with one or more other contributions, each
constituting separate and independent works in themselves, which together
are assembled into a collective whole. A work that constitutes a Collection
will not be considered an Adaptation (as defined above) for the purposes of
this License.
c. "Distribute" means to make available to the public the original and
copies of the Work or Adaptation, as appropriate, through sale or other
transfer of ownership.
d. "Licensor" means the individual, individuals, entity or entities that
offer(s) the Work under the terms of this License.
e. "Original Author" means, in the case of a literary or artistic work, the
individual, individuals, entity or entities who created the Work or if no
individual or entity can be identified, the publisher; and in addition (i)
in the case of a performance the actors, singers, musicians, dancers, and
other persons who act, sing, deliver, declaim, play in, interpret or
otherwise perform literary or artistic works or expressions of folklore;
(ii) in the case of a phonogram the producer being the person or legal
entity who first fixes the sounds of a performance or other sounds; and,
(iii) in the case of broadcasts, the organization that transmits the
broadcast.
f. "Work" means the literary and/or artistic work offered under the terms of
this License including without limitation any production in the literary,
scientific and artistic domain, whatever may be the mode or form of its
expression including digital form, such as a book, pamphlet and other
writing; a lecture, address, sermon or other work of the same nature; a
dramatic or dramatico-musical work; a choreographic work or entertainment in
dumb show; a musical composition with or without words; a cinematographic
work to which are assimilated works expressed by a process analogous to
cinematography; a work of drawing, painting, architecture, sculpture,
engraving or lithography; a photographic work to which are assimilated works
expressed by a process analogous to photography; a work of applied art; an
illustration, map, plan, sketch or three-dimensional work relative to
geography, topography, architecture or science; a performance; a broadcast;
a phonogram; a compilation of data to the extent it is protected as a
copyrightable work; or a work performed by a variety or circus performer to
the extent it is not otherwise considered a literary or artistic work.
g. "You" means an individual or entity exercising rights under this License
who has not previously violated the terms of this License with respect to
the Work, or who has received express permission from the Licensor to
exercise rights under this License despite a previous violation.
h. "Publicly Perform" means to perform public recitations of the Work and to
communicate to the public those public recitations, by any means or process,
including by wire or wireless means or public digital performances; to make
available to the public Works in such a way that members of the public may
access these Works from a place and at a place individually chosen by them;
to perform the Work to the public by any means or process and the
communication to the public of the performances of the Work, including by
public digital performance; to broadcast and rebroadcast the Work by any
means including signs, sounds or images.
i. "Reproduce" means to make copies of the Work by any means including
without limitation by sound or visual recordings and the right of fixation
and reproducing fixations of the Work, including storage of a protected
performance or phonogram in digital form or other electronic medium.
2. Fair Dealing Rights. Nothing in this License is intended to reduce, limit,
or restrict any uses free from copyright or rights arising from limitations or
exceptions that are provided for in connection with the copyright protection
under copyright law or other applicable laws.
3. License Grant. Subject to the terms and conditions of this License,
Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual
(for the duration of the applicable copyright) license to exercise the rights
in the Work as stated below:
a. to Reproduce the Work, to incorporate the Work into one or more
Collections, and to Reproduce the Work as incorporated in the Collections;
b. to create and Reproduce Adaptations provided that any such Adaptation,
including any translation in any medium, takes reasonable steps to clearly
label, demarcate or otherwise identify that changes were made to the
original Work. For example, a translation could be marked "The original work
was translated from English to Spanish," or a modification could indicate
"The original work has been modified.";
c. to Distribute and Publicly Perform the Work including as incorporated in
Collections; and,
d. to Distribute and Publicly Perform Adaptations.
The above rights may be exercised in all media and formats whether now known
or hereafter devised. The above rights include the right to make such
modifications as are technically necessary to exercise the rights in other
media and formats. Subject to Section 8(f), all rights not expressly granted
by Licensor are hereby reserved, including but not limited to the rights set
forth in Section 4(d).
4. Restrictions. The license granted in Section 3 above is expressly made
subject to and limited by the following restrictions:
a. You may Distribute or Publicly Perform the Work only under the terms of
this License. You must include a copy of, or the Uniform Resource Identifier
(URI) for, this License with every copy of the Work You Distribute or
Publicly Perform. You may not offer or impose any terms on the Work that
restrict the terms of this License or the ability of the recipient of the
Work to exercise the rights granted to that recipient under the terms of the
License. You may not sublicense the Work. You must keep intact all notices
that refer to this License and to the disclaimer of warranties with every
copy of the Work You Distribute or Publicly Perform. When You Distribute or
Publicly Perform the Work, You may not impose any effective technological
measures on the Work that restrict the ability of a recipient of the Work
from You to exercise the rights granted to that recipient under the terms of
the License. This Section 4(a) applies to the Work as incorporated in a
Collection, but this does not require the Collection apart from the Work
itself to be made subject to the terms of this License. If You create a
Collection, upon notice from any Licensor You must, to the extent
practicable, remove from the Collection any credit as required by Section
4(c), as requested. If You create an Adaptation, upon notice from any
Licensor You must, to the extent practicable, remove from the Adaptation any
credit as required by Section 4(c), as requested.
b. You may not exercise any of the rights granted to You in Section 3 above
in any manner that is primarily intended for or directed toward commercial
advantage or private monetary compensation. The exchange of the Work for
other copyrighted works by means of digital file-sharing or otherwise shall
not be considered to be intended for or directed toward commercial advantage
or private monetary compensation, provided there is no payment of any
monetary compensation in connection with the exchange of copyrighted works.
c. If You Distribute, or Publicly Perform the Work or any Adaptations or
Collections, You must, unless a request has been made pursuant to Section
4(a), keep intact all copyright notices for the Work and provide, reasonable
to the medium or means You are utilizing: (i) the name of the Original
Author (or pseudonym, if applicable) if supplied, and/or if the Original
Author and/or Licensor designate another party or parties (e.g., a sponsor
institute, publishing entity, journal) for attribution ("Attribution
Parties") in Licensor's copyright notice, terms of service or by other
reasonable means, the name of such party or parties; (ii) the title of the
Work if supplied; (iii) to the extent reasonably practicable, the URI, if
any, that Licensor specifies to be associated with the Work, unless such URI
does not refer to the copyright notice or licensing information for the
Work; and, (iv) consistent with Section 3(b), in the case of an Adaptation,
a credit identifying the use of the Work in the Adaptation (e.g., "French
translation of the Work by Original Author," or "Screenplay based on
original Work by Original Author"). The credit required by this Section 4(c)
may be implemented in any reasonable manner; provided, however, that in the
case of a Adaptation or Collection, at a minimum such credit will appear, if
a credit for all contributing authors of the Adaptation or Collection
appears, then as part of these credits and in a manner at least as prominent
as the credits for the other contributing authors. For the avoidance of
doubt, You may only use the credit required by this Section for the purpose
of attribution in the manner set out above and, by exercising Your rights
under this License, You may not implicitly or explicitly assert or imply any
connection with, sponsorship or endorsement by the Original Author, Licensor
and/or Attribution Parties, as appropriate, of You or Your use of the Work,
without the separate, express prior written permission of the Original
Author, Licensor and/or Attribution Parties.
d. For the avoidance of doubt:
i. Non-waivable Compulsory License Schemes. In those jurisdictions in
which the right to collect royalties through any statutory or compulsory
licensing scheme cannot be waived, the Licensor reserves the exclusive
right to collect such royalties for any exercise by You of the rights
granted under this License;
ii. Waivable Compulsory License Schemes. In those jurisdictions in which
the right to collect royalties through any statutory or compulsory
licensing scheme can be waived, the Licensor reserves the exclusive right
to collect such royalties for any exercise by You of the rights granted
under this License if Your exercise of such rights is for a purpose or use
which is otherwise than noncommercial as permitted under Section 4(b) and
otherwise waives the right to collect royalties through any statutory or
compulsory licensing scheme; and,
iii. Voluntary License Schemes. The Licensor reserves the right to collect
royalties, whether individually or, in the event that the Licensor is a
member of a collecting society that administers voluntary licensing
schemes, via that society, from any exercise by You of the rights granted
under this License that is for a purpose or use which is otherwise than
noncommercial as permitted under Section 4(c).
e. Except as otherwise agreed in writing by the Licensor or as may be
otherwise permitted by applicable law, if You Reproduce, Distribute or
Publicly Perform the Work either by itself or as part of any Adaptations or
Collections, You must not distort, mutilate, modify or take other derogatory
action in relation to the Work which would be prejudicial to the Original
Author's honor or reputation. Licensor agrees that in those jurisdictions
(e.g. Japan), in which any exercise of the right granted in Section 3(b) of
this License (the right to make Adaptations) would be deemed to be a
distortion, mutilation, modification or other derogatory action prejudicial
to the Original Author's honor and reputation, the Licensor will waive or
not assert, as appropriate, this Section, to the fullest extent permitted by
the applicable national law, to enable You to reasonably exercise Your right
under Section 3(b) of this License (right to make Adaptations) but not
otherwise.
5. Representations, Warranties and Disclaimer
UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS
THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND
CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING,
WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A
PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER
DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT
DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED
WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW,
IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY
SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT
OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF
THE POSSIBILITY OF SUCH DAMAGES.
7. Termination
a. This License and the rights granted hereunder will terminate
automatically upon any breach by You of the terms of this License.
Individuals or entities who have received Adaptations or Collections from
You under this License, however, will not have their licenses terminated
provided such individuals or entities remain in full compliance with those
licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this
License.
b. Subject to the above terms and conditions, the license granted here is
perpetual (for the duration of the applicable copyright in the Work).
Notwithstanding the above, Licensor reserves the right to release the Work
under different license terms or to stop distributing the Work at any time;
provided, however that any such election will not serve to withdraw this
License (or any other license that has been, or is required to be, granted
under the terms of this License), and this License will continue in full
force and effect unless terminated as stated above.
8. Miscellaneous
a. Each time You Distribute or Publicly Perform the Work or a Collection,
the Licensor offers to the recipient a license to the Work on the same terms
and conditions as the license granted to You under this License.
b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
offers to the recipient a license to the original Work on the same terms and
conditions as the license granted to You under this License.
c. If any provision of this License is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of the
remainder of the terms of this License, and without further action by the
parties to this agreement, such provision shall be reformed to the minimum
extent necessary to make such provision valid and enforceable.
d. No term or provision of this License shall be deemed waived and no breach
consented to unless such waiver or consent shall be in writing and signed by
the party to be charged with such waiver or consent.
e. This License constitutes the entire agreement between the parties with
respect to the Work licensed here. There are no understandings, agreements
or representations with respect to the Work not specified here. Licensor
shall not be bound by any additional provisions that may appear in any
communication from You. This License may not be modified without the mutual
written agreement of the Licensor and You.
f. The rights granted under, and the subject matter referenced, in this
License were drafted utilizing the terminology of the Berne Convention for
the Protection of Literary and Artistic Works (as amended on September 28,
1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the
WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright
Convention (as revised on July 24, 1971). These rights and subject matter
take effect in the relevant jurisdiction in which the License terms are
sought to be enforced according to the corresponding provisions of the
implementation of those treaty provisions in the applicable national law. If
the standard suite of rights granted under applicable copyright law includes
additional rights not granted under this License, such additional rights are
deemed to be included in the License; this License is not intended to
restrict the license of any rights under applicable law.
''')
Package(label='pdfminer', license_text='''
(This is so-called MIT/X License)
Copyright (c) 2004-2014 Yusuke Shinyama
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''')
Package(label='pycountry', license_text='''
Author: Christian Theune
License: LGPL 2.1
''')
Package(label='pytesseract', license_text='''
LICENSE: Python-tesseract is released under the GPL v3.
''')
Package(label='dateutil', license_text='''
dateutil - Extensions to the standard Python datetime module.
Copyright (c) 2003-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
Copyright (c) 2012-2014 - Tomi Pieviläinen <tomi.pievilainen@iki.fi>
Copyright (c) 2014 - Yaron de Leeuw <me@jarondl.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''')
Package(label='pytz', license_text='''
Copyright (c) 2003-2005 Stuart Bishop <stuart@stuartbishop.net>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
''')
Package(label='sh', license_text='''
Copyright (C) 2011-2012 by Andrew Moffat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
''')
app.conf.CELERYBEAT_SCHEDULE.update(
{
'task_delete_stale_uploads': {
'task': 'common.tasks.task_delete_stale_uploads',
'schedule': timedelta(
seconds=DELETE_STALE_UPLOADS_INTERVAL
),
},
}
)
app.conf.CELERY_QUEUES.extend(
(
Queue('default', Exchange('default'), routing_key='default'),
Queue('tools', Exchange('tools'), routing_key='tools'),
Queue(
'common_periodic', Exchange('common_periodic'),
routing_key='common_periodic', delivery_mode=1
),
)
)
app.conf.CELERY_DEFAULT_QUEUE = 'default'
app.conf.CELERY_ROUTES.update(
{
'common.tasks.task_delete_stale_uploads': {
'queue': 'common_periodic'
},
}
)
menu_facet.bind_links(
links=(
link_current_user_details,
link_current_user_locale_profile_details, link_tools,
link_setup
), sources=(
'common:current_user_details', 'common:current_user_edit',
'common:current_user_locale_profile_details',
'common:current_user_locale_profile_edit',
'authentication:password_change_view', 'common:setup_list',
'common:tools_list'
)
)
menu_facet.bind_links(
links=(link_about, link_license, link_packages_licenses),
sources=(
'common:about_view', 'common:license_view',
'common:packages_licenses_view'
)
)
menu_main.bind_links(links=(link_about,), position=99)
menu_secondary.bind_links(
links=(
link_current_user_edit, link_current_user_locale_profile_edit
),
sources=(
'common:current_user_details', 'common:current_user_edit',
'common:current_user_locale_profile_details',
'common:current_user_locale_profile_edit',
'authentication:password_change_view', 'common:setup_list',
'common:tools_list'
)
)
menu_tools.bind_links(
links=(link_filters,)
)
post_save.connect(
user_locale_profile_create,
dispatch_uid='user_locale_profile_create',
sender=settings.AUTH_USER_MODEL
)
user_logged_in.connect(
user_locale_profile_session_config,
dispatch_uid='user_locale_profile_session_config'
)
self.setup_auto_logging()
def setup_auto_logging(self):
if setting_auto_logging.value:
if settings.DEBUG:
level = 'DEBUG'
else:
level = 'INFO'
loggers = {}
for project_app in apps.apps.get_app_configs():
loggers[project_app.name] = {
'handlers': ['console'],
'propagate': True,
'level': level,
}
logging.config.dictConfig(
{
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'intermediate': {
'format': '%(name)s <%(process)d> [%(levelname)s] "%(funcName)s() %(message)s"'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'intermediate'
}
},
'loggers': loggers
}
)
| 50.092674 | 460 | 0.755933 |
ace2edf01d317a02075f4f858d8602ca938db376 | 801 | py | Python | setup.py | RiversideRocks/du.py | d396605fdbb9a332ba09987e3a3bcff4014ff22a | [
"MIT"
] | null | null | null | setup.py | RiversideRocks/du.py | d396605fdbb9a332ba09987e3a3bcff4014ff22a | [
"MIT"
] | null | null | null | setup.py | RiversideRocks/du.py | d396605fdbb9a332ba09987e3a3bcff4014ff22a | [
"MIT"
] | null | null | null | import setuptools
# Packaging configuration for du.py, an API wrapper for the
# Dangerous Users Data Base.

# The long description shown on PyPI is the README, verbatim.
with open("README.md", "r", encoding="utf-8") as readme_file:
    readme_text = readme_file.read()

setuptools.setup(
    name="du.py",
    version="2.0.4",
    author="Stylix58",
    author_email="lateman-jpeg@outlook.fr",
    description="A API wrapper for Dangerous Users Data Base",
    long_description=readme_text,
    long_description_content_type="text/markdown",
    url="https://github.com/Stylix58/du.py",
    project_urls={"Bug Tracker": "https://github.com/Stylix58/du.py/issues"},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # The package sources live under src/ (src-layout).
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.6",
)
| 29.666667 | 66 | 0.644195 |
ace2eef4eabe0d5268bf221a92317d994434290b | 49 | py | Python | example/example/__init__.py | ebanalyse/django-nested-form-field | 5872f2a60676948d4d54332e30e01b46536af323 | [
"MIT"
] | 1 | 2021-05-25T20:41:38.000Z | 2021-05-25T20:41:38.000Z | example/example/__init__.py | ebanalyse/django-nested-form-field | 5872f2a60676948d4d54332e30e01b46536af323 | [
"MIT"
] | 1 | 2021-05-25T21:00:09.000Z | 2021-05-25T21:00:09.000Z | example/example/__init__.py | nielslerches/django-nested-form-field | 5872f2a60676948d4d54332e30e01b46536af323 | [
"MIT"
] | null | null | null | default_app_config = "example.apps.ExampleConfig" | 49 | 49 | 0.857143 |
ace2ef4424b930f2f8a27a3cdebc27d84ddd7527 | 2,699 | py | Python | RaspberryPi/scripts/WaterCtrl.py | janhieber/WaterCtrl | f661c8767f819490e170a2cd80f82716301cec65 | [
"MIT"
] | 6 | 2015-04-20T10:19:42.000Z | 2018-05-15T08:18:29.000Z | RaspberryPi/scripts/WaterCtrl.py | janhieber/WaterCtrl | f661c8767f819490e170a2cd80f82716301cec65 | [
"MIT"
] | 13 | 2015-09-10T09:37:34.000Z | 2017-09-10T12:20:07.000Z | RaspberryPi/scripts/WaterCtrl.py | janhieber/WaterCtrl | f661c8767f819490e170a2cd80f82716301cec65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" imports """
import queue
import logging
import signal
import sys
import configparser
import ControlDaemon
import MessageBroker
import WebService
from flask import Flask, request
""" global vars """
thread1 = None
thread2 = None
thread3 = None
config = None
server = Flask(__name__)
"""
Setup everything
"""
def setup():
# read config
config = configparser.RawConfigParser()
config.read('WaterCtrl.conf')
# setup logging
logging.basicConfig(
filename = config.get('general', 'logfile'),
level = config.getint('general', 'loglevel'),
format = '%(asctime)s %(levelname)-8s [%(threadName)-13s] %(message)s'
)
# setup signal traps
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
# create queues for exchanging data
sendQueue = queue.Queue()
recvQueue = queue.Queue() # not used now
# create threads
global thread1
global thread2
global thread3
thread1 = ControlDaemon.app(sendQueue, recvQueue)
thread1.setName('ControlDaemon')
thread2 = MessageBroker.app(sendQueue, recvQueue)
thread2.setName('MessageBroker')
thread3 = WebService.app(server,sendQueue=sendQueue,recvQueue=recvQueue)
thread3.setName('WebService')
thread1.start()
thread2.start()
thread3.start()
# some empty loglines on start
logging.critical('STARTUP done\n\n\n\n\n')
"""
Main function
"""
def main():
logging.info("main function")
"""
Exit handler.
Tries to shutdown the app graceful.
"""
def gracefulExit():
logging.critical('exit graceful')
# signal threads to exit
global thread1
global thread2
global thread3
thread1.exit = True
thread2.exit = True
thread3.exit = True
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
"""
Signal handler.
Handles user signals and when the app
is requested to exit, for example ctrl+c
"""
def signal_handler(signum, frame):
if signum == signal.SIGUSR1:
logging.info('SIGUSR1 received')
if signum == signal.SIGUSR2:
logging.info('SIGUSR2 received')
if signum == signal.SIGINT:
logging.critical('SIGINT received')
gracefulExit()
if __name__ == '__main__':
    # Initialise config/logging/signals and start the worker threads.
    setup()
    logging.info('running main')
    main()
    # Blocks until the Flask/Werkzeug server is shut down.
    server.run(host="0.0.0.0",port=5372,debug=True)
    # wait for threads to finish
    thread1.join()
    thread2.join()
    thread3.join()
    logging.info('exit')
    exit(0)
| 22.872881 | 78 | 0.667655 |
ace2f1df2e3a8f05d4a454a88f96d3b46be4d2fd | 61,728 | py | Python | angr/knowledge_plugins/functions/function.py | loverics/angr | ef89a6fbd1bd23bda17ed967c7fe9274be209874 | [
"BSD-2-Clause"
] | 1 | 2020-11-02T00:37:29.000Z | 2020-11-02T00:37:29.000Z | angr/knowledge_plugins/functions/function.py | loverics/angr | ef89a6fbd1bd23bda17ed967c7fe9274be209874 | [
"BSD-2-Clause"
] | null | null | null | angr/knowledge_plugins/functions/function.py | loverics/angr | ef89a6fbd1bd23bda17ed967c7fe9274be209874 | [
"BSD-2-Clause"
] | 3 | 2019-10-17T07:47:36.000Z | 2022-01-24T23:38:13.000Z | import os
import logging
import networkx
import string
import itertools
from collections import defaultdict
from typing import Union, Optional, Iterable, Set, Generator
from typing import Type # For some reasons the linter doesn't recognize the use in apply_definition but PyCharm needs it imported to correctly recognize it # pylint: disable=unused-import
from itanium_demangler import parse
from cle.backends.symbol import Symbol
from archinfo.arch_arm import get_real_address_if_arm
import claripy
from ...codenode import CodeNode, BlockNode, HookNode, SyscallNode
from ...serializable import Serializable
from ...errors import AngrValueError, SimEngineError, SimMemoryError
from ...procedures import SIM_LIBRARIES
from ...protos import function_pb2
from ...calling_conventions import DEFAULT_CC
from .function_parser import FunctionParser
l = logging.getLogger(name=__name__)
from ...sim_type import SimTypeFunction, parse_defns
from ...calling_conventions import SimCC
from ...project import Project
class Function(Serializable):
    """
    A representation of a function and various information about it.
    """

    # NOTE(review): 'addr' appears twice in this tuple; the duplicate is
    # harmless for __slots__ semantics but could be removed.
    __slots__ = ('transition_graph', '_local_transition_graph', 'normalized', '_ret_sites', '_jumpout_sites',
                 '_callout_sites', '_endpoints', '_call_sites', '_retout_sites', 'addr', '_function_manager',
                 'is_syscall', '_project', 'is_plt', 'addr', 'is_simprocedure', '_name', 'binary_name',
                 '_argument_registers', '_argument_stack_variables',
                 'bp_on_stack', 'retaddr_on_stack', 'sp_delta', '_cc', '_prototype', '_returning',
                 'prepared_registers', 'prepared_stack_variables', 'registers_read_afterwards',
                 'startpoint', '_addr_to_block_node', '_block_sizes', '_block_cache', '_local_blocks',
                 '_local_block_addrs', 'info', 'tags', 'alignment',
                 )
def __init__(self, function_manager, addr, name=None, syscall=None, is_simprocedure=None, binary_name=None,
is_plt=None, returning=None, alignment=False):
"""
Function constructor. If the optional parameters are not provided, they will be automatically determined upon
the creation of a Function object.
:param addr: The address of the function.
The following parameters are optional.
:param str name: The name of the function.
:param bool syscall: Whether this function is a syscall or not.
:param bool is_simprocedure: Whether this function is a SimProcedure or not.
:param str binary_name: Name of the binary where this function is.
:param bool is_plt: If this function is a PLT entry.
:param bool returning: If this function returns.
:param bool alignment: If this function acts as an alignment filler. Such functions usually only contain nops.
"""
self.transition_graph = networkx.DiGraph()
self._local_transition_graph = None
self.normalized = False
# block nodes at whose ends the function returns
self._ret_sites = set()
# block nodes at whose ends the function jumps out to another function (jumps outside)
self._jumpout_sites = set()
# block nodes at whose ends the function calls out to another non-returning function
self._callout_sites = set()
# block nodes that ends the function by returning out to another function (returns outside). This is rare.
self._retout_sites = set()
# block nodes (basic block nodes) at whose ends the function terminates
# in theory, if everything works fine, endpoints == ret_sites | jumpout_sites | callout_sites
self._endpoints = defaultdict(set)
self._call_sites = {}
self.addr = addr
# startpoint can be None if the corresponding CFGNode is a syscall node
self.startpoint = None
self._function_manager = function_manager
self.is_syscall = None
self.is_plt = None
self.is_simprocedure = False
self.alignment = alignment
# These properties are set by VariableManager
self.bp_on_stack = False
self.retaddr_on_stack = False
self.sp_delta = 0
# Calling convention
self._cc = None # type: Optional[SimCC]
# Function prototype
self._prototype = None # type: Optional[SimTypeFunction]
# Whether this function returns or not. `None` means it's not determined yet
self._returning = None
self.prepared_registers = set()
self.prepared_stack_variables = set()
self.registers_read_afterwards = set()
self._addr_to_block_node = {} # map addresses to nodes. it's a cache of blocks. if a block is removed from the
# function, it may not be removed from _addr_to_block_node. if you want to list
# all blocks of a function, access .blocks.
self._block_sizes = {} # map addresses to block sizes
self._block_cache = {} # a cache of real, hard data Block objects
self._local_blocks = {} # a dict of all blocks inside the function
self._local_block_addrs = set() # a set of addresses of all blocks inside the function
self.info = {} # storing special information, like $gp values for MIPS32
self.tags = tuple() # store function tags. can be set manually by performing CodeTagging analysis.
# TODO: Can we remove the following two members?
# Register offsets of those arguments passed in registers
self._argument_registers = []
# Stack offsets of those arguments passed in stack variables
self._argument_stack_variables = []
self._project = None # type: Optional[Project] # will be initialized upon the first access to self.project
#
# Initialize unspecified properties
#
if syscall is not None:
self.is_syscall = syscall
else:
if self.project is None:
raise ValueError("'syscall' must be specified if you do not specify a function manager for this new"
" function." )
# Determine whether this function is a syscall or not
self.is_syscall = self.project.simos.is_syscall_addr(addr)
# Determine whether this function is a SimProcedure
if is_simprocedure is not None:
self.is_simprocedure = is_simprocedure
else:
if self.project is None:
raise ValueError("'is_simprocedure' must be specified if you do not specify a function manager for this"
" new function.")
if self.is_syscall or self.project.is_hooked(addr):
self.is_simprocedure = True
# Determine if this function is a PLT entry
if is_plt is not None:
self.is_plt = is_plt
else:
# Whether this function is a PLT entry or not is fully relying on the PLT detection in CLE
if self.project is None:
raise ValueError("'is_plt' must be specified if you do not specify a function manager for this new"
" function.")
self.is_plt = self.project.loader.find_plt_stub_name(addr) is not None
# Determine the name of this function
if name is None:
self._name = self._get_initial_name()
else:
self._name = name
# Determine the name the binary where this function is.
if binary_name is not None:
self.binary_name = binary_name
else:
self.binary_name = self._get_initial_binary_name()
# Determine returning status for SimProcedures and Syscalls
if returning is not None:
self._returning = returning
else:
if self.project is None:
raise ValueError("'returning' must be specified if you do not specify a functio nmnager for this new"
" function.")
self._returning = self._get_initial_returning()
# Determine a calling convention
# If it is a SimProcedure it might have a CC already defined which can be used
if self.is_simprocedure and self.project is not None and self.addr in self.project._sim_procedures:
simproc = self.project._sim_procedures[self.addr]
cc = simproc.cc
if cc is None:
arch = self.project.arch
if self.project.arch.name in DEFAULT_CC:
cc = DEFAULT_CC[arch.name](arch)
# update cc.args according to num_args
# TODO: Handle non-traditional arguments like fp
if cc is not None and not cc.args and simproc.num_args:
args = cc.arg_locs(is_fp=[False] * simproc.num_args) # arg_locs() uses cc.args
cc.args = args
self.calling_convention = cc
else:
self.calling_convention = None
@property
def name(self):
    """The name of this function (str)."""
    return self._name

@name.setter
def name(self, v):
    # Renaming also mirrors the new name into the knowledge base's label map.
    self._name = v
    self._function_manager._kb.labels[self.addr] = v
@property
def project(self):
    """The Project this function belongs to, or None if it cannot be determined."""
    if self._project is None:
        # lazily resolve the project from the owning function manager's knowledge base
        if self._function_manager is not None:
            self._project = self._function_manager._kb._project #type: Optional[Project]
    return self._project
@property
def returning(self):
    """Whether this function returns to its caller. None means unknown."""
    return self._returning

@returning.setter
def returning(self, v):
    self._returning = v
@property
def blocks(self):
    """
    An iterator of all local blocks in the current function.

    :return: angr.lifter.Block instances.
    """
    for block_addr, block in self._local_blocks.items():
        try:
            yield self.get_block(block_addr, size=block.size,
                                 byte_string=block.bytestr if isinstance(block, BlockNode) else None)
        except (SimEngineError, SimMemoryError):
            # blocks that cannot be lifted (e.g. bytes unavailable) are silently skipped
            pass

@property
def block_addrs(self):
    """
    An iterator of all local block addresses in the current function.

    :return: block addresses (a dict keys view).
    """
    return self._local_blocks.keys()

@property
def block_addrs_set(self):
    """
    Return a set of block addresses for a better performance of inclusion tests.

    :return: A set of block addresses.
    :rtype: set
    """
    return self._local_block_addrs
def get_block(self, addr, size=None, byte_string=None):
    """
    Return the lifted block at ``addr``, using and maintaining the per-function
    block cache.

    :param int addr:    Address of the block.
    :param size:        Expected size of the block, if known.
    :param byte_string: Optional raw bytes backing the block.
    :return:            The lifted block.
    """
    cached = self._block_cache.get(addr, None)
    if cached is not None:
        if size is None or size == cached.size:
            return cached
        # the size has changed; drop the stale cache entry and re-lift below
        del self._block_cache[addr]
    if size is None and addr in self.block_addrs:
        # the size of this block is already known
        size = self._block_sizes[addr]
    block = self._project.factory.block(addr, size=size, byte_string=byte_string)
    if size is None:
        # remember the size we just discovered
        self._block_sizes[addr] = block.size
    self._block_cache[addr] = block
    return block

# compatibility
_get_block = get_block
@property
def nodes(self) -> Generator[CodeNode,None,None]:
    """All nodes of the transition graph (may include nodes registered as outside this function)."""
    return self.transition_graph.nodes()
def get_node(self, addr):
    """Return the block node registered at ``addr``, or None if no node exists there."""
    try:
        return self._addr_to_block_node[addr]
    except KeyError:
        return None
@property
def has_unresolved_jumps(self):
    """True if any local block ends in an indirect jump (Ijk_Boring) that CFG recovery left unresolved."""
    for addr in self.block_addrs:
        if addr in self._function_manager._kb.unresolved_indirect_jumps:
            b = self._function_manager._kb._project.factory.block(addr)
            if b.vex.jumpkind == 'Ijk_Boring':
                return True
    return False

@property
def has_unresolved_calls(self):
    """True if any local block ends in an indirect call (Ijk_Call) that CFG recovery left unresolved."""
    for addr in self.block_addrs:
        if addr in self._function_manager._kb.unresolved_indirect_jumps:
            b = self._function_manager._kb._project.factory.block(addr)
            if b.vex.jumpkind == 'Ijk_Call':
                return True
    return False
@property
def operations(self):
    """
    All of the operations that are done by this functions.

    :return: A list of VEX operation names collected from every local block.
    """
    return [op for block in self.blocks for op in block.vex.operations]

@property
def code_constants(self):
    """
    All of the constants that are used by this functions's code.

    :return: A list of constant values collected from the VEX IR of every local block.
    """
    # TODO: remove link register values
    return [const.value for block in self.blocks for const in block.vex.constants]
@property
def calling_convention(self):
    """
    Get the calling convention of this function.

    :return: The calling convention of this function.
    :rtype: Optional[SimCC]
    """
    return self._cc

@calling_convention.setter
def calling_convention(self, v):
    """
    Set the calling convention of this function. If the new cc has a function prototype, we will clear
    self._prototype. Otherwise, if self.prototype is set, we will use it to update the function prototype of the new
    cc, and then clear self._prototype. A warning message will be generated in either case.

    :param Optional[SimCC] v: The new calling convention.
    :return: None
    """
    self._cc = v
    if self._cc is not None:
        if self._cc.func_ty is None and self._prototype is not None:
            # migrate the standalone prototype into the new calling convention
            l.warning("The new calling convention for %r does not have a prototype associated. Using the existing "
                      "function prototype to update the new calling convention. The existing function prototype "
                      "will be removed.", self)
            self._cc.set_func_type_with_arch(self._prototype)
            self._prototype = None
        elif self._cc.func_ty is not None and self._prototype is not None:
            # the cc's own prototype wins; drop the standalone one
            l.warning("The new calling convention for %r already has a prototype associated. The existing function "
                      "prototype will be removed.", self)
            self._prototype = None

@property
def prototype(self):
    """
    Get the prototype of this function. We prioritize the function prototype that is set in self.calling_convention.

    :return: The function prototype.
    :rtype: Optional[SimTypeFunction]
    """
    if self._cc:
        return self._cc.func_ty
    else:
        return self._prototype

@prototype.setter
def prototype(self, proto):
    """
    Set a new prototype to this function. If a calling convention is already set to this function, the new prototype
    will be set to this calling convention instead.

    :param Optional[SimTypeFunction] proto: The new prototype.
    :return: None
    """
    if self._cc:
        self._cc.set_func_type_with_arch(proto)
    else:
        self._prototype = proto
@classmethod
def _get_cmsg(cls):
    """Return an empty protobuf message suitable for serializing a function."""
    return function_pb2.Function()

def serialize_to_cmessage(self):
    """Serialize this function into a protobuf message via FunctionParser."""
    return FunctionParser.serialize(self)

@classmethod
def parse_from_cmessage(cls, cmsg, **kwargs):
    """
    :param cmsg: The protobuf message to deserialize.
    :return Function: The function instantiated out of the cmsg data.
    """
    return FunctionParser.parse_from_cmsg(cmsg, **kwargs)
def string_references(self, minimum_length=2, vex_only=False):
    """
    All of the constant string references used by this function.

    :param minimum_length: The minimum length of strings to find (default is 2).
    :param vex_only:       Only analyze VEX IR, don't interpret the entry state to detect additional constants.
    :return:               A list of tuples of (address, string) where is address is the location of the string in
                           memory.
    """
    strings = []
    memory = self._project.loader.memory

    # get known instruction addresses and call targets
    # these addresses cannot be string references, but show up frequently in the runtime values
    known_executable_addresses = set()
    for block in self.blocks:
        known_executable_addresses.update(block.instruction_addrs)
    for function in self._function_manager.values():
        known_executable_addresses.update(set(x.addr for x in function.graph.nodes()))

    # loop over all local runtime values and check if the value points to a printable string
    for addr in self.local_runtime_values if not vex_only else self.code_constants:
        if not isinstance(addr, claripy.fp.FPV) and addr in memory:
            # check that the address isn't an pointing to known executable code
            # and that it isn't an indirect pointer to known executable code
            try:
                possible_pointer = memory.unpack_word(addr)
                if addr not in known_executable_addresses and possible_pointer not in known_executable_addresses:
                    # build string one byte at a time until a non-printable character is hit
                    stn = ""
                    offset = 0
                    current_char = chr(memory[addr + offset])
                    while current_char in string.printable:
                        stn += current_char
                        offset += 1
                        current_char = chr(memory[addr + offset])
                    # check that the string was a null terminated string with minimum length
                    if current_char == "\x00" and len(stn) >= minimum_length:
                        strings.append((addr, stn))
            except KeyError:
                # the address (or a following byte) is not backed by loaded memory
                pass
    return strings
@property
def local_runtime_values(self):
    """
    Tries to find all runtime values of this function which do not come from inputs.
    These values are generated by starting from a blank state and reanalyzing the basic blocks once each.
    Function calls are skipped, and back edges are never taken so these values are often unreliable,
    This function is good at finding simple constant addresses which the function will use or calculate.

    :return: a set of constants
    """
    constants = set()

    if not self._project.loader.main_object.contains_addr(self.addr):
        return constants

    # FIXME the old way was better for architectures like mips, but we need the initial irsb
    # reanalyze function with a new initial state (use persistent registers)
    # initial_state = self._function_manager._cfg.get_any_irsb(self.addr).initial_state
    # fresh_state = self._project.factory.blank_state(mode="fastpath")
    # for reg in initial_state.arch.persistent_regs + ['ip']:
    #     fresh_state.registers.store(reg, initial_state.registers.load(reg))

    # reanalyze function with a new initial state
    fresh_state = self._project.factory.blank_state(mode="fastpath")
    fresh_state.regs.ip = self.addr

    graph_addrs = set(x.addr for x in self.graph.nodes() if isinstance(x, BlockNode))

    # process the nodes in a breadth-first order keeping track of which nodes have already been analyzed
    analyzed = set()
    q = [fresh_state]
    analyzed.add(fresh_state.solver.eval(fresh_state.ip))
    while len(q) > 0:
        state = q.pop()
        # make sure its in this function
        if state.solver.eval(state.ip) not in graph_addrs:
            continue
        # don't trace into simprocedures
        if self._project.is_hooked(state.solver.eval(state.ip)):
            continue
        # don't trace outside of the binary
        if not self._project.loader.main_object.contains_addr(state.solver.eval(state.ip)):
            continue
        # don't trace unreachable blocks
        if state.history.jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode',
                                      'Ijk_MapFail', 'Ijk_NoRedir',
                                      'Ijk_SigTRAP', 'Ijk_SigSEGV',
                                      'Ijk_ClientReq'}:
            continue

        curr_ip = state.solver.eval(state.ip)

        # get runtime values from logs of successors
        successors = self._project.factory.successors(state)
        for succ in successors.flat_successors + successors.unsat_successors:
            for a in succ.history.recent_actions:
                for ao in a.all_objects:
                    if not isinstance(ao.ast, claripy.ast.Base):
                        constants.add(ao.ast)
                    elif not ao.ast.symbolic:
                        constants.add(succ.solver.eval(ao.ast))

            # add successors to the queue to analyze
            if not succ.solver.symbolic(succ.ip):
                succ_ip = succ.solver.eval(succ.ip)
                if succ_ip in self and succ_ip not in analyzed:
                    analyzed.add(succ_ip)
                    q.insert(0, succ)

        # force jumps to missing successors
        # (this is a slightly hacky way to force it to explore all the nodes in the function)
        node = self.get_node(curr_ip)
        if node is None:
            # the node does not exist. maybe it's not a block node.
            continue
        missing = set(x.addr for x in list(self.graph.successors(node))) - analyzed
        for succ_addr in missing:
            l.info("Forcing jump to missing successor: %#x", succ_addr)
            if succ_addr not in analyzed:
                all_successors = successors.unconstrained_successors + \
                                 successors.flat_successors + \
                                 successors.unsat_successors
                if len(all_successors) > 0:
                    # set the ip of a copied successor to the successor address
                    succ = all_successors[0].copy()
                    succ.ip = succ_addr
                    analyzed.add(succ_addr)
                    q.insert(0, succ)
                else:
                    l.warning("Could not reach successor: %#x", succ_addr)

    return constants
@property
def num_arguments(self):
    """The number of recovered arguments (register and stack arguments combined)."""
    return len(self._argument_registers) + len(self._argument_stack_variables)
def __contains__(self, val):
    """Membership test: ``addr in function`` is True iff ``val`` is the int address of a known block."""
    return isinstance(val, int) and val in self._block_sizes
def __str__(self):
    # Multi-line human-readable summary of this function's recovered metadata.
    s = 'Function %s [%s]\n' % (self.name, self.addr)
    s += ' Syscall: %s\n' % self.is_syscall
    s += ' SP difference: %d\n' % self.sp_delta
    s += ' Has return: %s\n' % self.has_return
    s += ' Returning: %s\n' % ('Unknown' if self.returning is None else self.returning)
    s += ' Alignment: %s\n' % (self.alignment)
    s += ' Arguments: reg: %s, stack: %s\n' % \
        (self._argument_registers,
         self._argument_stack_variables)
    s += ' Blocks: [%s]\n' % ", ".join(['%#x' % i for i in self.block_addrs])
    s += " Calling convention: %s" % self.calling_convention
    return s

def __repr__(self):
    # Short one-line representation; hex-format the address when it is an int.
    if self.is_syscall:
        return '<Syscall function %s (%s)>' % (self.name,
                                               hex(self.addr) if isinstance(self.addr, int) else self.addr)
    return '<Function %s (%s)>' % (self.name, hex(self.addr) if isinstance(self.addr, int) else self.addr)
@property
def endpoints(self):
    """All endpoint nodes of every sort, flattened into a single list."""
    return list(itertools.chain(*self._endpoints.values()))

@property
def endpoints_with_type(self):
    """Endpoint nodes grouped by sort ('call' / 'return' / 'transition')."""
    return self._endpoints

@property
def ret_sites(self):
    """Blocks that end with a return."""
    return list(self._ret_sites)

@property
def jumpout_sites(self):
    """Blocks that jump/branch out of this function."""
    return list(self._jumpout_sites)

@property
def retout_sites(self):
    """Blocks that return to outside of this function."""
    return list(self._retout_sites)

@property
def callout_sites(self):
    """Blocks that end in a call to a non-returning function."""
    return list(self._callout_sites)

@property
def size(self):
    """Total size in bytes of all local blocks."""
    return sum([ b.size for b in self.blocks ])
@property
def binary(self):
    """
    Get the object this function belongs to.

    :return: The object this function belongs to.
    """
    return self._project.loader.find_object_containing(self.addr, membership_check=False)

@property
def offset(self) -> int:
    """
    :return: the function's binary offset (i.e., non-rebased address)
    """
    return self.addr - self.binary.mapped_base

@property
def symbol(self) -> Union[None, Symbol]:
    """
    :return: the function's Symbol, if any
    """
    return self.binary.loader.find_symbol(self.addr)
def add_jumpout_site(self, node):
    """
    Add a custom jumpout site.

    :param node: The address of the basic block that control flow leaves during this transition.
    :return: None
    """
    self._register_nodes(True, node)
    self._jumpout_sites.add(node)
    self._add_endpoint(node, 'transition')

def add_retout_site(self, node):
    """
    Add a custom retout site.

    Retout (returning to outside of the function) sites are very rare. It mostly occurs during CFG recovery when we
    incorrectly identify the beginning of a function in the first iteration, and then correctly identify that
    function later in the same iteration (function alignments can lead to this bizarre case). We will mark all edges
    going out of the header of that function as a outside edge, because all successors now belong to the
    incorrectly-identified function. This identification error will be fixed in the second iteration of CFG
    recovery. However, we still want to keep track of jumpouts/retouts during the first iteration so other logic in
    CFG recovery still work.

    :param node: The address of the basic block that control flow leaves the current function after a call.
    :return: None
    """
    self._register_nodes(True, node)
    self._retout_sites.add(node)
    self._add_endpoint(node, 'return')
def _get_initial_name(self):
    """
    Determine the most suitable name of the function.

    :return: The initial function name.
    :rtype: string
    """
    name = None
    addr = self.addr

    # Try to get a name from existing labels
    if self._function_manager is not None:
        if addr in self._function_manager._kb.labels:
            name = self._function_manager._kb.labels[addr]

    # try to get the name from a hook
    if name is None and self.project is not None:
        project = self.project
        if project.is_hooked(addr):
            hooker = project.hooked_by(addr)
            name = hooker.display_name
        elif project.simos.is_syscall_addr(addr):
            syscall_inst = project.simos.syscall_from_addr(addr)
            name = syscall_inst.display_name

    # generate an IDA-style sub_X name
    if name is None:
        name = 'sub_%x' % addr

    return name

def _get_initial_binary_name(self):
    """
    Determine the name of the binary where this function is.

    :return: The binary name, or None if it cannot be determined.
    """
    binary_name = None

    # if this function is a simprocedure but not a syscall, use its library name as
    # its binary name
    # if it is a syscall, fall back to use self.binary.binary which explicitly says cle##kernel
    if self.project and self.is_simprocedure and not self.is_syscall:
        hooker = self.project.hooked_by(self.addr)
        if hooker is not None:
            binary_name = hooker.library_name

    if binary_name is None and self.binary is not None and self.binary.binary:
        binary_name = os.path.basename(self.binary.binary)

    return binary_name

def _get_initial_returning(self):
    """
    Determine if this function returns or not *if it is hooked by a SimProcedure or a user hook*.

    :return: True if the hooker returns, False otherwise.
    :rtype: bool
    """
    hooker = None
    if self.is_syscall:
        hooker = self.project.simos.syscall_from_addr(self.addr)
    elif self.is_simprocedure:
        hooker = self.project.hooked_by(self.addr)
    if hooker and hasattr(hooker, 'NO_RET'):
        return not hooker.NO_RET

    # Cannot determine
    return None
def _clear_transition_graph(self):
    """Reset all block caches, the startpoint, and the transition graph to an empty state."""
    self._block_cache = {}
    self._block_sizes = {}
    self.startpoint = None
    self.transition_graph = networkx.DiGraph()
    self._local_transition_graph = None

def _confirm_fakeret(self, src, dst):
    """
    Mark an existing fake_return (fall-through after a call) edge as confirmed.

    :param src: Source node of the edge.
    :param dst: Destination node of the edge.
    :raises AngrValueError: If the edge does not exist or is not a fake_return edge.
    """
    if src not in self.transition_graph or dst not in self.transition_graph[src]:
        raise AngrValueError('FakeRet edge (%s, %s) is not in transition graph.' % (src, dst))
    data = self.transition_graph[src][dst]
    if 'type' not in data or data['type'] != 'fake_return':
        raise AngrValueError('Edge (%s, %s) is not a FakeRet edge' % (src, dst))
    # it's confirmed. register the node if needed
    if 'outside' not in data or data['outside'] is False:
        self._register_nodes(True, dst)
    self.transition_graph[src][dst]['confirmed'] = True
def _transit_to(self, from_node, to_node, outside=False, ins_addr=None, stmt_idx=None, is_exception=False):
    """
    Registers an edge between basic blocks in this function's transition graph.
    Arguments are CodeNode objects.

    :param from_node:           The address of the basic block that control
                                flow leaves during this transition.
    :param to_node:             The address of the basic block that control
                                flow enters during this transition.
    :param bool outside:        If this is a transition to another function, e.g. tail call optimization
    :param ins_addr:            Instruction address of this transition, if known.
    :param stmt_idx:            Statement ID of this transition, if known.
    :param bool is_exception:   Whether this transition models an exception edge.
    :return: None
    """
    if outside:
        # only the source node belongs to this function
        self._register_nodes(True, from_node)
        if to_node is not None:
            self._register_nodes(False, to_node)
        self._jumpout_sites.add(from_node)
    else:
        if to_node is not None:
            self._register_nodes(True, from_node, to_node)
        else:
            self._register_nodes(True, from_node)
    type_ = 'transition' if not is_exception else 'exception'
    if to_node is not None:
        self.transition_graph.add_edge(from_node, to_node, type=type_, outside=outside, ins_addr=ins_addr,
                                       stmt_idx=stmt_idx
                                       )
    if outside:
        # this node is an endpoint of the current function
        self._add_endpoint(from_node, type_)
    # clear the cache
    self._local_transition_graph = None

def _call_to(self, from_node, to_func, ret_node, stmt_idx=None, ins_addr=None, return_to_outside=False):
    """
    Registers an edge between the caller basic block and callee function.

    :param from_node:   The basic block that control flow leaves during the transition.
    :type from_node:    angr.knowledge.CodeNode
    :param to_func:     The function that we are calling
    :type to_func:      Function
    :param ret_node:    The basic block that control flow should return to after the
                        function call.
    :type ret_node:     angr.knowledge.CodeNode or None
    :param stmt_idx:    Statement ID of this call.
    :type stmt_idx:     int, str or None
    :param ins_addr:    Instruction address of this call.
    :type ins_addr:     int or None
    """
    self._register_nodes(True, from_node)
    if to_func.is_syscall:
        self.transition_graph.add_edge(from_node, to_func, type='syscall', stmt_idx=stmt_idx, ins_addr=ins_addr)
    else:
        self.transition_graph.add_edge(from_node, to_func, type='call', stmt_idx=stmt_idx, ins_addr=ins_addr)
        if ret_node is not None:
            # model the fall-through to the return site as a fake_return edge
            self._fakeret_to(from_node, ret_node, to_outside=return_to_outside)
    self._local_transition_graph = None

def _fakeret_to(self, from_node, to_node, confirmed=None, to_outside=False):
    # Register a fake_return (fall-through after a call) edge; a confirmed edge also
    # registers the destination node as local (unless it's an outside edge).
    self._register_nodes(True, from_node)
    if confirmed is None:
        self.transition_graph.add_edge(from_node, to_node, type='fake_return', outside=to_outside)
    else:
        self.transition_graph.add_edge(from_node, to_node, type='fake_return', confirmed=confirmed,
                                       outside=to_outside
                                       )
        if confirmed:
            self._register_nodes(not to_outside, to_node)
    self._local_transition_graph = None

def _remove_fakeret(self, from_node, to_node):
    # Drop a previously-added fake_return edge and invalidate the cached local graph.
    self.transition_graph.remove_edge(from_node, to_node)
    self._local_transition_graph = None

def _return_from_call(self, from_func, to_node, to_outside=False):
    # Register a return edge from a callee function back into this function, and
    # confirm any matching fake_return edges into the return target.
    self.transition_graph.add_edge(from_func, to_node, type='return', to_outside=to_outside)
    for _, _, data in self.transition_graph.in_edges(to_node, data=True):
        if 'type' in data and data['type'] == 'fake_return':
            data['confirmed'] = True
    self._local_transition_graph = None
def _register_nodes(self, is_local, *nodes):
    """
    Add nodes to the transition graph and update block-size/startpoint bookkeeping.

    :param bool is_local: Whether the nodes belong to this function (they are then also
                          recorded as local blocks).
    :param nodes:         Nodes to register.
    """
    if not isinstance(is_local, bool):
        raise AngrValueError('_register_nodes(): the "is_local" parameter must be a bool')

    for node in nodes:
        self.transition_graph.add_node(node)
        if not isinstance(node, CodeNode):
            # only CodeNodes carry size/addr bookkeeping
            continue
        node._graph = self.transition_graph
        if node.addr not in self or self._block_sizes[node.addr] == 0:
            self._block_sizes[node.addr] = node.size
        if node.addr == self.addr:
            # prefer a non-hook node as the function startpoint
            if self.startpoint is None or not self.startpoint.is_hook:
                self.startpoint = node
        if is_local:
            self._local_blocks[node.addr] = node
            self._local_block_addrs.add(node.addr)
        # add BlockNodes to the addr_to_block_node cache if not already there
        if isinstance(node, BlockNode):
            if node.addr not in self._addr_to_block_node:
                self._addr_to_block_node[node.addr] = node
            #else:
            #    # checks that we don't have multiple block nodes at a single address
            #    assert node == self._addr_to_block_node[node.addr]

def _add_return_site(self, return_site):
    """
    Registers a basic block as a site for control flow to return from this function.

    :param CodeNode return_site: The block node that ends with a return.
    """
    self._register_nodes(True, return_site)
    self._ret_sites.add(return_site)
    # A return site must be an endpoint of the function - you cannot continue execution of the current function
    # after returning
    self._add_endpoint(return_site, 'return')

def _add_call_site(self, call_site_addr, call_target_addr, retn_addr):
    """
    Registers a basic block as calling a function and returning somewhere.

    :param call_site_addr:      The address of a basic block that ends in a call.
    :param call_target_addr:    The address of the target of said call.
    :param retn_addr:           The address that said call will return to.
    """
    self._call_sites[call_site_addr] = (call_target_addr, retn_addr)
def _add_endpoint(self, endpoint_node, sort):
    """
    Registers an endpoint with a type of `sort`. The type can be one of the following:
    - call: calling a function that does not return
    - return: returning from the current function
    - transition: a jump/branch targeting a different function

    It is possible for a block to act as two different sorts of endpoints. For example, consider the following
    block:

    .text:0000000000024350                 mov     eax, 1
    .text:0000000000024355                 lock xadd [rdi+4], eax
    .text:000000000002435A                 retn

    VEX code:

    00 | ------ IMark(0x424350, 5, 0) ------
    01 | PUT(rax) = 0x0000000000000001
    02 | PUT(rip) = 0x0000000000424355
    03 | ------ IMark(0x424355, 5, 0) ------
    04 | t11 = GET:I64(rdi)
    05 | t10 = Add64(t11,0x0000000000000004)
    06 | t0 = LDle:I32(t10)
    07 | t2 = Add32(t0,0x00000001)
    08 | t(4,4294967295) = CASle(t10 :: (t0,None)->(t2,None))
    09 | t14 = CasCmpNE32(t4,t0)
    10 | if (t14) { PUT(rip) = 0x424355; Ijk_Boring }
    11 | PUT(cc_op) = 0x0000000000000003
    12 | t15 = 32Uto64(t0)
    13 | PUT(cc_dep1) = t15
    14 | PUT(cc_dep2) = 0x0000000000000001
    15 | t17 = 32Uto64(t0)
    16 | PUT(rax) = t17
    17 | PUT(rip) = 0x000000000042435a
    18 | ------ IMark(0x42435a, 1, 0) ------
    19 | t6 = GET:I64(rsp)
    20 | t7 = LDle:I64(t6)
    21 | t8 = Add64(t6,0x0000000000000008)
    22 | PUT(rsp) = t8
    23 | t18 = Sub64(t8,0x0000000000000080)
    24 | ====== AbiHint(0xt18, 128, t7) ======
    NEXT: PUT(rip) = t7; Ijk_Ret

    This block acts as both a return endpoint and a transition endpoint (transitioning to 0x424355).

    :param endpoint_node: The endpoint node.
    :param sort:          Type of the endpoint.
    :return: None
    """
    self._endpoints[sort].add(endpoint_node)
def mark_nonreturning_calls_endpoints(self):
    """
    Iterate through all call edges in transition graph. For each call a non-returning function, mark the source
    basic block as an endpoint.

    This method should only be executed once all functions are recovered and analyzed by CFG recovery, so we know
    whether each function returns or not.

    :return: None
    """
    for src, dst, data in self.transition_graph.edges(data=True):
        if 'type' in data and data['type'] == 'call':
            func_addr = dst.addr
            if func_addr in self._function_manager:
                function = self._function_manager[func_addr]
                if function.returning is False:
                    # the target function does not return
                    the_node = self.get_node(src.addr)
                    self._callout_sites.add(the_node)
                    self._add_endpoint(the_node, 'call')

def get_call_sites(self) -> Iterable[int]:
    """
    Gets a list of all the basic blocks that end in calls.

    :return: A view of the addresses of the blocks that end in calls.
    """
    return self._call_sites.keys()
def get_call_target(self, callsite_addr):
    """
    Get the target of a call.

    :param callsite_addr:   The address of a basic block that ends in a call.
    :return:                The target of said call, or None if callsite_addr is not a
                            callsite.
    """
    entry = self._call_sites.get(callsite_addr, None)
    return entry[0] if entry is not None else None

def get_call_return(self, callsite_addr):
    """
    Get the hypothetical return address of a call.

    :param callsite_addr:   The address of the basic block that ends in a call.
    :return:                The likely return target of said call, or None if callsite_addr
                            is not a callsite.
    """
    entry = self._call_sites.get(callsite_addr, None)
    return entry[1] if entry is not None else None
@property
def graph(self):
    """
    Get a local transition graph. A local transition graph is a transition graph that only contains nodes that
    belong to the current function. All edges, except for the edges going out from the current function or coming
    from outside the current function, are included.

    The generated graph is cached in self._local_transition_graph.

    :return: A local transition graph.
    :rtype: networkx.DiGraph
    """
    if self._local_transition_graph is not None:
        return self._local_transition_graph

    g = networkx.DiGraph()
    if self.startpoint is not None:
        g.add_node(self.startpoint)
    for block in self._local_blocks.values():
        g.add_node(block)
    for src, dst, data in self.transition_graph.edges(data=True):
        if 'type' in data:
            # keep intra-function transition/exception edges, and only the *confirmed*
            # fake_return (fall-through) edges
            if data['type'] in ('transition', 'exception') and ('outside' not in data or data['outside'] is False):
                g.add_edge(src, dst, **data)
            elif data['type'] == 'fake_return' and 'confirmed' in data and \
                    ('outside' not in data or data['outside'] is False):
                g.add_edge(src, dst, **data)

    self._local_transition_graph = g

    return g
def graph_ex(self, exception_edges=True):
    """
    Get a local transition graph with a custom configuration. A local transition graph is a transition graph that
    only contains nodes that belong to the current function. This method allows user to exclude certain types of
    edges together with the nodes that are only reachable through such edges, such as exception edges.

    The generated graph is not cached.

    :param bool exception_edges:    Should exception edges and the nodes that are only reachable through exception
                                    edges be kept.
    :return:                        A local transition graph with a special configuration.
    :rtype:                         networkx.DiGraph
    """
    # graph_ex() should not impact any already cached graph
    old_cached_graph = self._local_transition_graph
    graph = self.graph
    self._local_transition_graph = old_cached_graph    # restore the cached graph

    # fast path
    if exception_edges:
        return graph

    # BFS on local graph but ignoring certain types of edges
    g = networkx.DiGraph()
    queue = [ n for n in graph if n is self.startpoint or graph.in_degree[n] == 0 ]
    traversed = set(queue)

    while queue:
        node = queue.pop(0)
        g.add_node(node)
        for _, dst, edge_data in graph.out_edges(node, data=True):
            edge_type = edge_data.get('type', None)
            # NOTE: exception_edges is always False here (the True case returned above),
            # so this condition simply filters out exception edges
            if not exception_edges and edge_type == 'exception':
                # ignore this edge
                continue
            g.add_edge(node, dst, **edge_data)

            if dst not in traversed:
                traversed.add(dst)
                queue.append(dst)

    return g

def transition_graph_ex(self, exception_edges=True):
    """
    Get a transition graph with a custom configuration. This method allows user to exclude certain types of edges
    together with the nodes that are only reachable through such edges, such as exception edges.

    The generated graph is not cached.

    :param bool exception_edges:    Should exception edges and the nodes that are only reachable through exception
                                    edges be kept.
    :return:                        A local transition graph with a special configuration.
    :rtype:                         networkx.DiGraph
    """
    graph = self.transition_graph

    # fast path
    if exception_edges:
        return graph

    # BFS on local graph but ignoring certain types of edges
    g = networkx.DiGraph()
    queue = [ n for n in graph if n is self.startpoint or graph.in_degree[n] == 0 ]
    traversed = set(queue)

    while queue:
        node = queue.pop(0)
        traversed.add(node)
        g.add_node(node)
        for _, dst, edge_data in graph.out_edges(node, data=True):
            edge_type = edge_data.get('type', None)
            if not exception_edges and edge_type == 'exception':
                # ignore this edge
                continue
            g.add_edge(node, dst, **edge_data)

            if dst not in traversed:
                traversed.add(dst)
                queue.append(dst)

    return g
def subgraph(self, ins_addrs):
    """
    Generate a sub control flow graph of instruction addresses based on self.graph

    :param iterable ins_addrs: A collection of instruction addresses that should be included in the subgraph.
    :return networkx.DiGraph: A subgraph.
    """
    # find all basic blocks that include those instructions
    blocks = []
    block_addr_to_insns = {}

    for b in self._local_blocks.values():
        # TODO: should I call get_blocks?
        block = self.get_block(b.addr, size=b.size, byte_string=b.bytestr)
        common_insns = set(block.instruction_addrs).intersection(ins_addrs)
        if common_insns:
            blocks.append(b)
            block_addr_to_insns[b.addr] = sorted(common_insns)

    #subgraph = networkx.subgraph(self.graph, blocks)
    subgraph = self.graph.subgraph(blocks).copy()
    g = networkx.DiGraph()

    for n in subgraph.nodes():
        insns = block_addr_to_insns[n.addr]

        in_edges = subgraph.in_edges(n)
        # out_edges = subgraph.out_edges(n)
        if len(in_edges) > 1:
            # the first instruction address should be included
            if n.addr not in insns:
                insns = [n.addr] + insns

        for src, _ in in_edges:
            # connect the last selected instruction of each predecessor to this block's
            # first selected instruction
            last_instr = block_addr_to_insns[src.addr][-1]
            g.add_edge(last_instr, insns[0])

        for i in range(0, len(insns) - 1):
            g.add_edge(insns[i], insns[i + 1])

    return g
def instruction_size(self, insn_addr):
    """
    Get the size of the instruction specified by `insn_addr`.

    :param int insn_addr: Address of the instruction
    :return int: Size of the instruction in bytes, or None if the instruction is not found.
    """
    for candidate in self.blocks:
        block = self.get_block(candidate.addr, size=candidate.size, byte_string=candidate.bytestr)
        if insn_addr not in block.instruction_addrs:
            continue
        idx = block.instruction_addrs.index(insn_addr)
        if idx == len(block.instruction_addrs) - 1:
            # the very last instruction: size is bounded by the end of the block
            next_addr = block.addr + block.size
        else:
            next_addr = block.instruction_addrs[idx + 1]
        return next_addr - insn_addr
    return None
def addr_to_instruction_addr(self, addr):
    """
    Obtain the address of the instruction that covers @addr.

    :param int addr:    An address.
    :return:            Address of the instruction that covers @addr, or None if this addr is not covered by any
                        instruction of this function.
    :rtype:             int or None
    """
    # TODO: Replace the linear search with binary search
    for blk in self.blocks:
        if not (blk.addr <= addr < blk.addr + blk.size):
            continue
        last = len(blk.instruction_addrs) - 1
        for idx, ins_addr in enumerate(blk.instruction_addrs):
            # the covering instruction is the one that starts at or before addr and
            # whose successor (or the block end) lies beyond addr
            if ins_addr <= addr and (idx == last or addr < blk.instruction_addrs[idx + 1]):
                return ins_addr
        # the address lies inside the block but before its first instruction
        return None
    return None
def dbg_print(self):
    """
    Returns a representation of the list of basic blocks in this function.
    """
    labels = ('%#08x' % node.addr for node in self.transition_graph.nodes())
    return "[%s]" % ', '.join(labels)
def dbg_draw(self, filename):
    """
    Draw the transition graph and save it to a PNG file.

    :param filename: Path of the output image file.
    :return: None
    """
    import matplotlib.pyplot as pyplot  # pylint: disable=import-error
    from networkx.drawing.nx_agraph import graphviz_layout  # pylint: disable=import-error

    tmp_graph = networkx.DiGraph()
    for from_block, to_block in self.transition_graph.edges():
        node_a = "%#08x" % from_block.addr
        node_b = "%#08x" % to_block.addr
        # BUGFIX: test membership on the original node/address, not on the formatted
        # label string. self._ret_sites stores nodes and self._call_sites is keyed by
        # block addresses, so the previous string-based tests could never match and
        # the "[Ret]"/"[Call]" labels were never applied.
        if to_block in self._ret_sites:
            node_b += "[Ret]"
        if from_block.addr in self._call_sites:
            node_a += "[Call]"
        tmp_graph.add_edge(node_a, node_b)
    pos = graphviz_layout(tmp_graph, prog='fdp')   # pylint: disable=no-member
    networkx.draw(tmp_graph, pos, node_size=1200)
    pyplot.savefig(filename)
def _add_argument_register(self, reg_offset):
    """
    Registers a register offset as being used as an argument to the function.

    :param reg_offset: The offset of the register to register.
    """
    # only record offsets the function manager recognizes as argument registers,
    # and avoid duplicates
    if reg_offset in self._function_manager._arg_registers and \
                reg_offset not in self._argument_registers:
        self._argument_registers.append(reg_offset)

def _add_argument_stack_variable(self, stack_var_offset):
    # Record a stack offset as a function argument, avoiding duplicates.
    if stack_var_offset not in self._argument_stack_variables:
        self._argument_stack_variables.append(stack_var_offset)
@property
def arguments(self):
    """Function arguments: taken from the calling convention when available, otherwise the recovered register/stack arguments."""
    if self.calling_convention is None:
        return self._argument_registers + self._argument_stack_variables
    else:
        return self.calling_convention.args

@property
def has_return(self):
    """True if at least one return site has been registered for this function."""
    return len(self._ret_sites) > 0

@property
def callable(self):
    """A Callable object for invoking this function via the project factory."""
    return self._project.factory.callable(self.addr)
    def normalize(self):
        """
        Make sure all basic blocks in the transition graph of this function do not overlap. You will end up with a CFG
        that IDA Pro generates.

        Overlapping nodes that share an end address are resolved by keeping the
        smallest node and truncating each larger one to a new, shorter node that
        ends where the smallest one begins; edges are rewired accordingly.

        This method does not touch the CFG result. You may call CFG{Emulated, Fast}.normalize() for that matter.

        :return: None
        """

        # let's put a check here
        if self.startpoint is None:
            # this function is empty
            l.debug('Unexpected error: %s does not have any blocks. normalize() fails.', repr(self))
            return

        graph = self.transition_graph
        end_addresses = defaultdict(list)

        # Group all block nodes by their end address; any bucket with more than
        # one node marks an overlap that must be resolved.
        for block in self.nodes:
            if isinstance(block, BlockNode):
                end_addr = block.addr + block.size
                end_addresses[end_addr].append(block)

        while any(len(x) > 1 for x in end_addresses.values()):
            end_addr, all_nodes = \
                next((end_addr, x) for (end_addr, x) in end_addresses.items() if len(x) > 1)

            # The node with the highest start address (smallest size) survives;
            # every other node in the bucket is truncated to end at its start.
            all_nodes = sorted(all_nodes, key=lambda node: node.size)
            smallest_node = all_nodes[0]
            other_nodes = all_nodes[1:]

            is_outside_node = False
            if smallest_node not in graph:
                is_outside_node = True

            # Break other nodes
            for n in other_nodes:
                # get_real_address_if_arm strips the THUMB bit before sizing.
                new_size = get_real_address_if_arm(self._project.arch, smallest_node.addr) - get_real_address_if_arm(self._project.arch, n.addr)
                if new_size == 0:
                    # This is the node that has the same size as the smallest one
                    continue

                new_end_addr = n.addr + new_size

                # Does it already exist?
                new_node = None
                if new_end_addr in end_addresses:
                    nodes = [i for i in end_addresses[new_end_addr] if i.addr == n.addr]
                    if len(nodes) > 0:
                        new_node = nodes[0]

                if new_node is None:
                    # TODO: Do this correctly for hook nodes
                    # Create a new one
                    new_node = BlockNode(n.addr, new_size, graph=graph, thumb=n.thumb)
                    self._block_sizes[n.addr] = new_size
                    self._addr_to_block_node[n.addr] = new_node
                    # Put the newnode into end_addresses
                    end_addresses[new_end_addr].append(new_node)

                # Modify the CFG
                original_predecessors = list(graph.in_edges([n], data=True))
                original_successors = list(graph.out_edges([n], data=True))

                # Successor edges whose source instruction lies inside the
                # truncated tail now originate from the smallest node.
                for _, d, data in original_successors:
                    ins_addr = data.get('ins_addr', data.get('pseudo_ins_addr', None))
                    if ins_addr is not None and ins_addr < d.addr:
                        continue
                    if d not in graph[smallest_node]:
                        if d is n:
                            # A self-loop on the old node becomes an edge onto
                            # the truncated replacement.
                            graph.add_edge(smallest_node, new_node, **data)
                        else:
                            graph.add_edge(smallest_node, d, **data)

                for p, _, _ in original_predecessors:
                    graph.remove_edge(p, n)
                graph.remove_node(n)

                # update local_blocks
                if n.addr in self._local_blocks and self._local_blocks[n.addr].size != new_node.size:
                    del self._local_blocks[n.addr]
                    self._local_blocks[n.addr] = new_node

                # update block_cache and block_sizes
                if (n.addr in self._block_cache and self._block_cache[n.addr].size != new_node.size) or \
                        (n.addr in self._block_sizes and self._block_sizes[n.addr] != new_node.size):
                    # the cache needs updating
                    self._block_cache.pop(n.addr, None)
                    self._block_sizes[n.addr] = new_node.size

                # Predecessor edges move onto the truncated node (unless the
                # predecessor is itself being truncated in this bucket).
                for p, _, data in original_predecessors:
                    if p not in other_nodes:
                        graph.add_edge(p, new_node, **data)

                # We should find the correct successor
                new_successors = [i for i in all_nodes
                                  if i.addr == smallest_node.addr]
                if new_successors:
                    new_successor = new_successors[0]
                    graph.add_edge(new_node, new_successor,
                                   type="transition",
                                   outside=is_outside_node,
                                   # it's named "pseudo_ins_addr" because we have no way to know what the actual last
                                   # instruction is at this moment (without re-lifting the block, which would be a
                                   # waste of time).
                                   pseudo_ins_addr=new_node.addr + new_node.size - 1,
                                   )
                else:
                    # We gotta create a new one
                    l.error('normalize(): Please report it to Fish/maybe john.')

            end_addresses[end_addr] = [smallest_node]

        # Rebuild startpoint
        if self.startpoint.size != self._block_sizes[self.startpoint.addr]:
            self.startpoint = self.get_node(self.startpoint.addr)

        # Clear the cache
        self._local_transition_graph = None

        self.normalized = True
    def find_declaration(self):
        """
        Find the most likely function declaration from the embedded collection of prototypes, set it to self.prototype,
        and update self.calling_convention with the declaration.

        Returns silently (leaving prototype/calling_convention untouched) when
        the owning library cannot be determined or has no prototype for this
        function's name.

        :return: None
        """

        # determine the library name
        if not self.is_plt:
            binary_name = self.binary_name
            if binary_name not in SIM_LIBRARIES:
                return
        else:
            binary_name = None
            # PLT entries must have the same declaration as their jump targets
            # Try to determine which library this PLT entry will jump to
            edges = self.transition_graph.edges()
            if len(edges) == 0: return
            node = next(iter(edges))[1]
            # Only a single jump-out edge to a hook/syscall target is trusted
            # as "this PLT stub jumps straight to a simulated function".
            if len(edges) == 1 and (type(node) is HookNode or type(node) is SyscallNode):
                target = node.addr
                if target in self._function_manager:
                    target_func = self._function_manager[target]
                    binary_name = target_func.binary_name

        if binary_name is None:
            return

        library = SIM_LIBRARIES.get(binary_name, None)

        if library is None:
            return

        if not library.has_prototype(self.name):
            return

        proto = library.get_prototype(self.name)
        self.prototype = proto

        if self.calling_convention is not None:
            # Drop any previously inferred args; the prototype is authoritative.
            self.calling_convention.args = None
            self.calling_convention.set_func_type_with_arch(proto)
@staticmethod
def _addr_to_funcloc(addr):
# FIXME
if isinstance(addr, tuple):
return addr[0]
else: # int, long
return addr
@property
def demangled_name(self):
if self.name[0:2] == "_Z":
try:
ast = parse(self.name)
except NotImplementedError:
return self.name
if ast:
return ast.__str__()
return self.name
def apply_definition(self, definition, calling_convention=None):
"""
:param str definition:
:param Optional[Union[SimCC, Type[SimCC]]] calling_convention:
:return None:
"""
if not definition.endswith(";"):
definition += ";"
func_def = parse_defns(definition)
if len(func_def.keys()) > 1:
raise Exception("Too many definitions: %s " % list(func_def.keys()))
name, ty = func_def.popitem() # type: str, SimTypeFunction
self.name = name
# setup the calling convention
# If a SimCC object is passed assume that this is sane and just use it
if isinstance(calling_convention, SimCC):
self.calling_convention = calling_convention
# If it is a subclass of SimCC we can instantiate it
elif isinstance(calling_convention, type) and issubclass(calling_convention, SimCC):
self.calling_convention = calling_convention(self.project.arch, func_ty=ty)
# If none is specified default to something
elif calling_convention is None:
self.calling_convention = self.project.factory.cc(func_ty=ty)
else:
raise TypeError("calling_convention has to be one of: [SimCC, type(SimCC), None]")
def functions_called(self) -> Set['Function']:
"""
:return: The set of all functions that can be reached from the function represented by self.
"""
called = set()
def _find_called(function_address):
successors = set(self._function_manager.callgraph.successors(function_address)) - called
for s in successors:
called.add(s)
_find_called(s)
_find_called(self.addr)
return { self._function_manager.function(a) for a in called }
def copy(self):
func = Function(self._function_manager, self.addr, name=self.name, syscall=self.is_syscall)
func.transition_graph = networkx.DiGraph(self.transition_graph)
func.normalized = self.normalized
func._ret_sites = self._ret_sites.copy()
func._jumpout_sites = self._jumpout_sites.copy()
func._retout_sites = self._retout_sites.copy()
func._endpoints = self._endpoints.copy()
func._call_sites = self._call_sites.copy()
func._project = self._project
func.is_plt = self.is_plt
func.is_simprocedure = self.is_simprocedure
func.binary_name = self.binary_name
func.bp_on_stack = self.bp_on_stack
func.retaddr_on_stack = self.retaddr_on_stack
func.sp_delta = self.sp_delta
func.calling_convention = self.calling_convention
func.prototype = self.prototype
func._returning = self._returning
func.alignment = self.alignment
func.startpoint = self.startpoint
func._addr_to_block_node = self._addr_to_block_node.copy()
func._block_sizes = self._block_sizes.copy()
func._block_cache = self._block_cache.copy()
func._local_blocks = self._local_blocks.copy()
func._local_block_addrs = self._local_block_addrs.copy()
func.info = self.info.copy()
func.tags = self.tags
return func
| 40.664032 | 187 | 0.597411 |
ace2f2cb90eefc46e0b3690d1b6ffdcc7d0e8837 | 5,224 | py | Python | examples/python/reconstruction_system/slac.py | amoran-symbio/Open3D | ae7e44e0dcef11a5df763819d47dec8c5bd5294b | [
"MIT"
] | 1,455 | 2021-07-27T19:44:50.000Z | 2022-03-31T19:39:21.000Z | examples/python/reconstruction_system/slac.py | amoran-symbio/Open3D | ae7e44e0dcef11a5df763819d47dec8c5bd5294b | [
"MIT"
] | 1,439 | 2021-07-27T16:02:52.000Z | 2022-03-31T22:29:05.000Z | examples/python/reconstruction_system/slac.py | amoran-symbio/Open3D | ae7e44e0dcef11a5df763819d47dec8c5bd5294b | [
"MIT"
] | 339 | 2021-07-28T03:07:28.000Z | 2022-03-31T13:38:00.000Z | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
# examples/python/reconstruction_system/slac.py
import numpy as np
import open3d as o3d
import os, sys
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)
from utility.file import join, get_file_list, write_poses_to_log
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
def run(config):
    """Run SLAC (or rigid) fragment-level optimization for the reconstruction system.

    Reads fragment point clouds and the refined pose graph from
    config['path_dataset'], runs either rigid optimization or SLAC non-rigid
    optimization, then writes the updated pose graph, the control grid (SLAC
    only), and a camera trajectory consumed by the slac-integrate stage.

    Args:
        config: Reconstruction-system configuration dictionary.

    Raises:
        RuntimeError: If no fragments are found, or config["method"] is neither
            "rigid" nor "slac".
    """
    print("slac non-rigid optimization.")
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    path_dataset = config['path_dataset']

    ply_file_names = get_file_list(
        join(config["path_dataset"], config["folder_fragment"]), ".ply")
    if (len(ply_file_names) == 0):
        raise RuntimeError(
            "No fragment found in {}, please make sure the reconstruction_system has finished running on the dataset."
            .format(join(config["path_dataset"], config["folder_fragment"])))

    pose_graph_fragment = o3d.io.read_pose_graph(
        join(path_dataset, config["template_refined_posegraph_optimized"]))

    # SLAC optimizer parameters.
    slac_params = o3d.t.pipelines.slac.slac_optimizer_params(
        max_iterations=config["max_iterations"],
        voxel_size=config["voxel_size"],
        distance_threshold=config["distance_threshold"],
        fitness_threshold=config["fitness_threshold"],
        regularizer_weight=config["regularizer_weight"],
        device=o3d.core.Device(str(config["device"])),
        slac_folder=join(path_dataset, config["folder_slac"]))

    # SLAC debug option.
    debug_option = o3d.t.pipelines.slac.slac_debug_option(False, 0)

    # Run the system.
    pose_graph_updated = o3d.pipelines.registration.PoseGraph()

    # rigid optimization method.
    if (config["method"] == "rigid"):
        pose_graph_updated = o3d.t.pipelines.slac.run_rigid_optimizer_for_fragments(
            ply_file_names, pose_graph_fragment, slac_params, debug_option)
    elif (config["method"] == "slac"):
        pose_graph_updated, ctrl_grid = o3d.t.pipelines.slac.run_slac_optimizer_for_fragments(
            ply_file_names, pose_graph_fragment, slac_params, debug_option)

        # Persist the (active) control-grid keys/values for later stages.
        hashmap = ctrl_grid.get_hashmap()
        active_buf_indices = hashmap.active_buf_indices().to(
            o3d.core.Dtype.Int64)

        key_tensor = hashmap.key_tensor()[active_buf_indices]
        key_tensor.save(
            join(slac_params.get_subfolder_name(), "ctr_grid_keys.npy"))

        value_tensor = hashmap.value_tensor()[active_buf_indices]
        value_tensor.save(
            join(slac_params.get_subfolder_name(), "ctr_grid_values.npy"))
    else:
        raise RuntimeError(
            "Requested optimization method {}, is not implemented. Implemented methods includes slac and rigid."
            .format(config["method"]))

    # Write updated pose graph.
    o3d.io.write_pose_graph(
        join(slac_params.get_subfolder_name(),
             config["template_optimized_posegraph_slac"]), pose_graph_updated)

    # Write trajectory for slac-integrate stage.
    # Each per-fragment node pose is composed with its fragment's optimized
    # global pose; extrinsics are the inverse of the camera-to-world pose.
    fragment_folder = join(path_dataset, config["folder_fragment"])
    params = []
    for i in range(len(pose_graph_updated.nodes)):
        fragment_pose_graph = o3d.io.read_pose_graph(
            join(fragment_folder, "fragment_optimized_%03d.json" % i))
        for node in fragment_pose_graph.nodes:
            pose = np.dot(pose_graph_updated.nodes[i].pose, node.pose)
            param = o3d.camera.PinholeCameraParameters()
            param.extrinsic = np.linalg.inv(pose)
            params.append(param)

    trajectory = o3d.camera.PinholeCameraTrajectory()
    trajectory.parameters = params

    o3d.io.write_pinhole_camera_trajectory(
        slac_params.get_subfolder_name() + "/optimized_trajectory_" +
        str(config["method"]) + ".log", trajectory)
| 42.819672 | 118 | 0.677833 |
ace2f464173581d6b5f13525ae0c715df00a8524 | 9,088 | py | Python | rl_reliability_metrics/metrics/metrics_offline.py | mcx/rl-reliability-metrics | f91a671ef00fc49803b67b2fd420e7a703dfdba2 | [
"Apache-2.0"
] | 122 | 2019-12-06T21:10:45.000Z | 2022-03-27T06:29:56.000Z | rl_reliability_metrics/metrics/metrics_offline.py | mcx/rl-reliability-metrics | f91a671ef00fc49803b67b2fd420e7a703dfdba2 | [
"Apache-2.0"
] | 4 | 2019-12-12T22:35:00.000Z | 2021-02-21T23:37:31.000Z | rl_reliability_metrics/metrics/metrics_offline.py | mcx/rl-reliability-metrics | f91a671ef00fc49803b67b2fd420e7a703dfdba2 | [
"Apache-2.0"
] | 18 | 2019-12-11T10:12:25.000Z | 2022-01-20T04:25:57.000Z | # coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Offline metrics for evaluating robustness of an RL algorithm.
Given the performance of an algorithm on a set of rollouts, these metrics
provide measures of the robustness of the RL algorithm.
"""
import abc
import functools
import gin
import numpy as np
from rl_reliability_metrics.metrics import metric_utils as utils
from rl_reliability_metrics.metrics import metrics_base
import scipy.stats
import six
# Abstract base for this module; concrete metrics implement __call__ over
# rollout sets and are discovered via public_subclasses().
@six.add_metaclass(abc.ABCMeta)
class _OfflineMetric(metrics_base.Metric):
  """Base class for offline metrics."""
def all_offline_metrics():
  """Get all the offline metrics.

  Returns:
    All public subclasses of _OfflineMetric defined in this module.
  """
  return _OfflineMetric.public_subclasses()
class _DispersionAcrossRollouts(_OfflineMetric):
  """Measures dispersion across rollouts of a fixed policy.

  A rollout may be a fixed number of actions, or an episode otherwise defined.
  """

  # Set metric properties (see metrics_base.Metric).
  result_dimensions = 'ATR'
  bigger_is_better = False

  def __init__(self, dispersion_fn, baseline=None):
    """Initializes parameters.

    Args:
      dispersion_fn: Function for computing dispersion.
      baseline: Set to "median_perf" to normalize by the median performance
        across rollouts (within each rollout set). Set to a float to normalize
        by that value. Set to None for no normalization.
    """
    self._dispersion_fn = dispersion_fn
    self.baseline = baseline

  def __call__(self, rollout_sets):
    """Computes dispersion across rollouts.

    Args:
      rollout_sets: A list of rollout sets, with length n_rollout_sets. Each
        element is a 2D numpy array where row 0 is an index variable (e.g.
        range(0, n_rollouts)) and row 1 holds the performances per rollout.

    Returns:
      Dispersion across rollouts, computed for each rollout set.
      (1-D Numpy array with length = n_rollout_sets)
    """
    utils.assert_non_empty(rollout_sets)

    dispersions = np.array(
        [self._dispersion_fn(rollout_set[1, :]) for rollout_set in rollout_sets])

    if self.baseline:
      if self.baseline == 'median_perf':
        divisor = utils.median_rollout_performance(rollout_sets)
      else:
        divisor = self.baseline
      dispersions /= divisor

    return dispersions
@gin.configurable
class MadAcrossRollouts(_DispersionAcrossRollouts):
  """Computes median absolute deviation across rollouts of a fixed policy.

  A rollout may be a fixed number of actions, or an episode otherwise defined.
  """

  def __init__(self, baseline=None):
    # See _DispersionAcrossRollouts.__init__ for the meaning of `baseline`.
    super(MadAcrossRollouts, self).__init__(
        utils.median_absolute_deviations,
        baseline)
@gin.configurable
class IqrAcrossRollouts(_DispersionAcrossRollouts):
  """Computes inter-quartile range across rollouts of a fixed policy.

  A rollout may be a fixed number of actions, or an episode otherwise defined.
  """

  def __init__(self, baseline=None):
    # See _DispersionAcrossRollouts.__init__ for the meaning of `baseline`.
    super(IqrAcrossRollouts, self).__init__(scipy.stats.iqr, baseline)
@gin.configurable
class StddevAcrossRollouts(_DispersionAcrossRollouts):
  """Computes sample standard deviation across rollouts of a fixed policy.

  (Docstring previously said "median absolute deviation", copied from
  MadAcrossRollouts; the dispersion function used here is np.std with ddof=1.)

  A rollout may be a fixed number of actions, or an episode otherwise defined.
  """

  def __init__(self, baseline=None):
    # See _DispersionAcrossRollouts.__init__ for the meaning of `baseline`.
    super(StddevAcrossRollouts, self).__init__(
        functools.partial(np.std, ddof=1), baseline)
class _CVaRAcrossRollouts(_OfflineMetric):
  """Measures CVaR (a measure of risk) across rollouts of a fixed policy.

  A rollout may be a fixed number of actions, or an episode otherwise defined.
  """

  # Set metric properties (see metrics_base.Metric).
  result_dimensions = 'ATR'
  bigger_is_better = True

  def __init__(self, tail, alpha=0.05, baseline=None):
    """Initializes parameters for computing CVaR across rollouts.

    Args:
      tail: Set to "lower" or "upper" accordingly to compute CVaR on the lower
        or upper tail of the distribution.
      alpha: The threshold for computing CVaR. If tail="lower", we compute on
        the part of the distribution <= the (alpha)-quantile. If tail="upper",
        we compute on the part of the distribution >= the (1-alpha)-quantile.
      baseline: A float value, or "median_perf". When set, the rollout data
        will be divided by this baseline before we compute CVaR.
    """
    self.tail = tail
    self.alpha = alpha
    self.baseline = baseline

  def __call__(self, rollout_sets):
    """Computes CVaR across rollouts of a fixed policy.

    Args:
      rollout_sets: A list of rollout sets, with length n_rollout_sets. Each
        element is a 2D numpy array where row 0 is an index variable (e.g.
        range(0, n_rollouts)) and row 1 holds the performances per rollout.

    Returns:
      CVaR across rollouts, computed for each rollout set.
      (1-D Numpy array with length = n_rollout_sets)
    """
    utils.assert_non_empty(rollout_sets)

    if self.baseline is not None:
      if self.baseline == 'median_perf':
        divisor = utils.median_rollout_performance(rollout_sets)
      else:
        divisor = self.baseline
      rollout_sets = utils.divide_by_baseline(rollout_sets, divisor)

    # Compute CVaR within each rollout set.
    return np.array([
        utils.compute_cvar(rollout_set[1, :], self.tail, self.alpha)
        for rollout_set in rollout_sets
    ])
@gin.configurable
class LowerCVaRAcrossRollouts(_CVaRAcrossRollouts):
  """CVaR on the lower tail (risk from the worst-performing rollouts)."""

  def __init__(self, alpha=0.05, baseline=None):
    # See _CVaRAcrossRollouts.__init__ for the meaning of alpha/baseline.
    super(LowerCVaRAcrossRollouts, self).__init__('lower', alpha, baseline)
@gin.configurable
class UpperCVaRAcrossRollouts(_CVaRAcrossRollouts):
  """CVaR on the upper tail (expected value of the best-performing rollouts)."""

  def __init__(self, alpha=0.05, baseline=None):
    # See _CVaRAcrossRollouts.__init__ for the meaning of alpha/baseline.
    super(UpperCVaRAcrossRollouts, self).__init__('upper', alpha, baseline)
@gin.configurable
class MedianPerfAcrossRollouts(_OfflineMetric):
  """Median performance for each rollout set."""

  # Set metric properties (see metrics_base.Metric).
  result_dimensions = 'ATR'
  bigger_is_better = True

  def __init__(self, baseline=None):
    """Initializes parameters for computing median performance.

    Args:
      baseline: If this is a single float, we normalize using
        normalized = perf / baseline. If this is a tuple of floats (low, high),
        we normalize using normalized = (perf - low) / (high - low). If None or
        if an iterable that contains None, we do not perform any normalization.
    """
    self.baseline = baseline

  def __call__(self, rollout_sets):
    """Computes median performance for each rollout set.

    Args:
      rollout_sets: A list of rollout sets, with length n_rollout_sets.
        Each element of the list corresponds to the performance values of one
        set of rollouts that we will measure median performance for (e.g. for a
        single model checkpoint). It is a 2D numpy array where rollout_set[0, :]
        is just an index variable (e.g. range(0, n_rollouts)) and
        rollout_set[1, :] are the performances per rollout.

    Returns:
      Median performance for each rollout set.
      (1-D Numpy array with length = n_rollout_sets)
    """
    rollout_sets = self._normalize(rollout_sets)
    # Return a numpy array as documented, consistent with the other offline
    # metrics (a plain Python list was previously returned here).
    return np.array(
        [np.median(rollout_set[1, :]) for rollout_set in rollout_sets])

  def _normalize(self, rollout_sets):
    """Normalize curves depending on setting of self.baseline."""
    if self.baseline is None:
      return rollout_sets

    if isinstance(self.baseline, tuple):
      if None in self.baseline:  # E.g., (None, None) or (None, some float)
        return rollout_sets
      if len(self.baseline) != 2:
        raise ValueError('If baseline is a tuple it must be of the form '
                         '(low, high). Got %r' % self.baseline)
      low, high = self.baseline
    else:
      # A scalar baseline is treated as band (0, baseline), i.e. perf/baseline.
      low = 0
      high = self.baseline
    return utils.band_normalization(rollout_sets, low, high)
# Maintain a registry linking metric names to classes.
# Built at import time from all public _OfflineMetric subclasses, so that
# metrics can be looked up by class name.
REGISTRY = {
    metric.__name__: metric for metric in all_offline_metrics()
}
| 34.037453 | 80 | 0.714018 |
ace2f563daec2d7e3030d98c9f15f25b377eb86f | 3,813 | py | Python | rels/exceptions.py | sobolevn/rels | 3b0afb11f2b4325c8ede961809f151dacfa257ef | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2015-04-05T19:21:06.000Z | 2018-11-04T06:06:42.000Z | rels/exceptions.py | sobolevn/rels | 3b0afb11f2b4325c8ede961809f151dacfa257ef | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-06-08T07:41:45.000Z | 2018-11-03T07:15:40.000Z | rels/exceptions.py | sobolevn/rels | 3b0afb11f2b4325c8ede961809f151dacfa257ef | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-06-17T11:35:11.000Z | 2018-11-03T07:12:43.000Z | # coding: utf-8
class RelsException(Exception): pass  # root of the rels exception hierarchy
class ColumnException(RelsException): pass  # errors raised while validating columns
class RecordException(RelsException): pass  # errors raised while building records
class RelationException(RelsException): pass  # errors raised at relation/table level
class PrimaryWithoutUniqueError(ColumnException):
    """Raised when a primary column lacks the required unique restriction."""

    def __init__(self, column_name):
        # Message grammar fixed (was: 'MUST has unique restriction').
        msg = 'Primary column "%s" MUST have a unique restriction' % column_name
        super(PrimaryWithoutUniqueError, self).__init__(msg)
class ExternalWithoutUniqueError(ColumnException):
    """Raised when an external column lacks the required unique restriction."""

    def __init__(self, column_name):
        # Message grammar fixed (was: 'MUST has unique restriction').
        msg = 'External column "%s" MUST have a unique restriction' % column_name
        super(ExternalWithoutUniqueError, self).__init__(msg)
class DuplicateValueError(ColumnException):
    """Raised when a unique-restricted column receives the same value twice."""

    def __init__(self, column_name, value):
        msg = 'Duplicate value "%s" in column "%s"' % (value, column_name)
        super(DuplicateValueError, self).__init__(msg)
class SingleTypeError(ColumnException):
    """Raised when a column mixes values of different types."""

    def __init__(self, column_name):
        msg = 'Column "%s" MUST contain values of one type' % (column_name)
        super(SingleTypeError, self).__init__(msg)
class ColumnsNumberError(RecordException):
    """Raised when a record's value count does not match the column count."""

    def __init__(self, columns, data):
        msg = 'Wrong columns number in record: %s (expected: %d)' % (data, len(columns))
        super(ColumnsNumberError, self).__init__(msg)
class SetRelatedNameError(RecordException):
    """Raised when a related name cannot be attached to an object."""

    def __init__(self, object):  # `object` kept for compatibility (shadows builtin)
        msg = ('Can not set related name for object %r, it has no method "set_related_name"'
               % object)
        super(SetRelatedNameError, self).__init__(msg)
class DuplicateRelatonNameError(RecordException):
    """Raised when a related name collides with an existing record attribute.

    (Class name keeps the historical "Relaton" spelling for compatibility.)
    """

    def __init__(self, record, name):
        msg = 'Can not set related name for record %s, it has already had attribute with name "%s"' % (record, name)
        super(DuplicateRelatonNameError, self).__init__(msg)
class DuplicateIsPrimaryError(RecordException):
    """Raised when a primary attribute duplicates another record attribute."""

    def __init__(self, record, column, attr_name, primary_name):
        msg = ('record %(record)s attribute %(attr_name)s of column %(column_name)s duplicates another record attribute (probably, column with name "%(primary_name)s")' %
               {'attr_name': attr_name,
                'record': record,
                'column_name': column.name,
                'primary_name': primary_name})
        super(DuplicateIsPrimaryError, self).__init__(msg)
class PrimaryDuplicatesRelationAttributeError(RelationException):
    """Raised when primary names of a column collide with table attributes."""

    def __init__(self, column_name, duplicates):
        msg = 'Primary names "%(duplicates)r" of column "%(column)s" duplicate another table attributes' % {'duplicates': duplicates, 'column': column_name}
        super(PrimaryDuplicatesRelationAttributeError, self).__init__(msg)
class IndexDuplicatesRelationAttributeError(RelationException):
    """Raised when an index name of a column collides with a table attribute."""

    def __init__(self, column_name, index_name):
        msg = ('Index name "%s" of column "%s" duplicates another table attribute'
               % (index_name, column_name))
        super(IndexDuplicatesRelationAttributeError, self).__init__(msg)
class NotExternalValueError(RelationException):
    """Raised when a value is expected to be external but is not."""

    def __init__(self, id_):
        msg = '"%(id)s" is not external value' % {'id': id_}
        super(NotExternalValueError, self).__init__(msg)
class MultipleExternalColumnsError(RelationException):
    """Raised when a relation declares more than one external column."""

    def __init__(self, external_columns):
        msg = ('there are more then 1 external column: %s' %
               ', '.join(column.name for column in external_columns))
        super(MultipleExternalColumnsError, self).__init__(msg)
class WrongRelationNameError(RelationException):
    """Raised when a relation's name does not match its enum name."""

    def __init__(self, relation_name, enum_name):
        msg = 'wrong relation name "%s", expected enum name: "%s"' % (relation_name, enum_name)
        super(WrongRelationNameError, self).__init__(msg)
| 47.074074 | 174 | 0.720168 |
ace2f59567ec0946a29929af679c9f69284919be | 4,349 | py | Python | tests/testing_output_files/src/maxpooling1d/maxpooling1d_test.py | RS-Coop/Correll_Lab | 7c9ea0ff0ce3f54848a0eb112ef29b28d8e735e5 | [
"MIT"
] | 4 | 2021-05-31T23:49:39.000Z | 2022-03-16T12:27:08.000Z | tests/testing_output_files/src/maxpooling1d/maxpooling1d_test.py | RS-Coop/Correll_Lab | 7c9ea0ff0ce3f54848a0eb112ef29b28d8e735e5 | [
"MIT"
] | 1 | 2021-10-02T19:51:01.000Z | 2022-01-20T21:45:37.000Z | tests/testing_output_files/src/maxpooling1d/maxpooling1d_test.py | correlllab/nn4mc_py | 24fa3f9187f7d89692041b640c48f91c2a77b644 | [
"MIT"
] | null | null | null | import maxpooling1d
from tensorflow.keras import Sequential
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.backend import clear_session
import numpy as np
import unittest
from typing import List, Final
import ctypes
import copy
# Byte codes passed to the generated C layer for Keras padding / data_format
# strings (presumably mirroring the C library's enum encoding -- TODO confirm
# against the nn4mc header that defines them).
padding_dictionary = {'valid': 0x00, 'causal': 0x02, 'same': 0x03}
dataformat_dictionary = {'channels_last': 0x00, 'channels_first': 0x02}
def swig_py_object_2_list(object, size : int) -> List[float]:
    """
    Read `size` C floats starting at the address wrapped by `object` (a
    SwigPyObject, or anything int()-convertible to an address) and return
    them as a Python list of floats.
    """
    c_floats = (ctypes.c_float * size).from_address(int(object))
    return [float(c_floats[i]) for i in range(size)]
def swig_py_object_2_list_int(object, size : int) -> List[int]:
    """
    Read `size` C floats starting at the address wrapped by `object` and
    return them truncated (via int()) to a Python list of ints.

    (The previous docstring wrongly said "List[float]", copied from
    swig_py_object_2_list; the values are truncated to match the declared
    return type.)
    """
    c_floats = (ctypes.c_float * size).from_address(int(object))
    return [int(c_floats[i]) for i in range(size)]
def list_2_swig_float_pointer(list : List[float], size : int):
    """
    Copy a Python list of floats into a freshly allocated SWIG float buffer
    (maxpooling1d.input) of the given size and return the buffer.

    (The parameter name `list` shadows the builtin but is kept for interface
    compatibility.)
    """
    swig_buffer = maxpooling1d.input(size)
    for index in range(size):
        swig_buffer[index] = float(list[index])
    return swig_buffer
class MaxPooling1DTest(unittest.TestCase):
    """
    MaxPooling1D

    Parity test: compares the forward pass of the generated C MaxPooling1D
    layer (via the SWIG wrapper) against Keras on random inputs.
    """

    def __generate_sample(self, input_dims):
        # Random Gaussian input of the given shape (mean 0, std 20).
        return np.random.normal(0.0, 20, size = input_dims)

    def __keras_build(self, build_dict : dict):
        # Build a single-layer, non-trainable Keras model from the config dict.
        model = Sequential()
        model.add(MaxPooling1D(
                pool_size = build_dict['pool_size'],
                strides = build_dict['strides'],
                padding = build_dict['padding'],
                data_format = build_dict['data_format'],
                ))
        model.trainable = False
        return model

    def __c_fwd(self, build_dict : dict, input_, input_dims, output_dims):
        # Run the C implementation: flatten the input, copy it into a SWIG
        # buffer, build the C layer, and read the output back into a list.
        input_ = input_.flatten().tolist()
        input_all = list_2_swig_float_pointer(input_, len(input_))

        layer = maxpooling1d.build_layer_maxpooling1d(build_dict['pool_size'],
                                build_dict['strides'],
                                input_dims[1],
                                input_dims[2],
                                padding_dictionary[build_dict['padding']])

        output = maxpooling1d.fwd_maxpooling1d(layer, input_all.cast())
        print("keras output", output_dims)
        # 'valid' pooling output length: ceil((L - pool + 1) / stride),
        # times the channel count for the flat buffer size.
        c_output_size = int(np.ceil((input_dims[1] - build_dict['pool_size'] + 1) / build_dict['strides']) * input_dims[2])
        print("c_output size", (1,int(np.ceil((input_dims[1] - build_dict['pool_size'] + 1) / build_dict['strides'])) ,input_dims[2]))
        output = swig_py_object_2_list(output, c_output_size)

        return output, output_dims

    def __keras_fwd(self, config_dict : dict, input_):
        # Reference forward pass through Keras; the session is cleared so
        # repeated iterations do not accumulate graph state.
        model = self.__keras_build(config_dict)
        prediction = model.predict(input_)
        del model
        clear_session()
        return prediction

    def test_fwd(self):
        # Compare C vs. Keras outputs over N random configurations.
        N = 1000
        for _ in range(N):
            pool_size = np.random.randint(1, 10, size=1).tolist()[0]
            strides = np.random.randint(1, 10, size=1).tolist()[0]
            build_dict = {'pool_size' : pool_size ,
                          'strides' : strides,
                          'padding' : 'valid',
                          'data_format' : "channels_last"}
            shape = np.random.randint(50, 100, size = 2).tolist()
            # (batch, steps, channels) with batch fixed to 1.
            input_dims = (1, shape[0], shape[1])
            print("input: ", input_dims)
            input_ = self.__generate_sample(input_dims)
            build_dict['input_shape'] = input_dims
            original_input = input_.copy()
            c_keras = np.array(self.__keras_fwd(build_dict, original_input))
            c_output, output_dims = self.__c_fwd(build_dict, input_, input_dims, c_keras.shape)
            c_output = np.array(c_output).reshape(c_keras.shape).astype(np.float32)
            print(c_keras)
            print(c_output)
            # assert_allclose returns None and raises on mismatch.
            assert_result = np.testing.assert_allclose(c_output.flatten(), c_keras.flatten(), rtol = 1e-5)
            print(assert_result)
            print("forward passed!")
if __name__ == '__main__':
    # Run the MaxPooling1D parity tests when executed as a script.
    unittest.main()
ace2f5974096023bfdd43c543902d34344996b5b | 889 | py | Python | Trakttv.bundle/Contents/Libraries/Shared/plex/objects/client.py | disrupted/Trakttv.bundle | 24712216c71f3b22fd58cb5dd89dad5bb798ed60 | [
"RSA-MD"
] | 1,346 | 2015-01-01T14:52:24.000Z | 2022-03-28T12:50:48.000Z | Trakttv.bundle/Contents/Libraries/Shared/plex/objects/client.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 474 | 2015-01-01T10:27:46.000Z | 2022-03-21T12:26:16.000Z | Trakttv.bundle/Contents/Libraries/Shared/plex/objects/client.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 191 | 2015-01-02T18:27:22.000Z | 2022-03-29T10:49:48.000Z | from plex.core.helpers import to_iterable
from plex.objects.container import Container
from plex.objects.core.base import Property
from plex.objects.server import Server
class Client(Server):
    # Descriptor-declared fields parsed from the Plex client XML
    # (Property presumably maps an XML attribute name, with optional type
    # coercion, to an object attribute -- see plex.objects.core.base).
    product = Property
    device_class = Property('deviceClass')

    protocol = Property
    protocol_version = Property('protocolVersion', int)
    protocol_capabilities = Property('protocolCapabilities')
class ClientContainer(Container):
    """Container of Client objects with machine-identifier filtering."""

    # True when no filter is given (allowed is None) or the value is in it.
    filter_passes = lambda _, allowed, value: allowed is None or value in allowed

    def filter(self, identifiers=None):
        """Yield clients whose machine identifier matches `identifiers` (all when None)."""
        identifiers = to_iterable(identifiers)

        for client in self:
            if self.filter_passes(identifiers, client.machine_identifier):
                yield client

    def get(self, identifier):
        """Return the first client matching `identifier`, or None."""
        return next(self.filter(identifier), None)
| 26.939394 | 81 | 0.704162 |
ace2f628575205828635e56aa0ff0561e4d2ba77 | 2,366 | py | Python | setup.py | nj94ray39/cft-template | 37040bf5f09421e8bad4f42c3d5532334eecbeef | [
"Apache-2.0"
] | null | null | null | setup.py | nj94ray39/cft-template | 37040bf5f09421e8bad4f42c3d5532334eecbeef | [
"Apache-2.0"
] | null | null | null | setup.py | nj94ray39/cft-template | 37040bf5f09421e8bad4f42c3d5532334eecbeef | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages
from setuptools import setup
from kafka_utils import __version__
# Read the package long description from the README that sits beside this
# setup script.
# NOTE(review): the file is opened with the platform default encoding; a
# non-ASCII README may raise UnicodeDecodeError where the locale encoding is
# not UTF-8. ``io.open(..., encoding="utf-8")`` would fix this while keeping
# the Python 2.7 support declared in the classifiers — confirm before changing.
with open(
    os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        "README.md"
    )
) as f:
    README = f.read()
# Package metadata, scripts and dependency pins for the kafka-utils
# distribution.
setup(
    name="kafka-utils",
    version=__version__,
    author="Team Data Streams Core",
    author_email="data-streams-core@yelp.com",
    description="Kafka management utils",
    packages=find_packages(exclude=["scripts*", "tests*"]),
    url="https://github.com/Yelp/kafka-utils",
    license="Apache License 2.0",
    long_description=README,
    keywords="apache kafka",
    # Command-line entry points installed onto the user's PATH.
    scripts=[
        "scripts/kafka-consumer-manager",
        "scripts/kafka-cluster-manager",
        "scripts/kafka-rolling-restart",
        "scripts/kafka-utils",
        "scripts/kafka-check",
        "scripts/kafka-corruption-check",
    ],
    # Runtime dependencies; upper bounds pin known-compatible major versions.
    install_requires=[
        "humanfriendly>=4.8",
        "kafka-python>=1.3.2,<1.5.0",
        "kazoo>=2.0,<3.0.0",
        "PyYAML>3.10",
        "pytz>=2014.1",
        "requests-futures>0.9.0",
        "paramiko<2.5.0",
        "requests<3.0.0",
        "retrying",
        "six>=1.10.0",
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Operating System :: POSIX",
        "Operating System :: MacOS :: MacOS X",
    ],
)
| 30.333333 | 74 | 0.628064 |
ace2f69691aa7aac68af9a6d60f7e62da29efad8 | 1,548 | py | Python | CommonTools/RecoAlgos/python/sortedPFPrimaryVertices_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CommonTools/RecoAlgos/python/sortedPFPrimaryVertices_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CommonTools/RecoAlgos/python/sortedPFPrimaryVertices_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: EDProducer that sorts primary vertices and
# assigns particle-flow candidates to them. All cut values below are the
# module's tuned defaults; distances are in cm unless noted otherwise.
sortedPFPrimaryVertices = cms.EDProducer("PFCandidatePrimaryVertexSorter",
    sorting = cms.PSet(),
    assignment = cms.PSet(
    #cuts to assign primary tracks not used in PV fit based on dZ compatibility
    maxDzSigForPrimaryAssignment = cms.double(5.0), # in AND with next
    maxDzForPrimaryAssignment = cms.double(0.1), # in AND with prev
    maxDzErrorForPrimaryAssignment = cms.double(0.05), # in AND with prev, tracks with uncertainty above 500um cannot tell us which pv they come from
    # cuts used to recover b-tracks if they are closed to jet axis
    maxJetDeltaR = cms.double(0.5),
    minJetPt = cms.double(25),
    maxDistanceToJetAxis = cms.double(0.07), # std cut in b-tag is 700um
    maxDzForJetAxisAssigment = cms.double(0.1), # 1mm, because b-track IP is boost invariant
    maxDxyForJetAxisAssigment = cms.double(0.1), # 1mm, because b-track IP is boost invariant
    #cuts used to identify primary tracks compatible with beamspot
    maxDxySigForNotReconstructedPrimary = cms.double(2), #in AND with next
    maxDxyForNotReconstructedPrimary = cms.double(0.01), #in AND with prev
    ),
    particles = cms.InputTag("particleFlow"),
    vertices= cms.InputTag("offlinePrimaryVertices"),
    jets= cms.InputTag("ak4PFJets"),
    qualityForPrimary = cms.int32(3),
    usePVMET = cms.bool(True),
    produceAssociationToOriginalVertices = cms.bool(True),
    produceSortedVertices = cms.bool(True),
    producePileUpCollection = cms.bool(True),
    produceNoPileUpCollection = cms.bool(True),
)
| 46.909091 | 149 | 0.742894 |
ace2f6f2b4375c6eefb2aeb1a427c3e17f4d9556 | 2,588 | py | Python | crawlers/mooncrawl/mooncrawl/esd.py | zomglings/moonstream | 954f6014f782157ff3d708d0697457c4306a6588 | [
"Apache-2.0"
] | 67 | 2021-07-22T11:09:30.000Z | 2022-03-30T07:38:19.000Z | crawlers/mooncrawl/mooncrawl/esd.py | zomglings/moonstream | 954f6014f782157ff3d708d0697457c4306a6588 | [
"Apache-2.0"
] | 246 | 2021-07-19T15:40:59.000Z | 2022-03-24T20:30:55.000Z | crawlers/mooncrawl/mooncrawl/esd.py | zomglings/moonstream | 954f6014f782157ff3d708d0697457c4306a6588 | [
"Apache-2.0"
] | 21 | 2021-07-25T18:36:05.000Z | 2022-03-30T16:30:24.000Z | import argparse
import sys
import time
from typing import Optional, Union
import requests
from moonstreamdb.db import yield_db_session_ctx
from moonstreamdb.models import ESDEventSignature, ESDFunctionSignature
from sqlalchemy.orm import Session
# 4byte.directory API endpoints for each supported crawl type.
CRAWL_URLS = {
    "functions": "https://www.4byte.directory/api/v1/signatures/",
    "events": "https://www.4byte.directory/api/v1/event-signatures/",
}

# ORM model each crawl type's rows are stored as; keys mirror CRAWL_URLS.
DB_MODELS = {
    "functions": ESDFunctionSignature,
    "events": ESDEventSignature,
}
def crawl_step(
    db_session: Session,
    crawl_url: str,
    db_model: Union[ESDEventSignature, ESDFunctionSignature],
) -> Optional[str]:
    """Fetch one page of signatures from the ESD API and persist it.

    Retries the HTTP GET up to 3 times with exponential backoff (4s, 8s, 16s).
    On success, bulk-inserts every signature row from the page through
    ``db_session`` and returns the URL of the next page, or ``None`` when the
    endpoint reports no further pages. Returns ``None`` on failure.

    :param db_session: open SQLAlchemy session used for the bulk insert/commit
    :param crawl_url: page URL of the 4byte.directory API to fetch
    :param db_model: ORM model class (function or event signature) to store rows as
    """
    attempt = 0
    current_interval = 2
    success = False
    response: Optional[requests.Response] = None
    while not success and attempt < 3:
        attempt += 1
        try:
            response = requests.get(crawl_url)
            response.raise_for_status()
            success = True
        except requests.RequestException:
            # Bug fix: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit; only network/HTTP failures should
            # trigger a backoff-and-retry.
            current_interval *= 2
            time.sleep(current_interval)

    if response is None or not success:
        # Bug fix: only ``response is None`` was checked before, so three
        # consecutive HTTP error responses (e.g. 500s) left ``response`` set
        # and the error page was parsed below as if it were a valid page.
        print(f"Could not process URL: {crawl_url}", file=sys.stderr)
        return None

    page = response.json()
    results = page.get("results", [])
    rows = [
        db_model(
            id=row.get("id"),
            text_signature=row.get("text_signature"),
            hex_signature=row.get("hex_signature"),
            created_at=row.get("created_at"),
        )
        for row in results
    ]
    db_session.bulk_save_objects(rows)
    db_session.commit()
    return page.get("next")
def crawl(crawl_type: str, interval: float) -> None:
    """Walk every page of the chosen signature endpoint into the database.

    :param crawl_type: key into CRAWL_URLS/DB_MODELS ("functions" or "events")
    :param interval: seconds to sleep between page requests
    """
    db_model = DB_MODELS[crawl_type]
    next_url: Optional[str] = CRAWL_URLS[crawl_type]
    with yield_db_session_ctx() as db_session:
        # crawl_step returns the next page URL, or None once exhausted/failed.
        while next_url is not None:
            print(f"Crawling: {next_url}")
            next_url = crawl_step(db_session, next_url, db_model)
            time.sleep(interval)
def main():
    """Parse command-line arguments and launch the requested ESD crawl."""
    arg_parser = argparse.ArgumentParser(
        description=(
            "Crawls function and event signatures from the Ethereum "
            "Signature Database (https://www.4byte.directory/)"
        )
    )
    arg_parser.add_argument(
        "crawl_type",
        choices=CRAWL_URLS,
        help="Specifies whether to crawl function signatures or event signatures",
    )
    arg_parser.add_argument(
        "--interval",
        type=float,
        default=0.1,
        help="Number of seconds to wait between requests to the Ethereum Signature Database API",
    )
    parsed = arg_parser.parse_args()
    crawl(parsed.crawl_type, parsed.interval)
if __name__ == "__main__":
main()
| 26.958333 | 126 | 0.651082 |
ace2f7aed98059d74302ec4f46b93a17111d3f00 | 25,649 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/__init__.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/__init__.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/__init__.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .compute_operation_value_py3 import ComputeOperationValue
from .instance_view_status_py3 import InstanceViewStatus
from .sub_resource_py3 import SubResource
from .sku_py3 import Sku
from .availability_set_py3 import AvailabilitySet
from .availability_set_update_py3 import AvailabilitySetUpdate
from .virtual_machine_size_py3 import VirtualMachineSize
from .virtual_machine_extension_image_py3 import VirtualMachineExtensionImage
from .virtual_machine_image_resource_py3 import VirtualMachineImageResource
from .virtual_machine_extension_instance_view_py3 import VirtualMachineExtensionInstanceView
from .virtual_machine_extension_py3 import VirtualMachineExtension
from .virtual_machine_extension_update_py3 import VirtualMachineExtensionUpdate
from .virtual_machine_extensions_list_result_py3 import VirtualMachineExtensionsListResult
from .purchase_plan_py3 import PurchasePlan
from .os_disk_image_py3 import OSDiskImage
from .data_disk_image_py3 import DataDiskImage
from .virtual_machine_image_py3 import VirtualMachineImage
from .usage_name_py3 import UsageName
from .usage_py3 import Usage
from .virtual_machine_capture_parameters_py3 import VirtualMachineCaptureParameters
from .virtual_machine_capture_result_py3 import VirtualMachineCaptureResult
from .plan_py3 import Plan
from .hardware_profile_py3 import HardwareProfile
from .image_reference_py3 import ImageReference
from .key_vault_secret_reference_py3 import KeyVaultSecretReference
from .key_vault_key_reference_py3 import KeyVaultKeyReference
from .disk_encryption_settings_py3 import DiskEncryptionSettings
from .virtual_hard_disk_py3 import VirtualHardDisk
from .managed_disk_parameters_py3 import ManagedDiskParameters
from .os_disk_py3 import OSDisk
from .data_disk_py3 import DataDisk
from .storage_profile_py3 import StorageProfile
from .additional_unattend_content_py3 import AdditionalUnattendContent
from .win_rm_listener_py3 import WinRMListener
from .win_rm_configuration_py3 import WinRMConfiguration
from .windows_configuration_py3 import WindowsConfiguration
from .ssh_public_key_py3 import SshPublicKey
from .ssh_configuration_py3 import SshConfiguration
from .linux_configuration_py3 import LinuxConfiguration
from .vault_certificate_py3 import VaultCertificate
from .vault_secret_group_py3 import VaultSecretGroup
from .os_profile_py3 import OSProfile
from .network_interface_reference_py3 import NetworkInterfaceReference
from .network_profile_py3 import NetworkProfile
from .boot_diagnostics_py3 import BootDiagnostics
from .diagnostics_profile_py3 import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view_py3 import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view_py3 import VirtualMachineAgentInstanceView
from .disk_instance_view_py3 import DiskInstanceView
from .boot_diagnostics_instance_view_py3 import BootDiagnosticsInstanceView
from .virtual_machine_identity_py3 import VirtualMachineIdentity
from .maintenance_redeploy_status_py3 import MaintenanceRedeployStatus
from .virtual_machine_instance_view_py3 import VirtualMachineInstanceView
from .virtual_machine_health_status_py3 import VirtualMachineHealthStatus
from .virtual_machine_scale_set_vm_instance_view_py3 import VirtualMachineScaleSetVMInstanceView
from .virtual_machine_py3 import VirtualMachine
from .virtual_machine_update_py3 import VirtualMachineUpdate
from .auto_os_upgrade_policy_py3 import AutoOSUpgradePolicy
from .rolling_upgrade_policy_py3 import RollingUpgradePolicy
from .upgrade_policy_py3 import UpgradePolicy
from .image_os_disk_py3 import ImageOSDisk
from .image_data_disk_py3 import ImageDataDisk
from .image_storage_profile_py3 import ImageStorageProfile
from .image_py3 import Image
from .image_update_py3 import ImageUpdate
from .virtual_machine_scale_set_identity_py3 import VirtualMachineScaleSetIdentity
from .virtual_machine_scale_set_os_profile_py3 import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_update_os_profile_py3 import VirtualMachineScaleSetUpdateOSProfile
from .virtual_machine_scale_set_managed_disk_parameters_py3 import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk_py3 import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_update_os_disk_py3 import VirtualMachineScaleSetUpdateOSDisk
from .virtual_machine_scale_set_data_disk_py3 import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile_py3 import VirtualMachineScaleSetStorageProfile
from .virtual_machine_scale_set_update_storage_profile_py3 import VirtualMachineScaleSetUpdateStorageProfile
from .api_entity_reference_py3 import ApiEntityReference
from .virtual_machine_scale_set_public_ip_address_configuration_dns_settings_py3 import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from .virtual_machine_scale_set_public_ip_address_configuration_py3 import VirtualMachineScaleSetPublicIPAddressConfiguration
from .virtual_machine_scale_set_update_public_ip_address_configuration_py3 import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from .virtual_machine_scale_set_ip_configuration_py3 import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_update_ip_configuration_py3 import VirtualMachineScaleSetUpdateIPConfiguration
from .virtual_machine_scale_set_network_configuration_dns_settings_py3 import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from .virtual_machine_scale_set_network_configuration_py3 import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_update_network_configuration_py3 import VirtualMachineScaleSetUpdateNetworkConfiguration
from .virtual_machine_scale_set_network_profile_py3 import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_update_network_profile_py3 import VirtualMachineScaleSetUpdateNetworkProfile
from .virtual_machine_scale_set_extension_py3 import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile_py3 import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile_py3 import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set_update_vm_profile_py3 import VirtualMachineScaleSetUpdateVMProfile
from .virtual_machine_scale_set_py3 import VirtualMachineScaleSet
from .virtual_machine_scale_set_update_py3 import VirtualMachineScaleSetUpdate
from .virtual_machine_scale_set_vm_instance_ids_py3 import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids_py3 import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count_py3 import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary_py3 import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary_py3 import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view_py3 import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity_py3 import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku_py3 import VirtualMachineScaleSetSku
from .api_error_base_py3 import ApiErrorBase
from .inner_error_py3 import InnerError
from .api_error_py3 import ApiError
from .rollback_status_info_py3 import RollbackStatusInfo
from .upgrade_operation_history_status_py3 import UpgradeOperationHistoryStatus
from .rolling_upgrade_progress_info_py3 import RollingUpgradeProgressInfo
from .upgrade_operation_historical_status_info_properties_py3 import UpgradeOperationHistoricalStatusInfoProperties
from .upgrade_operation_historical_status_info_py3 import UpgradeOperationHistoricalStatusInfo
from .virtual_machine_scale_set_vm_py3 import VirtualMachineScaleSetVM
from .rolling_upgrade_running_status_py3 import RollingUpgradeRunningStatus
from .rolling_upgrade_status_info_py3 import RollingUpgradeStatusInfo
from .compute_long_running_operation_properties_py3 import ComputeLongRunningOperationProperties
from .resource_py3 import Resource
from .update_resource_py3 import UpdateResource
from .sub_resource_read_only_py3 import SubResourceReadOnly
from .recovery_walk_response_py3 import RecoveryWalkResponse
from .operation_status_response_py3 import OperationStatusResponse
from .request_rate_by_interval_input_py3 import RequestRateByIntervalInput
from .throttled_requests_input_py3 import ThrottledRequestsInput
from .log_analytics_input_base_py3 import LogAnalyticsInputBase
from .log_analytics_output_py3 import LogAnalyticsOutput
from .log_analytics_operation_result_py3 import LogAnalyticsOperationResult
from .run_command_input_parameter_py3 import RunCommandInputParameter
from .run_command_input_py3 import RunCommandInput
from .run_command_parameter_definition_py3 import RunCommandParameterDefinition
from .run_command_document_base_py3 import RunCommandDocumentBase
from .run_command_document_py3 import RunCommandDocument
from .run_command_result_py3 import RunCommandResult
except (SyntaxError, ImportError):
from .compute_operation_value import ComputeOperationValue
from .instance_view_status import InstanceViewStatus
from .sub_resource import SubResource
from .sku import Sku
from .availability_set import AvailabilitySet
from .availability_set_update import AvailabilitySetUpdate
from .virtual_machine_size import VirtualMachineSize
from .virtual_machine_extension_image import VirtualMachineExtensionImage
from .virtual_machine_image_resource import VirtualMachineImageResource
from .virtual_machine_extension_instance_view import VirtualMachineExtensionInstanceView
from .virtual_machine_extension import VirtualMachineExtension
from .virtual_machine_extension_update import VirtualMachineExtensionUpdate
from .virtual_machine_extensions_list_result import VirtualMachineExtensionsListResult
from .purchase_plan import PurchasePlan
from .os_disk_image import OSDiskImage
from .data_disk_image import DataDiskImage
from .virtual_machine_image import VirtualMachineImage
from .usage_name import UsageName
from .usage import Usage
from .virtual_machine_capture_parameters import VirtualMachineCaptureParameters
from .virtual_machine_capture_result import VirtualMachineCaptureResult
from .plan import Plan
from .hardware_profile import HardwareProfile
from .image_reference import ImageReference
from .key_vault_secret_reference import KeyVaultSecretReference
from .key_vault_key_reference import KeyVaultKeyReference
from .disk_encryption_settings import DiskEncryptionSettings
from .virtual_hard_disk import VirtualHardDisk
from .managed_disk_parameters import ManagedDiskParameters
from .os_disk import OSDisk
from .data_disk import DataDisk
from .storage_profile import StorageProfile
from .additional_unattend_content import AdditionalUnattendContent
from .win_rm_listener import WinRMListener
from .win_rm_configuration import WinRMConfiguration
from .windows_configuration import WindowsConfiguration
from .ssh_public_key import SshPublicKey
from .ssh_configuration import SshConfiguration
from .linux_configuration import LinuxConfiguration
from .vault_certificate import VaultCertificate
from .vault_secret_group import VaultSecretGroup
from .os_profile import OSProfile
from .network_interface_reference import NetworkInterfaceReference
from .network_profile import NetworkProfile
from .boot_diagnostics import BootDiagnostics
from .diagnostics_profile import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view import VirtualMachineAgentInstanceView
from .disk_instance_view import DiskInstanceView
from .boot_diagnostics_instance_view import BootDiagnosticsInstanceView
from .virtual_machine_identity import VirtualMachineIdentity
from .maintenance_redeploy_status import MaintenanceRedeployStatus
from .virtual_machine_instance_view import VirtualMachineInstanceView
from .virtual_machine_health_status import VirtualMachineHealthStatus
from .virtual_machine_scale_set_vm_instance_view import VirtualMachineScaleSetVMInstanceView
from .virtual_machine import VirtualMachine
from .virtual_machine_update import VirtualMachineUpdate
from .auto_os_upgrade_policy import AutoOSUpgradePolicy
from .rolling_upgrade_policy import RollingUpgradePolicy
from .upgrade_policy import UpgradePolicy
from .image_os_disk import ImageOSDisk
from .image_data_disk import ImageDataDisk
from .image_storage_profile import ImageStorageProfile
from .image import Image
from .image_update import ImageUpdate
from .virtual_machine_scale_set_identity import VirtualMachineScaleSetIdentity
from .virtual_machine_scale_set_os_profile import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_update_os_profile import VirtualMachineScaleSetUpdateOSProfile
from .virtual_machine_scale_set_managed_disk_parameters import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_update_os_disk import VirtualMachineScaleSetUpdateOSDisk
from .virtual_machine_scale_set_data_disk import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile import VirtualMachineScaleSetStorageProfile
from .virtual_machine_scale_set_update_storage_profile import VirtualMachineScaleSetUpdateStorageProfile
from .api_entity_reference import ApiEntityReference
from .virtual_machine_scale_set_public_ip_address_configuration_dns_settings import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from .virtual_machine_scale_set_public_ip_address_configuration import VirtualMachineScaleSetPublicIPAddressConfiguration
from .virtual_machine_scale_set_update_public_ip_address_configuration import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from .virtual_machine_scale_set_ip_configuration import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_update_ip_configuration import VirtualMachineScaleSetUpdateIPConfiguration
from .virtual_machine_scale_set_network_configuration_dns_settings import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from .virtual_machine_scale_set_network_configuration import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_update_network_configuration import VirtualMachineScaleSetUpdateNetworkConfiguration
from .virtual_machine_scale_set_network_profile import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_update_network_profile import VirtualMachineScaleSetUpdateNetworkProfile
from .virtual_machine_scale_set_extension import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set_update_vm_profile import VirtualMachineScaleSetUpdateVMProfile
from .virtual_machine_scale_set import VirtualMachineScaleSet
from .virtual_machine_scale_set_update import VirtualMachineScaleSetUpdate
from .virtual_machine_scale_set_vm_instance_ids import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku import VirtualMachineScaleSetSku
from .api_error_base import ApiErrorBase
from .inner_error import InnerError
from .api_error import ApiError
from .rollback_status_info import RollbackStatusInfo
from .upgrade_operation_history_status import UpgradeOperationHistoryStatus
from .rolling_upgrade_progress_info import RollingUpgradeProgressInfo
from .upgrade_operation_historical_status_info_properties import UpgradeOperationHistoricalStatusInfoProperties
from .upgrade_operation_historical_status_info import UpgradeOperationHistoricalStatusInfo
from .virtual_machine_scale_set_vm import VirtualMachineScaleSetVM
from .rolling_upgrade_running_status import RollingUpgradeRunningStatus
from .rolling_upgrade_status_info import RollingUpgradeStatusInfo
from .compute_long_running_operation_properties import ComputeLongRunningOperationProperties
from .resource import Resource
from .update_resource import UpdateResource
from .sub_resource_read_only import SubResourceReadOnly
from .recovery_walk_response import RecoveryWalkResponse
from .operation_status_response import OperationStatusResponse
from .request_rate_by_interval_input import RequestRateByIntervalInput
from .throttled_requests_input import ThrottledRequestsInput
from .log_analytics_input_base import LogAnalyticsInputBase
from .log_analytics_output import LogAnalyticsOutput
from .log_analytics_operation_result import LogAnalyticsOperationResult
from .run_command_input_parameter import RunCommandInputParameter
from .run_command_input import RunCommandInput
from .run_command_parameter_definition import RunCommandParameterDefinition
from .run_command_document_base import RunCommandDocumentBase
from .run_command_document import RunCommandDocument
from .run_command_result import RunCommandResult
from .compute_operation_value_paged import ComputeOperationValuePaged
from .availability_set_paged import AvailabilitySetPaged
from .virtual_machine_size_paged import VirtualMachineSizePaged
from .virtual_machine_paged import VirtualMachinePaged
from .usage_paged import UsagePaged
from .image_paged import ImagePaged
from .virtual_machine_scale_set_paged import VirtualMachineScaleSetPaged
from .virtual_machine_scale_set_sku_paged import VirtualMachineScaleSetSkuPaged
from .upgrade_operation_historical_status_info_paged import UpgradeOperationHistoricalStatusInfoPaged
from .virtual_machine_scale_set_extension_paged import VirtualMachineScaleSetExtensionPaged
from .virtual_machine_scale_set_vm_paged import VirtualMachineScaleSetVMPaged
from .run_command_document_base_paged import RunCommandDocumentBasePaged
from .compute_management_client_enums import (
StatusLevelTypes,
OperatingSystemTypes,
VirtualMachineSizeTypes,
CachingTypes,
DiskCreateOptionTypes,
StorageAccountTypes,
PassNames,
ComponentNames,
SettingNames,
ProtocolTypes,
ResourceIdentityType,
MaintenanceOperationResultCodeTypes,
UpgradeMode,
OperatingSystemStateTypes,
IPVersion,
VirtualMachinePriorityTypes,
VirtualMachineEvictionPolicyTypes,
VirtualMachineScaleSetSkuScaleType,
UpgradeState,
UpgradeOperationInvoker,
RollingUpgradeStatusCode,
RollingUpgradeActionType,
IntervalInMins,
InstanceViewTypes,
)
__all__ = [
'ComputeOperationValue',
'InstanceViewStatus',
'SubResource',
'Sku',
'AvailabilitySet',
'AvailabilitySetUpdate',
'VirtualMachineSize',
'VirtualMachineExtensionImage',
'VirtualMachineImageResource',
'VirtualMachineExtensionInstanceView',
'VirtualMachineExtension',
'VirtualMachineExtensionUpdate',
'VirtualMachineExtensionsListResult',
'PurchasePlan',
'OSDiskImage',
'DataDiskImage',
'VirtualMachineImage',
'UsageName',
'Usage',
'VirtualMachineCaptureParameters',
'VirtualMachineCaptureResult',
'Plan',
'HardwareProfile',
'ImageReference',
'KeyVaultSecretReference',
'KeyVaultKeyReference',
'DiskEncryptionSettings',
'VirtualHardDisk',
'ManagedDiskParameters',
'OSDisk',
'DataDisk',
'StorageProfile',
'AdditionalUnattendContent',
'WinRMListener',
'WinRMConfiguration',
'WindowsConfiguration',
'SshPublicKey',
'SshConfiguration',
'LinuxConfiguration',
'VaultCertificate',
'VaultSecretGroup',
'OSProfile',
'NetworkInterfaceReference',
'NetworkProfile',
'BootDiagnostics',
'DiagnosticsProfile',
'VirtualMachineExtensionHandlerInstanceView',
'VirtualMachineAgentInstanceView',
'DiskInstanceView',
'BootDiagnosticsInstanceView',
'VirtualMachineIdentity',
'MaintenanceRedeployStatus',
'VirtualMachineInstanceView',
'VirtualMachineHealthStatus',
'VirtualMachineScaleSetVMInstanceView',
'VirtualMachine',
'VirtualMachineUpdate',
'AutoOSUpgradePolicy',
'RollingUpgradePolicy',
'UpgradePolicy',
'ImageOSDisk',
'ImageDataDisk',
'ImageStorageProfile',
'Image',
'ImageUpdate',
'VirtualMachineScaleSetIdentity',
'VirtualMachineScaleSetOSProfile',
'VirtualMachineScaleSetUpdateOSProfile',
'VirtualMachineScaleSetManagedDiskParameters',
'VirtualMachineScaleSetOSDisk',
'VirtualMachineScaleSetUpdateOSDisk',
'VirtualMachineScaleSetDataDisk',
'VirtualMachineScaleSetStorageProfile',
'VirtualMachineScaleSetUpdateStorageProfile',
'ApiEntityReference',
'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings',
'VirtualMachineScaleSetPublicIPAddressConfiguration',
'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration',
'VirtualMachineScaleSetIPConfiguration',
'VirtualMachineScaleSetUpdateIPConfiguration',
'VirtualMachineScaleSetNetworkConfigurationDnsSettings',
'VirtualMachineScaleSetNetworkConfiguration',
'VirtualMachineScaleSetUpdateNetworkConfiguration',
'VirtualMachineScaleSetNetworkProfile',
'VirtualMachineScaleSetUpdateNetworkProfile',
'VirtualMachineScaleSetExtension',
'VirtualMachineScaleSetExtensionProfile',
'VirtualMachineScaleSetVMProfile',
'VirtualMachineScaleSetUpdateVMProfile',
'VirtualMachineScaleSet',
'VirtualMachineScaleSetUpdate',
'VirtualMachineScaleSetVMInstanceIDs',
'VirtualMachineScaleSetVMInstanceRequiredIDs',
'VirtualMachineStatusCodeCount',
'VirtualMachineScaleSetInstanceViewStatusesSummary',
'VirtualMachineScaleSetVMExtensionsSummary',
'VirtualMachineScaleSetInstanceView',
'VirtualMachineScaleSetSkuCapacity',
'VirtualMachineScaleSetSku',
'ApiErrorBase',
'InnerError',
'ApiError',
'RollbackStatusInfo',
'UpgradeOperationHistoryStatus',
'RollingUpgradeProgressInfo',
'UpgradeOperationHistoricalStatusInfoProperties',
'UpgradeOperationHistoricalStatusInfo',
'VirtualMachineScaleSetVM',
'RollingUpgradeRunningStatus',
'RollingUpgradeStatusInfo',
'ComputeLongRunningOperationProperties',
'Resource',
'UpdateResource',
'SubResourceReadOnly',
'RecoveryWalkResponse',
'OperationStatusResponse',
'RequestRateByIntervalInput',
'ThrottledRequestsInput',
'LogAnalyticsInputBase',
'LogAnalyticsOutput',
'LogAnalyticsOperationResult',
'RunCommandInputParameter',
'RunCommandInput',
'RunCommandParameterDefinition',
'RunCommandDocumentBase',
'RunCommandDocument',
'RunCommandResult',
'ComputeOperationValuePaged',
'AvailabilitySetPaged',
'VirtualMachineSizePaged',
'VirtualMachinePaged',
'UsagePaged',
'ImagePaged',
'VirtualMachineScaleSetPaged',
'VirtualMachineScaleSetSkuPaged',
'UpgradeOperationHistoricalStatusInfoPaged',
'VirtualMachineScaleSetExtensionPaged',
'VirtualMachineScaleSetVMPaged',
'RunCommandDocumentBasePaged',
'StatusLevelTypes',
'OperatingSystemTypes',
'VirtualMachineSizeTypes',
'CachingTypes',
'DiskCreateOptionTypes',
'StorageAccountTypes',
'PassNames',
'ComponentNames',
'SettingNames',
'ProtocolTypes',
'ResourceIdentityType',
'MaintenanceOperationResultCodeTypes',
'UpgradeMode',
'OperatingSystemStateTypes',
'IPVersion',
'VirtualMachinePriorityTypes',
'VirtualMachineEvictionPolicyTypes',
'VirtualMachineScaleSetSkuScaleType',
'UpgradeState',
'UpgradeOperationInvoker',
'RollingUpgradeStatusCode',
'RollingUpgradeActionType',
'IntervalInMins',
'InstanceViewTypes',
]
| 54.341102 | 153 | 0.842216 |
ace2f80134158a1b85d6fdc9703daab274786dba | 4,929 | py | Python | houdini/commands.py | Josecholi/Houdini-asyncio | 5eae39bc53c0d3813b6a65f777410805ed79acda | [
"MIT"
] | 1 | 2020-05-30T19:37:41.000Z | 2020-05-30T19:37:41.000Z | houdini/commands.py | Josecholi/Houdini-asyncio | 5eae39bc53c0d3813b6a65f777410805ed79acda | [
"MIT"
] | null | null | null | houdini/commands.py | Josecholi/Houdini-asyncio | 5eae39bc53c0d3813b6a65f777410805ed79acda | [
"MIT"
] | null | null | null | import inspect
import config
from houdini import handlers
from houdini import plugins
from houdini import _AbstractManager
from houdini.constants import ConflictResolution
from houdini.converters import _ArgumentDeserializer, _listener
class UnknownCommandException(Exception):
    """Signals that a chat command identifier was never registered."""
class _Command(_ArgumentDeserializer):
    """A single chat command: argument deserialization plus alias/parent bookkeeping."""
    def __init__(self, name, callback, **kwargs):
        super().__init__(name, callback, **kwargs)
        # Optional extra names this command answers to, and its owning group (if any).
        self.alias = kwargs.get('alias', [])
        self.parent = kwargs.get('parent')
class _CommandGroup(_Command):
    """A command that owns nested sub-commands, dispatched by their first token."""
    __slots__ = ['commands']
    def __init__(self, name, callback, **kwargs):
        super().__init__(name, callback, **kwargs)
        self.commands = {}
    async def __call__(self, p, data):
        if data:
            # Remaining tokens name a sub-command; hand the call off to it.
            await invoke_command_objects(self.commands, p, data)
            return
        # Bare invocation runs the group's own callback.
        if self.instance:
            return await self.callback(self.instance, p)
        return await self.callback(p)
    def command(self, name=None, **kwargs):
        """Register a sub-command under this group."""
        return command(name, parent=self, **kwargs)
    def group(self, name=None, **kwargs):
        """Register a nested sub-group under this group."""
        return group(name, parent=self, **kwargs)
def command(name=None, **kwargs):
    """Decorator factory: wrap a coroutine as a _Command chat listener."""
    delimiters = config.commands['StringDelimiters']
    return _listener(_Command, name,
                     string_delimiter=delimiters,
                     string_separator=' ', **kwargs)
def group(name=None, **kwargs):
    """Decorator factory: wrap a coroutine as a _CommandGroup chat listener."""
    delimiters = config.commands['StringDelimiters']
    return _listener(_CommandGroup, name,
                     string_delimiter=delimiters,
                     string_separator=' ', **kwargs)
# Convenience re-exports so plugins can import handler decorators from this module.
cooldown = handlers.cooldown
check = handlers.check
player_attribute = handlers.player_attribute
player_data_attribute = handlers.player_data_attribute
player_in_room = handlers.player_in_room
class CommandManager(_AbstractManager):
    """Registry of chat commands, keyed by name/alias.

    Conflict handling between same-named commands is governed by
    config.commands['ConflictMode'] (Exception / Append / Silent).
    """
    async def setup(self, module):
        """Commands cannot be loaded as standalone modules."""
        raise NotImplementedError('Commands can only be loaded from plugins')
    async def load(self, module):
        """Register every command defined on a plugin instance.

        :raises TypeError: if `module` is not an IPlugin
        :raises NameError: on a name conflict when ConflictMode is Exception
        """
        # Validate the plugin type before inspecting it (the original called
        # inspect.getmembers first, doing wasted work for invalid input).
        if not isinstance(module, plugins.IPlugin):
            raise TypeError('Commands can only be loaded from plugins')
        command_objects = inspect.getmembers(module, is_command)
        for command_name, command_object in command_objects:
            command_object.instance = module
            # Normalize a single alias string into a list; isinstance also
            # covers str subclasses, unlike the original `type(...) == str`.
            if isinstance(command_object.alias, str):
                command_object.alias = [command_object.alias]
            command_object.alias.append(command_object.name)
            # Top-level commands register on the manager itself; sub-commands
            # register on their parent group's command dict.
            parent_commands = self if command_object.parent is None else command_object.parent.commands
            for name in command_object.alias:
                if name in parent_commands and parent_commands[name]:
                    conflict_command = parent_commands[name][0]
                    if config.commands['ConflictMode'] == ConflictResolution.Exception:
                        raise NameError('Command name conflict: \'{}\' from plugin \'{}\' '
                                        'conflicts with \'{}\' from module \'{}\''
                                        .format(name, module.__class__.__name__, conflict_command.name,
                                                conflict_command.instance.__class__.__name__))
                    elif config.commands['ConflictMode'] == ConflictResolution.Append:
                        parent_commands[name].append(command_object)
                    elif config.commands['ConflictMode'] == ConflictResolution.Silent:
                        module.server.logger.warning(
                            'Command \'{}\' from module \'{}\' disabled due to conflict with \'{}\''.format(
                                name, module.__class__.__name__, conflict_command.instance.__class__.__name__))
                else:
                    parent_commands[name] = [command_object]
def is_command(command_object):
    """Predicate for inspect.getmembers: is this attribute a command listener?

    Uses the idiomatic isinstance() instead of issubclass(type(...), ...).
    """
    return isinstance(command_object, _Command)
# Normalize a single configured prefix string into a list so the prefix
# helpers below can always iterate.
if type(config.commands['Prefix']) == str:
    config.commands['Prefix'] = [config.commands['Prefix']]
def has_command_prefix(command_string):
    """True when the message starts with any configured command prefix."""
    return any(command_string.startswith(prefix)
               for prefix in config.commands['Prefix'])
def get_command_prefix(command_string):
    """Return the configured prefix the message starts with, or None."""
    matches = (prefix for prefix in config.commands['Prefix']
               if command_string.startswith(prefix))
    return next(matches, None)
async def invoke_command_string(commands, p, command_string):
    """Strip the command prefix, tokenize on spaces, and dispatch."""
    prefix = get_command_prefix(command_string)
    tokens = command_string[len(prefix):].split(' ')
    await invoke_command_objects(commands, p, tokens)
async def invoke_command_objects(commands, p, data):
    """Pop the leading token as the command name and run every handler bound to it."""
    identifier = data.pop(0)
    if identifier not in commands:
        raise UnknownCommandException('Command \'{}\' does not exist'.format(identifier))
    for command_object in commands[identifier]:
        await command_object(p, data)
| 35.207143 | 111 | 0.656928 |
ace2f82160d7e54c4bce324f1d20f179b400dc46 | 7,573 | py | Python | loss2.py | tomaszkaliciak/semantic-segmentation-pytorch | e0f36da84a1c6c7cd376c557e35c17ec16bf387b | [
"BSD-3-Clause"
] | null | null | null | loss2.py | tomaszkaliciak/semantic-segmentation-pytorch | e0f36da84a1c6c7cd376c557e35c17ec16bf387b | [
"BSD-3-Clause"
] | null | null | null | loss2.py | tomaszkaliciak/semantic-segmentation-pytorch | e0f36da84a1c6c7cd376c557e35c17ec16bf387b | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from sklearn.utils import class_weight
from lovasz_losses import lovasz_softmax
def make_one_hot(labels, classes):
    """Scatter (N,1,H,W) integer labels into a one-hot (N,classes,H,W) CUDA tensor."""
    shape = labels.size()
    canvas = torch.cuda.FloatTensor(shape[0], classes, shape[2], shape[3]).zero_()
    return canvas.scatter_(1, labels.data, 1)
def get_weights(target, num_classes=7):
    """Median-frequency class weights for the labels present in `target`.

    Classes absent from `target` keep weight 1.0.  `num_classes` was a
    hard-coded 7 in the original; it is now a backward-compatible
    parameter so the helper works for other label spaces.

    Returns a float CUDA tensor of shape (num_classes,).
    """
    t_np = target.view(-1).data.cpu().numpy()
    classes, counts = np.unique(t_np, return_counts=True)
    # Median-frequency balancing: rarer classes get proportionally larger weights.
    cls_w = np.median(counts) / counts
    #cls_w = class_weight.compute_class_weight('balanced', classes, t_np)
    weights = np.ones(num_classes)
    weights[classes] = cls_w
    return torch.from_numpy(weights).float().cuda()
class CrossEntropy(nn.Module):
    """Weighted cross-entropy plus soft-Dice loss.

    Bug fixed: the original declared `weight=torch.cuda.FloatTensor(...)`
    as a default argument, which allocated a CUDA tensor while the class
    body was executed — importing this module crashed on CPU-only hosts.
    The default is now built lazily inside __init__ with the same values.
    """
    def __init__(self, ignore_label=-1, weight=None):
        super(CrossEntropy, self).__init__()
        self.ignore_label = ignore_label
        if weight is None:
            weight = torch.cuda.FloatTensor([1.0405, 0.9311, 1.0038, 1.1441, 1.0039, 0.8766])
        self.criterion = nn.CrossEntropyLoss(weight=weight,
                                             ignore_index=ignore_label)
        self.dice = DiceLoss()
    def forward(self, score, target):
        ph, pw = score.size(2), score.size(3)
        h, w = target.size(1), target.size(2)
        if ph != h or pw != w:
            # Resize predictions to the label resolution.  F.upsample is
            # deprecated in favor of F.interpolate but kept to match the file.
            score = F.upsample(
                input=score, size=(h, w), mode='bilinear')
        loss = self.criterion(score, target)
        loss2 = self.dice(score, target)
        return loss + loss2
class OhemCrossEntropy(nn.Module):
    """Online-hard-example-mining cross-entropy.

    Pixels whose predicted probability for the true class falls below
    `thres` are treated as hard examples; at least `min_kept` pixels are
    retained regardless of the threshold.

    Bug fixed: the original default `weight=torch.cuda.FloatTensor(...)`
    allocated a CUDA tensor at import time, crashing CPU-only hosts; the
    same values are now built lazily in __init__.
    """
    def __init__(self, ignore_label=-1, thres=0.9,
                 min_kept=131072, weight=None):
        super(OhemCrossEntropy, self).__init__()
        self.thresh = thres
        self.min_kept = max(1, min_kept)
        self.ignore_label = ignore_label
        if weight is None:
            weight = torch.cuda.FloatTensor([1.1405, 0.9311, 1.0038, 1.1941, 1.0039, 0.8566])
        self.criterion = nn.CrossEntropyLoss(weight=weight,
                                             ignore_index=ignore_label,
                                             reduction='none')
    def forward(self, score, target, **kwargs):
        ph, pw = score.size(2), score.size(3)
        h, w = target.size(1), target.size(2)
        if ph != h or pw != w:
            score = F.upsample(input=score, size=(h, w), mode='bilinear')
        pred = F.softmax(score, dim=1)
        # Per-pixel CE loss (reduction='none'), flattened.
        pixel_losses = self.criterion(score, target).contiguous().view(-1)
        # NOTE(review): `loss2` is the full unmasked per-pixel loss, so the
        # return value broadcasts to a per-pixel tensor rather than a scalar —
        # confirm the training loop expects that.
        loss2 = pixel_losses.clone()
        mask = target.contiguous().view(-1) != self.ignore_label
        # Ignored labels are mapped to class 0 only so gather() has a valid index.
        tmp_target = target.clone()
        tmp_target[tmp_target == self.ignore_label] = 0
        pred = pred.gather(1, tmp_target.unsqueeze(1))
        # Sort true-class probabilities ascending: hardest pixels first.
        pred, ind = pred.contiguous().view(-1,)[mask].contiguous().sort()
        min_value = pred[min(self.min_kept, pred.numel() - 1)]
        threshold = max(min_value, self.thresh)
        pixel_losses = pixel_losses[mask][ind]
        pixel_losses = pixel_losses[pred < threshold]
        return pixel_losses.mean() + loss2
class OhemCrossEntropy2(nn.Module):
    """OHEM cross-entropy variant returning only the mined scalar loss.

    Bugs fixed:
    * __init__ stored the loss module in `self.DiceLoss` while forward()
      read `self.criterion`, so every forward call raised AttributeError.
    * The default `weight=torch.cuda.FloatTensor(...)` argument allocated
      a CUDA tensor at import time; it is now built lazily in __init__.
    """
    def __init__(self, ignore_label=-1, thres=0.9,
                 min_kept=131072, weight=None):
        super(OhemCrossEntropy2, self).__init__()
        self.thresh = thres
        self.min_kept = max(1, min_kept)
        self.ignore_label = ignore_label
        if weight is None:
            weight = torch.cuda.FloatTensor([1.1931, 0.9287, 0.9954, 1.1389, 1.0004, 0.8735])
        # forward() reads self.criterion; the original mistakenly assigned
        # this module to self.DiceLoss.
        self.criterion = nn.CrossEntropyLoss(weight=weight,
                                             ignore_index=ignore_label,
                                             reduction='none')
    def forward(self, score, target, **kwargs):
        ph, pw = score.size(2), score.size(3)
        h, w = target.size(1), target.size(2)
        if ph != h or pw != w:
            score = F.upsample(input=score, size=(h, w), mode='bilinear')
        pred = F.softmax(score, dim=1)
        pixel_losses = self.criterion(score, target).contiguous().view(-1)
        mask = target.contiguous().view(-1) != self.ignore_label
        # Ignored labels mapped to class 0 only to keep gather() indices valid.
        tmp_target = target.clone()
        tmp_target[tmp_target == self.ignore_label] = 0
        pred = pred.gather(1, tmp_target.unsqueeze(1))
        pred, ind = pred.contiguous().view(-1,)[mask].contiguous().sort()
        min_value = pred[min(self.min_kept, pred.numel() - 1)]
        threshold = max(min_value, self.thresh)
        pixel_losses = pixel_losses[mask][ind]
        pixel_losses = pixel_losses[pred < threshold]
        return pixel_losses.mean()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, classes='present', per_image=False, ignore_index=-1):
super(CrossEntropyLoss2d, self).__init__()
self.smooth = classes
self.per_image = per_image
self.ignore_index = ignore_index
self.nlll= nn.NLLLoss()
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, ignore=self.ignore_index)
loss2 = self.nlll(output, target)
return loss + loss2
class DiceLoss(nn.Module):
    """Soft Dice loss: 1 - Dice coefficient between softmax probs and one-hot labels."""
    def __init__(self, smooth=1., ignore_index=-1):
        super(DiceLoss, self).__init__()
        self.ignore_index = ignore_index
        # Additive smoothing keeps the ratio finite when masks are empty.
        self.smooth = smooth
    def forward(self, output, target):
        # NOTE(review): range() over tensor min/max relies on implicit int
        # conversion of 0-d tensors — confirm on the targeted torch version.
        if self.ignore_index not in range(target.min(), target.max()):
            if (target == self.ignore_index).sum() > 0:
                # NOTE(review): this remaps ignored labels by mutating the
                # caller's `target` tensor in place.
                target[target == self.ignore_index] = target.min()
        # make_one_hot expects (N,1,H,W) integer labels, returns (N,C,H,W) on GPU.
        target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
        output = F.softmax(output, dim=1)
        output_flat = output.contiguous().view(-1)
        target_flat = target.contiguous().view(-1)
        intersection = (output_flat * target_flat).sum()
        loss = 1 - ((2. * intersection + self.smooth) /
                    (output_flat.sum() + target_flat.sum() + self.smooth))
        return loss
class FocalLoss(nn.Module):
    """Focal loss: cross-entropy scaled by (1 - p_t)^gamma to focus on hard pixels."""
    def __init__(self, gamma=2, alpha=None, ignore_index=-1, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average
        self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)
    def forward(self, output, target):
        ce_per_pixel = self.CE_loss(output, target)
        true_class_prob = torch.exp(-ce_per_pixel)
        focal = ce_per_pixel * (1 - true_class_prob) ** self.gamma
        return focal.mean() if self.size_average else focal.sum()
class CE_DiceLoss(nn.Module):
    """Combined loss: 3*weighted-CE + Dice + 2*Focal.

    Bug fixed: the original accepted a `weight` argument but silently
    ignored it, always building the hard-coded CUDA weight tensor.  A
    caller-supplied `weight` is now honored; the default path is unchanged.
    """
    def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):
        super(CE_DiceLoss, self).__init__()
        self.smooth = smooth
        self.dice = DiceLoss()
        if weight is None:
            weight = torch.cuda.FloatTensor([1.0405, 0.9311, 1.0038, 1.1441, 1.0039, 0.8766])
        self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
        self.focal = FocalLoss()
    def forward(self, output, target):
        CE_loss = self.cross_entropy(output, target)
        dice_loss = self.dice(output, target)
        focal_loss = self.focal(output, target)
        # Fixed mixing coefficients: 3x CE, 1x Dice, 2x Focal.
        return 3 * CE_loss + dice_loss + 2 * focal_loss
class LovaszSoftmax(nn.Module):
    """Plain Lovasz-softmax loss over softmax probabilities."""
    def __init__(self, classes='present', per_image=False, ignore_index=-1):
        super(LovaszSoftmax, self).__init__()
        self.smooth = classes
        self.per_image = per_image
        self.ignore_index = ignore_index
    def forward(self, output, target):
        probs = F.softmax(output, dim=1)
        return lovasz_softmax(probs, target, ignore=self.ignore_index)
| 43.274286 | 178 | 0.616532 |
ace2f8849c67cca772d9eeef7c576c0313cdba51 | 81,331 | py | Python | zerver/models.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | [
"Apache-2.0"
] | null | null | null | zerver/models.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | [
"Apache-2.0"
] | null | null | null | zerver/models.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | [
"Apache-2.0"
] | null | null | null | from typing import Any, DefaultDict, Dict, List, Set, Tuple, TypeVar, Text, \
Union, Optional, Sequence, AbstractSet, Pattern, AnyStr, Callable, Iterable
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet, F
from django.db.models import Manager, CASCADE, Sum
from django.db.models.functions import Length
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
import django.contrib.auth
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator, MinLengthValidator, \
RegexValidator
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_api_key_cache_key, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
user_profile_cache_key, generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, active_user_ids_cache_key, \
get_stream_cache_key, realm_user_dicts_cache_key, \
bot_dicts_in_realm_cache_key, realm_user_dict_fields, \
bot_dict_fields, flush_message, bot_profile_cache_key
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import ModelReprMixin
from django.db import transaction
from django.utils.timezone import now as timezone_now
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.utils.translation import ugettext_lazy as _
from zerver.lib import cache
from zerver.lib.validator import check_int, check_float, check_string, \
check_short_string
from django.utils.encoding import force_text
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import logging
import sre_constants
import time
import datetime
import sys
# Size limits for message subjects/content and language identifiers.
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
# A collection of stream names: either an ordered sequence or a set.
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[Text], AbstractSet[Text])
def query_for_ids(query, user_ids, field):
    # type: (QuerySet, List[int], str) -> QuerySet
    """
    Narrow `query` with a raw `<field> in (...)` WHERE clause.

    Profiling showed this hand-built clause to be significantly faster
    than the normal Django approach.  The interpolated values are
    int()-coerced, so the clause is injection-safe, but callers must
    guard against passing an empty user_ids list.
    """
    assert(user_ids)
    id_list_sql = ', '.join(str(int(user_id)) for user_id in user_ids)
    where_clause = '{} in ({})'.format(field, id_list_sql)
    return query.extra(where=[where_clause])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache in addition to the remote cache.
# Maps recipient_id -> display recipient; rebound by flush_per_request_caches().
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
    # type: (int, int, Optional[int]) -> Union[Text, List[Dict[str, Any]]]
    """
    Describe a recipient, memoized in the per-request cache.

    Stream recipients (type_id required) yield the stream name as a
    string; other recipient types yield a list of per-user dicts.
    """
    cached = per_request_display_recipient_cache.get(recipient_id)
    if cached is None:
        cached = get_display_recipient_remote_cache(
            recipient_id, recipient_type, recipient_type_id)
        per_request_display_recipient_cache[recipient_id] = cached
    return cached
def get_display_recipient(recipient):
    # type: (Recipient) -> Union[Text, List[Dict[str, Any]]]
    """Describe a Recipient row via the per-request/remote caches."""
    return get_display_recipient_by_id(recipient.id, recipient.type,
                                       recipient.type_id)
def flush_per_request_caches():
    # type: () -> None
    """Rebind the per-request memoization dicts to fresh empty dicts."""
    global per_request_display_recipient_cache, per_request_realm_filters_cache
    per_request_display_recipient_cache = {}
    per_request_realm_filters_cache = {}
# The cache key depends only on recipient_id (the lambda drops the other args);
# entries live in the remote cache for a week.
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
                timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
    # type: (int, int, Optional[int]) -> Union[Text, List[Dict[str, Any]]]
    """
    returns: an appropriate object describing the recipient. For a
    stream this will be the stream name as a string. For a huddle or
    personal, it will be an array of dicts about each recipient.
    """
    if recipient_type == Recipient.STREAM:
        assert recipient_type_id is not None
        stream = Stream.objects.get(id=recipient_type_id)
        return stream.name
    # The main priority for ordering here is being deterministic.
    # Right now, we order by ID, which matches the ordering of user
    # names in the left sidebar.
    user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
                         .select_related()
                         .order_by('id'))
    return [{'email': user_profile.email,
             'full_name': user_profile.full_name,
             'short_name': user_profile.short_name,
             'id': user_profile.id,
             'is_mirror_dummy': user_profile.is_mirror_dummy} for user_profile in user_profile_list]
def get_realm_emoji_cache_key(realm):
    # type: (Realm) -> Text
    """Remote-cache key under which a realm's emoji mapping is stored."""
    return u'realm_emoji:{}'.format(realm.id)
class Realm(ModelReprMixin, models.Model):
    """A Zulip organization: its settings, authentication flags, and metadata.

    The settings listed in `property_types` are managed generically by
    the realm-settings code using the declared types.
    """
    MAX_REALM_NAME_LENGTH = 40
    MAX_REALM_SUBDOMAIN_LENGTH = 40
    AUTHENTICATION_FLAGS = [u'Google', u'Email', u'GitHub', u'LDAP', u'Dev', u'RemoteUser']
    SUBDOMAIN_FOR_ROOT_DOMAIN = ''
    name = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True) # type: Optional[Text]
    string_id = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True) # type: Text
    restricted_to_domain = models.BooleanField(default=False) # type: bool
    invite_required = models.BooleanField(default=True) # type: bool
    invite_by_admins_only = models.BooleanField(default=False) # type: bool
    inline_image_preview = models.BooleanField(default=True) # type: bool
    inline_url_embed_preview = models.BooleanField(default=True) # type: bool
    create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
    add_emoji_by_admins_only = models.BooleanField(default=False) # type: bool
    mandatory_topics = models.BooleanField(default=False) # type: bool
    show_digest_email = models.BooleanField(default=True) # type: bool
    name_changes_disabled = models.BooleanField(default=False) # type: bool
    email_changes_disabled = models.BooleanField(default=False) # type: bool
    description = models.TextField(null=True) # type: Optional[Text]
    allow_message_editing = models.BooleanField(default=True) # type: bool
    DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
    message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
    message_retention_days = models.IntegerField(null=True) # type: Optional[int]
    allow_edit_history = models.BooleanField(default=True) # type: bool
    # Valid org_types are {CORPORATE, COMMUNITY}
    CORPORATE = 1
    COMMUNITY = 2
    org_type = models.PositiveSmallIntegerField(default=CORPORATE) # type: int
    date_created = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE) # type: Optional[Stream]
    deactivated = models.BooleanField(default=False) # type: bool
    default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
    # Bitmask of enabled authentication backends; all flags on by default.
    authentication_methods = BitField(flags=AUTHENTICATION_FLAGS,
                                      default=2**31 - 1) # type: BitHandler
    waiting_period_threshold = models.PositiveIntegerField(default=0) # type: int
    # Define the types of the various automatically managed properties
    property_types = dict(
        add_emoji_by_admins_only=bool,
        allow_edit_history=bool,
        create_stream_by_admins_only=bool,
        default_language=Text,
        description=Text,
        email_changes_disabled=bool,
        invite_required=bool,
        invite_by_admins_only=bool,
        inline_image_preview=bool,
        inline_url_embed_preview=bool,
        mandatory_topics=bool,
        message_retention_days=(int, type(None)),
        name=Text,
        name_changes_disabled=bool,
        restricted_to_domain=bool,
        waiting_period_threshold=int,
    ) # type: Dict[str, Union[type, Tuple[type, ...]]]
    ICON_FROM_GRAVATAR = u'G'
    ICON_UPLOADED = u'U'
    ICON_SOURCES = (
        (ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (ICON_UPLOADED, 'Uploaded by administrator'),
    )
    icon_source = models.CharField(default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES,
                                   max_length=1) # type: Text
    icon_version = models.PositiveSmallIntegerField(default=1) # type: int
    DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
    def authentication_methods_dict(self):
        # type: () -> Dict[Text, bool]
        """Returns a mapping from authentication flags to their status,
        showing only those authentication flags that are supported on
        the current server (i.e. if EmailAuthBackend is not configured
        on the server, this will not return an entry for "Email")."""
        # This mapping needs to be imported from here due to the cyclic
        # dependency.
        from zproject.backends import AUTH_BACKEND_NAME_MAP
        ret = {} # type: Dict[Text, bool]
        supported_backends = {backend.__class__ for backend in django.contrib.auth.get_backends()}
        for k, v in self.authentication_methods.iteritems():
            backend = AUTH_BACKEND_NAME_MAP[k]
            if backend in supported_backends:
                ret[k] = v
        return ret
    def __unicode__(self):
        # type: () -> Text
        return u"<Realm: %s %s>" % (self.string_id, self.id)
    @cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
    def get_emoji(self):
        # type: () -> Dict[Text, Optional[Dict[str, Iterable[Text]]]]
        return get_realm_emoji_uncached(self)
    def get_admin_users(self):
        # type: () -> Sequence[UserProfile]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_realm_admin=True,
                                          is_active=True).select_related()
    def get_active_users(self):
        # type: () -> Sequence[UserProfile]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_active=True).select_related()
    def get_bot_domain(self):
        # type: () -> str
        # Remove the port. Mainly needed for development environment.
        return self.host.split(':')[0]
    def get_notifications_stream(self):
        # type: () -> Optional[Stream]
        # Returns None if the stream is unset or has been deactivated.
        if self.notifications_stream is not None and not self.notifications_stream.deactivated:
            return self.notifications_stream
        return None
    @property
    def subdomain(self):
        # type: () -> Text
        return self.string_id
    @property
    def display_subdomain(self):
        # type: () -> Text
        """Likely to be temporary function to avoid signup messages being sent
        to an empty topic"""
        if self.string_id == "":
            return "."
        return self.string_id
    @property
    def uri(self):
        # type: () -> str
        return settings.EXTERNAL_URI_SCHEME + self.host
    @property
    def host(self):
        # type: () -> str
        if self.subdomain not in [None, ""]:
            return "%s.%s" % (self.subdomain, settings.EXTERNAL_HOST)
        return settings.EXTERNAL_HOST
    @property
    def is_zephyr_mirror_realm(self):
        # type: () -> bool
        return self.string_id == "zephyr"
    @property
    def webathena_enabled(self):
        # type: () -> bool
        return self.is_zephyr_mirror_realm
    @property
    def presence_disabled(self):
        # type: () -> bool
        return self.is_zephyr_mirror_realm
    class Meta(object):
        permissions = (
            ('administer', "Administer a realm"),
            ('api_super_user', "Can send messages as other users for mirroring"),
        )
# Invalidate realm-related caches whenever a Realm row is saved.
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id):
    # type: (Text) -> Optional[Realm]
    # .first() returns None when no realm has this subdomain (string_id).
    return Realm.objects.filter(string_id=string_id).first()
def name_changes_disabled(realm):
    # type: (Optional[Realm]) -> bool
    """True if full-name changes are blocked, server-wide or by the realm."""
    if realm is not None:
        return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
    return settings.NAME_CHANGES_DISABLED
class RealmDomain(models.Model):
    """An email domain associated with `realm`; used by email_allowed_for_realm
    when the realm is restricted_to_domain.  allow_subdomains extends the
    match to subdomains of `domain`."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    # should always be stored lowercase
    domain = models.CharField(max_length=80, db_index=True) # type: Text
    allow_subdomains = models.BooleanField(default=False)
    class Meta(object):
        unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
#     "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email):
    # type: (Text) -> Text
    """Everything before the final @, lowercased (emails may contain several @s)."""
    return email.rpartition("@")[0].lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email):
    # type: (Text) -> Text
    """Everything after the final @, lowercased."""
    return email.rpartition("@")[2].lower()
class GetRealmByDomainException(Exception):
    """Raised when looking up a realm by email domain fails."""
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
    # type: (Text, Realm) -> bool
    """Whether this email's domain may belong to the realm.

    This is a domain check only; it does not consider invitations.
    """
    if not realm.restricted_to_domain:
        return True
    domain = email_to_domain(email)
    realm_domains = RealmDomain.objects.filter(realm=realm)
    if realm_domains.filter(domain=domain).exists():
        return True
    # No exact match: walk up the parent domains, matching only rows
    # that have allow_subdomains set.
    subdomain_rows = realm_domains.filter(allow_subdomains=True)
    while domain:
        domain = domain.partition('.')[2]
        if subdomain_rows.filter(domain=domain).exists():
            return True
    return False
def get_realm_domains(realm):
    # type: (Realm) -> List[Dict[str, Text]]
    """All of the realm's (domain, allow_subdomains) rows as plain dicts."""
    domain_rows = realm.realmdomain_set.values('domain', 'allow_subdomains')
    return list(domain_rows)
class RealmEmoji(ModelReprMixin, models.Model):
    """A realm-specific custom emoji, optionally attributed to its uploader."""
    author = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    # Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
    name = models.TextField(validators=[MinLengthValidator(1),
                                        RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
                                                       message=_("Invalid characters in emoji name"))]) # type: Text
    file_name = models.TextField(db_index=True, null=True) # type: Optional[Text]
    deactivated = models.BooleanField(default=False) # type: bool
    # Storage path pattern for uploaded emoji files.
    PATH_ID_TEMPLATE = "{realm_id}/emoji/{emoji_file_name}"
    class Meta(object):
        unique_together = ("realm", "name")
    def __unicode__(self):
        # type: () -> Text
        return u"<RealmEmoji(%s): %s %s>" % (self.realm.string_id, self.name, self.file_name)
def get_realm_emoji_uncached(realm):
    # type: (Realm) -> Dict[Text, Dict[str, Any]]
    """Build the emoji-name -> info mapping for a realm from the database."""
    from zerver.lib.emoji import get_emoji_url
    emoji_by_name = {}
    for row in RealmEmoji.objects.filter(realm=realm).select_related('author'):
        author_info = None
        if row.author:
            author_info = {
                'id': row.author.id,
                'email': row.author.email,
                'full_name': row.author.full_name}
        emoji_by_name[row.name] = dict(source_url=get_emoji_url(row.file_name, row.realm_id),
                                       deactivated=row.deactivated,
                                       author=author_info)
    return emoji_by_name
def flush_realm_emoji(sender, **kwargs):
    # type: (Any, **Any) -> None
    """Signal handler: recompute and store the realm's emoji cache entry."""
    realm = kwargs['instance'].realm
    cache_key = get_realm_emoji_cache_key(realm)
    fresh_value = get_realm_emoji_uncached(realm)
    cache_set(cache_key, fresh_value, timeout=3600*24*7)
# Keep the cached emoji mapping fresh on any RealmEmoji change.
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value):
    # type: (Text) -> None
    """Validate a realm filter pattern: must contain a named group and compile."""
    shape = re.compile(r'(?:[\w\-#]*)(\(\?P<\w+>.+\))')
    error_msg = 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)'
    if shape.match(str(value)) is None:
        raise ValidationError(error_msg)
    try:
        re.compile(value)
    except sre_constants.error:
        # Right shape, but not a legal regular expression.
        raise ValidationError(error_msg)
def filter_format_validator(value):
    # type: (str) -> None
    """Validate a realm filter URL format string of the shape ...%(name)s..."""
    allowed_shape = re.compile(r'^[\.\/:a-zA-Z0-9_?=-]+%\(([a-zA-Z0-9_-]+)\)s[a-zA-Z0-9_-]*$')
    if allowed_shape.match(value) is None:
        raise ValidationError('URL format string must be in the following format: `https://example.com/%(\w+)s`')
class RealmFilter(models.Model):
    """A realm filter (linkifier): text matching regex `pattern` is rewritten
    into a link using `url_format_string`."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    pattern = models.TextField(validators=[filter_pattern_validator]) # type: Text
    url_format_string = models.TextField(validators=[URLValidator(), filter_format_validator]) # type: Text
    class Meta(object):
        unique_together = ("realm", "pattern")
    def __unicode__(self):
        # type: () -> Text
        return u"<RealmFilter(%s): %s %s>" % (self.realm.string_id, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(realm_id):
    # type: (int) -> Text
    """Remote-cache key under which a realm's filters are stored."""
    return u'all_realm_filters:{}'.format(realm_id)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
# Maps realm_id -> [(pattern, url_format_string, id), ...]; rebound per request.
per_request_realm_filters_cache = {} # type: Dict[int, List[Tuple[Text, Text, int]]]
def realm_in_local_realm_filters_cache(realm_id):
    # type: (int) -> bool
    # True if this process has already fetched the realm's filters this request.
    return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id):
    # type: (int) -> List[Tuple[Text, Text, int]]
    """Fetch a realm's filters, memoized in the per-process cache."""
    if realm_id not in per_request_realm_filters_cache:
        per_request_realm_filters_cache[realm_id] = realm_filters_for_realm_remote_cache(realm_id)
    return per_request_realm_filters_cache[realm_id]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id):
    # type: (int) -> List[Tuple[Text, Text, int]]
    """Load the realm's (pattern, url_format_string, id) triples from the DB."""
    return [(row.pattern, row.url_format_string, row.id)
            for row in RealmFilter.objects.filter(realm_id=realm_id)]
def all_realm_filters():
    # type: () -> Dict[int, List[Tuple[Text, Text, int]]]
    """Group every RealmFilter row in the database by realm_id."""
    by_realm = defaultdict(list) # type: DefaultDict[int, List[Tuple[Text, Text, int]]]
    for row in RealmFilter.objects.all():
        by_realm[row.realm_id].append((row.pattern, row.url_format_string, row.id))
    return by_realm
def flush_realm_filter(sender, **kwargs):
    # type: (Any, **Any) -> None
    """Signal handler: drop both remote and per-process filter caches for a realm."""
    realm_id = kwargs['instance'].realm_id
    cache_delete(get_realm_filters_cache_key(realm_id))
    # pop() with a default replaces the original try/except KeyError.
    per_request_realm_filters_cache.pop(realm_id, None)
# Drop the filter caches whenever a RealmFilter row changes.
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
    """A Zulip account (human or bot) within a single realm."""
    # Bot type constants.
    DEFAULT_BOT = 1
    """
    Incoming webhook bots are limited to only sending messages via webhooks.
    Thus, it is less of a security risk to expose their API keys to third-party services,
    since they can't be used to read messages.
    """
    INCOMING_WEBHOOK_BOT = 2
    # This value is also being used in static/js/settings_bots.js. On updating it here, update it there as well.
    OUTGOING_WEBHOOK_BOT = 3
    """
    Embedded bots run within the Zulip server itself; events are added to the
    embedded_bots queue and then handled by a QueueProcessingWorker.
    """
    EMBEDDED_BOT = 4
    # For now, don't allow creating other bot types via the UI
    ALLOWED_BOT_TYPES = [
        DEFAULT_BOT,
        INCOMING_WEBHOOK_BOT,
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT,
    ]
    # Bot types whose messages are handled by a service; see is_service_bot below.
    SERVICE_BOT_TYPES = [
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT
    ]
    # Fields from models.AbstractUser minus last_name and first_name,
    # which we don't use; email is modified to make it indexed and unique.
    email = models.EmailField(blank=False, db_index=True, unique=True)  # type: Text
    is_staff = models.BooleanField(default=False)  # type: bool
    is_active = models.BooleanField(default=True, db_index=True)  # type: bool
    is_realm_admin = models.BooleanField(default=False, db_index=True)  # type: bool
    is_bot = models.BooleanField(default=False, db_index=True)  # type: bool
    bot_type = models.PositiveSmallIntegerField(null=True, db_index=True)  # type: Optional[int]
    is_api_super_user = models.BooleanField(default=False, db_index=True)  # type: bool
    date_joined = models.DateTimeField(default=timezone_now)  # type: datetime.datetime
    is_mirror_dummy = models.BooleanField(default=False)  # type: bool
    # SET_NULL so deleting a bot's owner does not delete the bot.
    bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)  # type: Optional[UserProfile]
    long_term_idle = models.BooleanField(default=False, db_index=True)  # type: bool
    USERNAME_FIELD = 'email'
    MAX_NAME_LENGTH = 100
    MIN_NAME_LENGTH = 3
    API_KEY_LENGTH = 32
    NAME_INVALID_CHARS = ['*', '`', '>', '"', '@']
    # Our custom site-specific fields
    full_name = models.CharField(max_length=MAX_NAME_LENGTH)  # type: Text
    short_name = models.CharField(max_length=MAX_NAME_LENGTH)  # type: Text
    # pointer points to Message.id, NOT UserMessage.id.
    pointer = models.IntegerField()  # type: int
    last_pointer_updater = models.CharField(max_length=64)  # type: Text
    realm = models.ForeignKey(Realm, on_delete=CASCADE)  # type: Realm
    api_key = models.CharField(max_length=API_KEY_LENGTH)  # type: Text
    # Version of the Terms of Service the user last agreed to; see major_tos_version.
    tos_version = models.CharField(null=True, max_length=10)  # type: Optional[Text]
    last_active_message_id = models.IntegerField(null=True)  # type: int
    ### Notifications settings. ###
    # Stream notifications.
    enable_stream_desktop_notifications = models.BooleanField(default=False)  # type: bool
    enable_stream_push_notifications = models.BooleanField(default=False)  # type: bool
    enable_stream_sounds = models.BooleanField(default=False)  # type: bool
    # PM + @-mention notifications.
    enable_desktop_notifications = models.BooleanField(default=True)  # type: bool
    pm_content_in_desktop_notifications = models.BooleanField(default=True)  # type: bool
    enable_sounds = models.BooleanField(default=True)  # type: bool
    enable_offline_email_notifications = models.BooleanField(default=True)  # type: bool
    enable_offline_push_notifications = models.BooleanField(default=True)  # type: bool
    enable_online_push_notifications = models.BooleanField(default=False)  # type: bool
    enable_digest_emails = models.BooleanField(default=True)  # type: bool
    # Old notification field superseded by existence of stream notification
    # settings.
    default_desktop_notifications = models.BooleanField(default=True)  # type: bool
    ###
    last_reminder = models.DateTimeField(default=timezone_now, null=True)  # type: Optional[datetime.datetime]
    rate_limits = models.CharField(default=u"", max_length=100)  # type: Text # comma-separated list of range:max pairs
    # Default streams
    default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)  # type: Optional[Stream]
    default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)  # type: Optional[Stream]
    default_all_public_streams = models.BooleanField(default=False)  # type: bool
    # UI vars
    enter_sends = models.NullBooleanField(default=False)  # type: Optional[bool]
    autoscroll_forever = models.BooleanField(default=False)  # type: bool
    left_side_userlist = models.BooleanField(default=False)  # type: bool
    emoji_alt_code = models.BooleanField(default=False)  # type: bool
    # display settings
    twenty_four_hour_time = models.BooleanField(default=False)  # type: bool
    default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH)  # type: Text
    high_contrast_mode = models.BooleanField(default=False)  # type: bool
    # Hours to wait before sending another email to a user
    EMAIL_REMINDER_WAITPERIOD = 24
    # Minutes to wait before warning a bot owner that their bot sent a message
    # to a nonexistent stream
    BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
    AVATAR_FROM_GRAVATAR = u'G'
    AVATAR_FROM_USER = u'U'
    AVATAR_SOURCES = (
        (AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (AVATAR_FROM_USER, 'Uploaded by user'),
    )
    avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1)  # type: Text
    avatar_version = models.PositiveSmallIntegerField(default=1)  # type: int
    # Onboarding-tutorial progress states.
    TUTORIAL_WAITING = u'W'
    TUTORIAL_STARTED = u'S'
    TUTORIAL_FINISHED = u'F'
    TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
                       (TUTORIAL_STARTED, "Started"),
                       (TUTORIAL_FINISHED, "Finished"))
    tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1)  # type: Text
    # Contains serialized JSON of the form:
    #    [("step 1", true), ("step 2", false)]
    # where the second element of each tuple is if the step has been
    # completed.
    onboarding_steps = models.TextField(default=u'[]')  # type: Text
    alert_words = models.TextField(default=u'[]')  # type: Text # json-serialized list of strings
    objects = UserManager()  # type: UserManager
    DEFAULT_UPLOADS_QUOTA = 1024*1024*1024
    quota = models.IntegerField(default=DEFAULT_UPLOADS_QUOTA)  # type: int
    # The maximum length of a timezone in pytz.all_timezones is 32.
    # Setting max_length=40 is a safe choice.
    # In Django, the convention is to use empty string instead of Null
    # for text based fields. For more information, see
    # https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
    timezone = models.CharField(max_length=40, default=u'')  # type: Text
    # Emojisets
    APPLE_EMOJISET = u'apple'
    EMOJIONE_EMOJISET = u'emojione'
    GOOGLE_EMOJISET = u'google'
    TWITTER_EMOJISET = u'twitter'
    EMOJISET_CHOICES = ((APPLE_EMOJISET, _("Apple style")),
                        (EMOJIONE_EMOJISET, _("Emoji One style")),
                        (GOOGLE_EMOJISET, _("Google style")),
                        (TWITTER_EMOJISET, _("Twitter style")))
    emojiset = models.CharField(default=GOOGLE_EMOJISET, choices=EMOJISET_CHOICES, max_length=20)  # type: Text
    # Define the types of the various automatically managed properties
    property_types = dict(
        default_language=Text,
        emoji_alt_code=bool,
        emojiset=Text,
        left_side_userlist=bool,
        timezone=Text,
        twenty_four_hour_time=bool,
        high_contrast_mode=bool,
    )
    notification_setting_types = dict(
        enable_desktop_notifications=bool,
        enable_digest_emails=bool,
        enable_offline_email_notifications=bool,
        enable_offline_push_notifications=bool,
        enable_online_push_notifications=bool,
        enable_sounds=bool,
        enable_stream_desktop_notifications=bool,
        enable_stream_push_notifications=bool,
        enable_stream_sounds=bool,
        pm_content_in_desktop_notifications=bool,
    )

    @property
    def profile_data(self):
        # type: () -> List[Dict[str, Union[int, float, Text]]]
        """Return the realm's custom profile fields, each field's dict augmented
        with this user's (converted) value, or None where the user has no value."""
        values = CustomProfileFieldValue.objects.filter(user_profile=self)
        user_data = {v.field_id: v.value for v in values}
        data = []  # type: List[Dict[str, Union[int, float, Text]]]
        for field in custom_profile_fields_for_realm(self.realm_id):
            value = user_data.get(field.id, None)
            field_type = field.field_type
            if value is not None:
                # Convert the raw stored value to the field's native type.
                converter = field.FIELD_CONVERTERS[field_type]
                value = converter(value)
            field_data = {}  # type: Dict[str, Union[int, float, Text]]
            for k, v in field.as_dict().items():
                field_data[k] = v
            field_data['value'] = value
            data.append(field_data)
        return data

    def can_admin_user(self, target_user):
        # type: (UserProfile) -> bool
        """Returns whether this user has permission to modify target_user"""
        if target_user.bot_owner == self:
            return True
        elif self.is_realm_admin and self.realm == target_user.realm:
            return True
        else:
            return False

    def __unicode__(self):
        # type: () -> Text
        return u"<UserProfile: %s %s>" % (self.email, self.realm)

    @property
    def is_incoming_webhook(self):
        # type: () -> bool
        return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT

    @property
    def is_outgoing_webhook_bot(self):
        # type: () -> bool
        return self.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT

    @property
    def is_embedded_bot(self):
        # type: () -> bool
        return self.bot_type == UserProfile.EMBEDDED_BOT

    @property
    def is_service_bot(self):
        # type: () -> bool
        # Both conditions are checked: bot_type alone is not trusted
        # for accounts that are not flagged as bots.
        return self.is_bot and self.bot_type in UserProfile.SERVICE_BOT_TYPES

    @staticmethod
    def emojiset_choices():
        # type: () -> Dict[Text, Text]
        # force_text evaluates the lazily-translated display names.
        return {emojiset[0]: force_text(emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES}

    @staticmethod
    def emails_from_ids(user_ids):
        # type: (Sequence[int]) -> Dict[int, Text]
        """Bulk-map user ids to emails with a single values() query."""
        rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
        return {row['id']: row['email'] for row in rows}

    def can_create_streams(self):
        # type: () -> bool
        # Admins always can; otherwise the realm may restrict creation to
        # admins only, or require a waiting period (in days) after joining.
        diff = (timezone_now() - self.date_joined).days
        if self.is_realm_admin:
            return True
        elif self.realm.create_stream_by_admins_only:
            return False
        if diff >= self.realm.waiting_period_threshold:
            return True
        return False

    def major_tos_version(self):
        # type: () -> int
        # Returns -1 when the user has never agreed to any ToS version.
        if self.tos_version is not None:
            return int(self.tos_version.split('.')[0])
        else:
            return -1
def receives_offline_notifications(user_profile):
    # type: (UserProfile) -> bool
    """True when a non-bot user has offline email or push notifications enabled."""
    if user_profile.is_bot:
        return False
    return (user_profile.enable_offline_email_notifications or
            user_profile.enable_offline_push_notifications)
def receives_online_notifications(user_profile):
    # type: (UserProfile) -> bool
    """True when a non-bot user wants push notifications even while online."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_online_push_notifications
def receives_stream_notifications(user_profile):
    # type: (UserProfile) -> bool
    """True when a non-bot user has stream push notifications enabled."""
    if user_profile.is_bot:
        return False
    return user_profile.enable_stream_push_notifications
def remote_user_to_email(remote_user):
    # type: (Text) -> Text
    """Map a REMOTE_USER (SSO) username to a Zulip email, appending the
    configured SSO_APPEND_DOMAIN when one is set."""
    if settings.SSO_APPEND_DOMAIN is None:
        return remote_user
    return remote_user + "@" + settings.SSO_APPEND_DOMAIN
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
    """A pending invitation/registration for an email address."""
    email = models.EmailField()  # type: Text
    # The user who sent the invitation, if any.
    referred_by = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE)  # type: Optional[UserProfile]
    # Streams the new user should be subscribed to on signup.
    streams = models.ManyToManyField('Stream')  # type: Manager
    invited_at = models.DateTimeField(auto_now=True)  # type: datetime.datetime
    # True when this registration will create a brand-new realm.
    realm_creation = models.BooleanField(default=False)
    # Indicates whether the user needs a password. Users who were
    # created via SSO style auth (e.g. GitHub/Google) generally do not.
    password_required = models.BooleanField(default=True)
    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status = models.IntegerField(default=0)  # type: int
    realm = models.ForeignKey(Realm, null=True, on_delete=CASCADE)  # type: Optional[Realm]
    invited_as_admin = models.BooleanField(default=False)  # type: Optional[bool]
class MultiuseInvite(models.Model):
    """An invitation link that can be redeemed by multiple users."""
    # Non-null here (unlike PreregistrationUser.referred_by).
    referred_by = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    streams = models.ManyToManyField('Stream')  # type: Manager
    realm = models.ForeignKey(Realm, on_delete=CASCADE)  # type: Realm
class EmailChangeStatus(models.Model):
    """Tracks a pending change of a user's email address."""
    new_email = models.EmailField()  # type: Text
    old_email = models.EmailField()  # type: Text
    updated_at = models.DateTimeField(auto_now=True)  # type: datetime.datetime
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status = models.IntegerField(default=0)  # type: int
    realm = models.ForeignKey(Realm, on_delete=CASCADE)  # type: Realm
class AbstractPushDeviceToken(models.Model):
    """Common schema for mobile push-notification device registrations."""
    # Supported push services.
    APNS = 1
    GCM = 2
    KINDS = (
        (APNS, 'apns'),
        (GCM, 'gcm'),
    )
    kind = models.PositiveSmallIntegerField(choices=KINDS)  # type: int
    # The token is a unique device-specific token that is
    # sent to us from each device:
    #   - APNS token if kind == APNS
    #   - GCM registration id if kind == GCM
    last_updated = models.DateTimeField(auto_now=True)  # type: datetime.datetime
    # [optional] Contains the app id of the device if it is an iOS device
    ios_app_id = models.TextField(null=True)  # type: Optional[Text]

    class Meta(object):
        abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
    # The user who's device this is
    user = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)  # type: UserProfile
    # NOTE(review): CharField values are text at runtime; the `bytes`
    # annotation below looks wrong — confirm before relying on it.
    token = models.CharField(max_length=4096, unique=True)  # type: bytes
def generate_email_token_for_stream():
    # type: () -> str
    # Random token used in a stream's incoming-email address (see Stream.email_token).
    return generate_random_token(32)
class Stream(ModelReprMixin, models.Model):
    """A stream (channel) of messages within a realm."""
    MAX_NAME_LENGTH = 60
    name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)  # type: Text
    realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)  # type: Realm
    invite_only = models.NullBooleanField(default=False)  # type: Optional[bool]
    # The unique thing about Zephyr public streams is that we never list their
    # users.  We may try to generalize this concept later, but for now
    # we just use a concrete field.  (Zephyr public streams aren't exactly like
    # invite-only streams--while both are private in terms of listing users,
    # for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing field is performance.
    is_in_zephyr_realm = models.BooleanField(default=False)  # type: bool
    # Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream length is 60, so we
    # have plenty of room for the token.
    email_token = models.CharField(
        max_length=32, default=generate_email_token_for_stream)  # type: str
    description = models.CharField(max_length=1024, default=u'')  # type: Text
    date_created = models.DateTimeField(default=timezone_now)  # type: datetime.datetime
    deactivated = models.BooleanField(default=False)  # type: bool

    def __unicode__(self):
        # type: () -> Text
        return u"<Stream: %s>" % (self.name,)

    def is_public(self):
        # type: () -> bool
        # All streams are private in Zephyr mirroring realms.
        return not self.invite_only and not self.is_in_zephyr_realm

    class Meta(object):
        # Stream names are unique within a realm (matching is
        # case-insensitive at lookup time; see get_realm_stream).
        unique_together = ("name", "realm")

    @staticmethod
    def num_subscribers_for_stream_id(stream_id):
        # type: (int) -> int
        # Counts only active subscriptions of active (non-deactivated) users.
        return Subscription.objects.filter(
            recipient__type=Recipient.STREAM,
            recipient__type_id=stream_id,
            user_profile__is_active=True,
            active=True
        ).count()

    def num_subscribers(self):
        # type: () -> int
        return Stream.num_subscribers_for_stream_id(self.id)

    # This is stream information that is sent to clients
    def to_dict(self):
        # type: () -> Dict[str, Any]
        return dict(name=self.name,
                    stream_id=self.id,
                    description=self.description,
                    invite_only=self.invite_only)
# Keep the stream cache consistent with the database on every change.
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
    """Maps a globally unique recipient id to a type-specific id
    (stream id, user_profile id, or huddle id); see the comment above."""
    type_id = models.IntegerField(db_index=True)  # type: int
    type = models.PositiveSmallIntegerField(db_index=True)  # type: int
    # Valid types are {personal, stream, huddle}
    PERSONAL = 1
    STREAM = 2
    HUDDLE = 3

    class Meta(object):
        unique_together = ("type", "type_id")

    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _type_names = {
        PERSONAL: 'personal',
        STREAM: 'stream',
        HUDDLE: 'huddle'}

    def type_name(self):
        # type: () -> str
        # Raises KeyError if invalid
        return self._type_names[self.type]

    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self)
        return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class MutedTopic(ModelReprMixin, models.Model):
    """A (user, stream, topic) triple the user has muted."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    stream = models.ForeignKey(Stream, on_delete=CASCADE)
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    topic_name = models.CharField(max_length=MAX_SUBJECT_LENGTH)

    class Meta(object):
        unique_together = ('user_profile', 'stream', 'topic_name')

    def __unicode__(self):
        # type: () -> Text
        return u"<MutedTopic: (%s, %s, %s)>" % (self.user_profile.email, self.stream.name, self.topic_name)
class Client(ModelReprMixin, models.Model):
    """A client application (e.g. website, API client) that sends messages."""
    name = models.CharField(max_length=30, db_index=True, unique=True)  # type: Text

    def __unicode__(self):
        # type: () -> Text
        return u"<Client: %s>" % (self.name,)
# Per-process cache of Client objects, keyed by cache.KEY_PREFIX + name.
get_client_cache = {}  # type: Dict[Text, Client]
def get_client(name):
    # type: (Text) -> Client
    """Fetch (and per-process cache) the Client object with the given name."""
    # Accessing KEY_PREFIX through the module is necessary
    # because we need the updated value of the variable.
    cache_name = cache.KEY_PREFIX + name
    try:
        return get_client_cache[cache_name]
    except KeyError:
        client = get_client_remote_cache(name)
        get_client_cache[cache_name] = client
        return client
def get_client_cache_key(name):
    # type: (Text) -> Text
    # Hash the name so arbitrary client names produce safe cache keys.
    return u'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
    # type: (Text) -> Client
    # Creates the Client row on first use; cached for a week.
    (client, _) = Client.objects.get_or_create(name=name)
    return client
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_realm_stream(stream_name, realm_id):
    # type: (Text, int) -> Stream
    # Case-insensitive lookup; raises Stream.DoesNotExist when missing.
    return Stream.objects.select_related("realm").get(
        name__iexact=stream_name.strip(), realm_id=realm_id)
def stream_name_in_use(stream_name, realm_id):
    # type: (Text, int) -> bool
    # Case-insensitive existence check (includes deactivated streams).
    return Stream.objects.filter(
        name__iexact=stream_name.strip(),
        realm_id=realm_id
    ).exists()
def get_active_streams(realm):
    # type: (Optional[Realm]) -> QuerySet
    """
    Return all streams (including invite-only streams) that have not been deactivated.
    """
    return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name, realm):
    # type: (Text, Realm) -> Stream
    '''
    Callers that don't have a Realm object already available should use
    get_realm_stream directly, to avoid unnecessarily fetching the
    Realm object.
    '''
    return get_realm_stream(stream_name, realm.id)
def bulk_get_streams(realm, stream_names):
    # type: (Realm, STREAM_NAMES) -> Dict[Text, Any]
    """Fetch several streams by (case-insensitive) name at once, using the
    per-stream cache and a single database query for the misses."""
    def fetch_streams_by_name(stream_names):
        # type: (List[Text]) -> Sequence[Stream]
        #
        # This should be just
        #
        # Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
        #                                               realm_id=realm_id)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        if len(stream_names) == 0:
            return []
        # Parameterized placeholders; stream_names are passed as params
        # below, so no SQL injection is possible here.
        upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
        where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
        return get_active_streams(realm.id).select_related("realm").extra(
            where=[where_clause],
            params=stream_names)
    # Cache misses are re-keyed by lowercased name via id_fetcher.
    return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm.id),
                                     fetch_streams_by_name,
                                     [stream_name.lower() for stream_name in stream_names],
                                     id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
    # type: (int, int) -> Text
    # Cache key for a (type, type_id) Recipient lookup.
    return u"%s:get_recipient:%s:%s" % (cache.KEY_PREFIX, type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
    # type: (int, int) -> Recipient
    # Raises Recipient.DoesNotExist when no such row exists.
    return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
    # type: (int, List[int]) -> Dict[int, Any]
    """Fetch Recipient rows of one type for many type_ids, via the cache;
    the result is keyed by type_id."""
    def cache_key_function(type_id):
        # type: (int) -> Text
        return get_recipient_cache_key(type, type_id)

    def query_function(type_ids):
        # type: (List[int]) -> Sequence[Recipient]
        # TODO: Change return type to QuerySet[Recipient]
        return Recipient.objects.filter(type=type, type_id__in=type_ids)
    return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
                                     id_fetcher=lambda recipient: recipient.type_id)
class AbstractMessage(ModelReprMixin, models.Model):
    """Schema shared by live (Message) and archived (ArchivedMessage) messages."""
    sender = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE)  # type: Recipient
    # The topic of the message (historically named "subject").
    subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True)  # type: Text
    content = models.TextField()  # type: Text
    rendered_content = models.TextField(null=True)  # type: Optional[Text]
    rendered_content_version = models.IntegerField(null=True)  # type: Optional[int]
    pub_date = models.DateTimeField('date published', db_index=True)  # type: datetime.datetime
    sending_client = models.ForeignKey(Client, on_delete=CASCADE)  # type: Client
    last_edit_time = models.DateTimeField(null=True)  # type: Optional[datetime.datetime]
    edit_history = models.TextField(null=True)  # type: Optional[Text]
    # Denormalized content flags; see Message.update_calculated_fields.
    has_attachment = models.BooleanField(default=False, db_index=True)  # type: bool
    has_image = models.BooleanField(default=False, db_index=True)  # type: bool
    has_link = models.BooleanField(default=False, db_index=True)  # type: bool

    class Meta(object):
        abstract = True

    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self.recipient)
        return u"<%s: %s / %s / %r>" % (self.__class__.__name__, display_recipient,
                                        self.subject, self.sender)
class ArchivedMessage(AbstractMessage):
    """A message moved out of the live Message table by the retention/archival system."""
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True)  # type: datetime.datetime
class Message(AbstractMessage):
    def topic_name(self):
        # type: () -> Text
        """
        Please start using this helper to facilitate an
        eventual switch over to a separate topic table.
        """
        return self.subject

    def get_realm(self):
        # type: () -> Realm
        return self.sender.realm

    def save_rendered_content(self):
        # type: () -> None
        # Persist only the rendering fields, not the whole row.
        self.save(update_fields=["rendered_content", "rendered_content_version"])

    @staticmethod
    def need_to_render_content(rendered_content, rendered_content_version, bugdown_version):
        # type: (Optional[Text], Optional[int], int) -> bool
        # True when there is no rendering yet, or it predates the current
        # bugdown (markdown) version.
        return (rendered_content is None or
                rendered_content_version is None or
                rendered_content_version < bugdown_version)

    def to_log_dict(self):
        # type: () -> Dict[str, Any]
        """Flatten this message into a dict suitable for the message log."""
        return dict(
            id=self.id,
            sender_id=self.sender.id,
            sender_email=self.sender.email,
            sender_realm_str=self.sender.realm.string_id,
            sender_full_name=self.sender.full_name,
            sender_short_name=self.sender.short_name,
            sending_client=self.sending_client.name,
            type=self.recipient.type_name(),
            recipient=get_display_recipient(self.recipient),
            subject=self.topic_name(),
            content=self.content,
            timestamp=datetime_to_timestamp(self.pub_date))

    def sent_by_human(self):
        # type: () -> bool
        # Heuristic based on the sending client's name.
        sending_client = self.sending_client.name.lower()
        return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
                                   'zulipmobile', 'zulipelectron', 'snipe',
                                   'website', 'ios', 'android')) or (
                                       'desktop app' in sending_client)

    @staticmethod
    def content_has_attachment(content):
        # type: (Text) -> Optional[Match]
        # re.search returns None when there is no match; callers wrap in bool().
        return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)

    @staticmethod
    def content_has_image(content):
        # type: (Text) -> bool
        return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))

    @staticmethod
    def content_has_link(content):
        # type: (Text) -> bool
        return ('http://' in content or
                'https://' in content or
                '/user_uploads' in content or
                (settings.ENABLE_FILE_LINKS and 'file:///' in content))

    @staticmethod
    def is_status_message(content, rendered_content):
        # type: (Text, Text) -> bool
        """
        Returns True if content and rendered_content are from 'me_message'
        """
        if content.startswith('/me ') and '\n' not in content:
            if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
                return True
        return False

    def update_calculated_fields(self):
        # type: () -> None
        # TODO: rendered_content could also be considered a calculated field
        content = self.content
        self.has_attachment = bool(Message.content_has_attachment(content))
        self.has_image = bool(Message.content_has_image(content))
        self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
    # type: (Any, **Any) -> None
    # Recompute the denormalized content flags whenever the content may
    # have changed (full save, or a save targeting the content field).
    if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
        message = kwargs['instance']
        message.update_calculated_fields()
def get_context_for_message(message):
    # type: (Message) -> QuerySet[Message]
    """Return up to 10 earlier messages in the same conversation (same
    recipient and topic) from the preceding 15 minutes, newest first."""
    # TODO: Change return type to QuerySet[Message]
    return Message.objects.filter(
        recipient_id=message.recipient_id,
        subject=message.subject,
        id__lt=message.id,
        pub_date__gt=message.pub_date - timedelta(minutes=15),
    ).order_by('-id')[:10]
# Keep the message cache consistent with the database on every save.
post_save.connect(flush_message, sender=Message)
class Reaction(ModelReprMixin, models.Model):
    """An emoji reaction by one user on one message."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    message = models.ForeignKey(Message, on_delete=CASCADE)  # type: Message
    emoji_name = models.TextField()  # type: Text
    emoji_code = models.TextField()  # type: Text

    # Where the emoji comes from: a Unicode codepoint, a realm's custom
    # emoji, or one of Zulip's extra emoji.
    UNICODE_EMOJI = u'unicode_emoji'
    REALM_EMOJI = u'realm_emoji'
    ZULIP_EXTRA_EMOJI = u'zulip_extra_emoji'
    REACTION_TYPES = ((UNICODE_EMOJI, _("Unicode emoji")),
                      (REALM_EMOJI, _("Realm emoji")),
                      (ZULIP_EXTRA_EMOJI, _("Zulip extra emoji")))
    reaction_type = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30)  # type: Text

    class Meta(object):
        # A user may react with a given emoji name at most once per message.
        unique_together = ("user_profile", "message", "emoji_name")

    @staticmethod
    def get_raw_db_rows(needed_ids):
        # type: (List[int]) -> List[Dict[str, Any]]
        # Bulk-fetch reaction rows (as dicts) for a set of message ids.
        fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
                  'user_profile__email', 'user_profile__id', 'user_profile__full_name']
        return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(ModelReprMixin, models.Model):
    """Per-user delivery record for a message, with per-user flags;
    see the table-level comment above."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    # WARNING: We removed the previously-final flag,
    # is_me_message, without clearing any values it might have had in
    # the database.  So when we next add a flag, you need to do a
    # migration to set it to 0 first
    # Flag bit positions correspond to list indexes (bit i <-> ALL_FLAGS[i]).
    ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
                 'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
                 'has_alert_word', "historical"]
    flags = BitField(flags=ALL_FLAGS, default=0)  # type: BitHandler

    class Meta(object):
        abstract = True
        unique_together = ("user_profile", "message")

    @staticmethod
    def where_unread():
        # type: () -> str
        # Use this for Django ORM queries where we are getting lots
        # of rows.  This custom SQL plays nice with our partial indexes.
        # Grep the code for example usage.  (Bit 0 is the 'read' flag.)
        return 'flags & 1 = 0'

    def flags_list(self):
        # type: () -> List[str]
        flags = int(self.flags)
        return self.flags_list_for_flags(flags)

    @staticmethod
    def flags_list_for_flags(flags):
        # type: (int) -> List[str]
        '''
        This function is highly optimized, because it actually slows down
        sending messages in a naive implementation.
        '''
        names = AbstractUserMessage.ALL_FLAGS
        return [
            names[i]
            for i in range(len(names))
            if flags & (2 ** i)
        ]

    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self.message.recipient)
        return u"<%s: %s / %s (%s)>" % (self.__class__.__name__, display_recipient,
                                        self.user_profile.email, self.flags_list())
class ArchivedUserMessage(AbstractUserMessage):
    """UserMessage counterpart for archived messages."""
    message = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)  # type: ArchivedMessage
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True)  # type: datetime.datetime
class UserMessage(AbstractUserMessage):
    # The live (non-archived) per-user message table.
    message = models.ForeignKey(Message, on_delete=CASCADE)  # type: Message
def parse_usermessage_flags(val):
    # type: (int) -> List[str]
    """Decode a UserMessage flags bitmask into the list of set flag names,
    in ALL_FLAGS order."""
    return [name
            for offset, name in enumerate(UserMessage.ALL_FLAGS)
            if val & (1 << offset)]
class AbstractAttachment(ModelReprMixin, models.Model):
    """Schema shared by live (Attachment) and archived (ArchivedAttachment) uploads."""
    file_name = models.TextField(db_index=True)  # type: Text
    # path_id is a storage location agnostic representation of the path of the file.
    # If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
    # then its path_id will be a/b/abc/temp_file.py.
    path_id = models.TextField(db_index=True, unique=True)  # type: Text
    owner = models.ForeignKey(UserProfile, on_delete=CASCADE)  # type: UserProfile
    realm = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)  # type: Optional[Realm]
    # True when the file was posted to a public stream, making it
    # accessible to the whole realm (see validate_attachment_request).
    is_realm_public = models.BooleanField(default=False)  # type: bool
    create_time = models.DateTimeField(default=timezone_now,
                                       db_index=True)  # type: datetime.datetime
    # Size in bytes; may be unset for old rows.
    size = models.IntegerField(null=True)  # type: Optional[int]

    class Meta(object):
        abstract = True

    def __unicode__(self):
        # type: () -> Text
        return u"<%s: %s>" % (self.__class__.__name__, self.file_name,)
class ArchivedAttachment(AbstractAttachment):
    """Attachment counterpart for archived messages."""
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True)  # type: datetime.datetime
    messages = models.ManyToManyField(ArchivedMessage)  # type: Manager
class Attachment(AbstractAttachment):
    messages = models.ManyToManyField(Message)  # type: Manager

    def is_claimed(self):
        # type: () -> bool
        # An attachment is "claimed" once at least one message references it.
        return self.messages.count() > 0

    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Serialize this attachment for API/UI consumption."""
        return {
            'id': self.id,
            'name': self.file_name,
            'path_id': self.path_id,
            'size': self.size,
            # convert to JavaScript-style UNIX timestamp so we can take
            # advantage of client timezones.
            'create_time': time.mktime(self.create_time.timetuple()) * 1000,
            'messages': [{
                'id': m.id,
                'name': time.mktime(m.pub_date.timetuple()) * 1000
            } for m in self.messages.all()]
        }
def validate_attachment_request(user_profile, path_id):
    # type: (UserProfile, Text) -> Optional[bool]
    """Return True if user_profile may access the file at path_id, False if
    not, and None when no such attachment exists."""
    try:
        attachment = Attachment.objects.get(path_id=path_id)
        messages = attachment.messages.all()
        if user_profile == attachment.owner:
            # If you own the file, you can access it.
            return True
        elif attachment.is_realm_public and attachment.realm == user_profile.realm:
            # Any user in the realm can access realm-public files
            return True
        elif UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
            # If it was sent in a private message or private stream
            # message, then anyone who received that message can access it.
            return True
        else:
            return False
    except Attachment.DoesNotExist:
        return None
def get_old_unclaimed_attachments(weeks_ago):
    # type: (int) -> Sequence[Attachment]
    """Return attachments not referenced by any message and older than weeks_ago weeks."""
    # TODO: Change return type to QuerySet[Attachment]
    delta_weeks_ago = timezone_now() - datetime.timedelta(weeks=weeks_ago)
    old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
    return old_attachments
class Subscription(ModelReprMixin, models.Model):
    """A user's subscription to one Recipient, plus per-stream settings."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE) # type: Recipient
    # False means the user unsubscribed; the row is kept for history/settings.
    active = models.BooleanField(default=True) # type: bool
    in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
    DEFAULT_STREAM_COLOR = u"#c2c2c2"
    color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: Text
    pin_to_top = models.BooleanField(default=False) # type: bool
    desktop_notifications = models.BooleanField(default=True) # type: bool
    audible_notifications = models.BooleanField(default=True) # type: bool
    push_notifications = models.BooleanField(default=False) # type: bool
    # Combination desktop + audible notifications superseded by the
    # above.
    notifications = models.BooleanField(default=False) # type: bool
    class Meta(object):
        unique_together = ("user_profile", "recipient")
    def __unicode__(self):
        # type: () -> Text
        return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
    # type: (int) -> UserProfile
    """Fetch a UserProfile by primary key (result cached for one week)."""
    queryset = UserProfile.objects.select_related()
    return queryset.get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
    # type: (Text) -> UserProfile
    """Case-insensitive email lookup of a UserProfile (cached for one week)."""
    normalized = email.strip()
    return UserProfile.objects.select_related().get(email__iexact=normalized)
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def get_user_profile_by_api_key(api_key):
    # type: (Text) -> UserProfile
    """Fetch the UserProfile owning this API key (cached for one week)."""
    queryset = UserProfile.objects.select_related()
    return queryset.get(api_key=api_key)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email, realm):
    # type: (Text, Realm) -> UserProfile
    """Fetch the user with this (case-insensitive) email inside `realm`."""
    normalized = email.strip()
    return UserProfile.objects.select_related().get(email__iexact=normalized,
                                                    realm=realm)
def get_user_including_cross_realm(email, realm=None):
    # type: (Text, Optional[Realm]) -> UserProfile
    """Like get_user, but cross-realm system bots may be fetched without a realm."""
    if email not in get_cross_realm_emails():
        # Ordinary users must be looked up within a realm.
        assert realm is not None
        return get_user(email, realm)
    return get_system_bot(email)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email):
    # type: (Text) -> UserProfile
    """Fetch a cross-realm system bot by email (cached for one week)."""
    queryset = UserProfile.objects.select_related()
    return queryset.get(email__iexact=email.strip())
@cache_with_key(realm_user_dicts_cache_key, timeout=3600*24*7)
def get_realm_user_dicts(realm_id):
    # type: (int) -> List[Dict[str, Any]]
    """Rows (dicts of realm_user_dict_fields) for every user in the realm."""
    realm_users = UserProfile.objects.filter(realm_id=realm_id)
    return realm_users.values(*realm_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id):
    # type: (int) -> List[int]
    """Ids of every active (non-deactivated) user in the realm (cached)."""
    id_rows = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).values_list('id', flat=True)
    return list(id_rows)
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm):
    # type: (Realm) -> List[Dict[str, Any]]
    """Serialized rows (bot_dict_fields) for every bot user in the realm."""
    bots = UserProfile.objects.filter(realm=realm, is_bot=True)
    return bots.values(*bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
    # type: (UserProfile, bool) -> List[Dict[str, Any]]
    """Serialized dicts for the bots this user may administer."""
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        # Realm admins can manage every bot in the realm.
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
                                            bot_owner=user_profile).values(*bot_dict_fields)
    # TODO: Remove this import cycle
    from zerver.lib.avatar import avatar_url_from_dict
    bot_dicts = []
    for botdict in result:
        bot_dicts.append({
            'email': botdict['email'],
            'user_id': botdict['id'],
            'full_name': botdict['full_name'],
            'bot_type': botdict['bot_type'],
            'is_active': botdict['is_active'],
            'api_key': botdict['api_key'],
            'default_sending_stream': botdict['default_sending_stream__name'],
            'default_events_register_stream': botdict['default_events_register_stream__name'],
            'default_all_public_streams': botdict['default_all_public_streams'],
            'owner': botdict['bot_owner__email'],
            'avatar_url': avatar_url_from_dict(botdict),
        })
    return bot_dicts
def get_prereg_user_by_email(email):
    # type: (Text) -> PreregistrationUser
    # A user can be invited many times, so only return the result of the latest
    # invite.
    invites = PreregistrationUser.objects.filter(email__iexact=email.strip())
    return invites.latest("invited_at")
def get_cross_realm_emails():
    # type: () -> Set[Text]
    """Emails of the system bots that live outside any single realm."""
    emails = settings.CROSS_REALM_BOT_EMAILS
    return set(emails)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
    """A group-PM conversation, identified by a hash of its member user ids."""
    # TODO: We should consider whether using
    # CommaSeparatedIntegerField would be better.
    huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
def get_huddle_hash(id_list):
    # type: (List[int]) -> Text
    """Deterministic hash for a set of user ids (order/duplicate insensitive)."""
    unique_ids = sorted(set(id_list))
    hash_key = ",".join(map(str, unique_ids))
    return make_safe_digest(hash_key)
def huddle_hash_cache_key(huddle_hash):
    # type: (Text) -> Text
    """Memcached key under which the Huddle with this hash is cached."""
    return u"huddle_by_hash:{}".format(huddle_hash)
def get_huddle(id_list):
    # type: (List[int]) -> Huddle
    """Get or create the Huddle for this set of user ids."""
    return get_huddle_backend(get_huddle_hash(id_list), id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
    # type: (Text, List[int]) -> Huddle
    """Get or atomically create the Huddle (plus its Recipient/Subscriptions)."""
    with transaction.atomic():
        huddle, created = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
        if created:
            # First conversation between this group of users: materialize the
            # Recipient and per-user Subscription rows in the same transaction.
            recipient = Recipient.objects.create(type_id=huddle.id,
                                                 type=Recipient.HUDDLE)
            subs_to_create = [
                Subscription(recipient=recipient, user_profile_id=user_profile_id)
                for user_profile_id in id_list
            ]
            Subscription.objects.bulk_create(subs_to_create)
        return huddle
def clear_database():
    # type: () -> None
    """Wipe memcached and delete every row of the core tables (dev/test only!)."""
    pylibmc.Client(['127.0.0.1']).flush_all()
    tables = (Message, Stream, UserProfile, Recipient,
              Realm, Subscription, Huddle, UserMessage, Client,
              DefaultStream)  # type: Any
    for model in tables:
        model.objects.all().delete()
    Session.objects.all().delete()
class UserActivity(models.Model):
    """Aggregate count and last-seen time of a (user, client, API query) triple."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    client = models.ForeignKey(Client, on_delete=CASCADE) # type: Client
    query = models.CharField(max_length=50, db_index=True) # type: Text
    count = models.IntegerField() # type: int
    last_visit = models.DateTimeField('last visit') # type: datetime.datetime
    class Meta(object):
        unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
    """A contiguous time interval during which a user was active."""
    # Intervals shorter than this are not recorded separately.
    MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
    end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
    """Last presence status change per (user, client) pair."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    client = models.ForeignKey(Client, on_delete=CASCADE) # type: Client
    # Valid statuses
    ACTIVE = 1
    IDLE = 2
    timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
    status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
    @staticmethod
    def status_to_string(status):
        # type: (int) -> str
        """Map ACTIVE/IDLE to their wire-format names; raise on anything else."""
        if status == UserPresence.ACTIVE:
            return 'active'
        elif status == UserPresence.IDLE:
            return 'idle'
        else:
            raise ValueError('Unknown status: %s' % (status,))
    @staticmethod
    def get_status_dict_by_user(user_profile):
        # type: (UserProfile) -> Dict[Text, Dict[Any, Any]]
        """Presence info for a single user, in get_status_dicts_for_rows format."""
        query = UserPresence.objects.filter(user_profile=user_profile).values(
            'client__name',
            'status',
            'timestamp',
            'user_profile__email',
            'user_profile__id',
            'user_profile__enable_offline_push_notifications',
        )
        presence_rows = list(query)
        mobile_user_ids = set() # type: Set[int]
        # Having any registered push device marks the user as "mobile".
        if PushDeviceToken.objects.filter(user=user_profile).exists():
            mobile_user_ids.add(user_profile.id)
        return UserPresence.get_status_dicts_for_rows(presence_rows, mobile_user_ids)
    @staticmethod
    def get_status_dict_by_realm(realm_id):
        # type: (int) -> Dict[Text, Dict[Any, Any]]
        """Presence info for every active human in the realm (last two weeks)."""
        user_profile_ids = UserProfile.objects.filter(
            realm_id=realm_id,
            is_active=True,
            is_bot=False
        ).order_by('id').values_list('id', flat=True)
        user_profile_ids = list(user_profile_ids)
        if not user_profile_ids:
            return {}
        # Presence older than two weeks is irrelevant to clients.
        two_weeks_ago = timezone_now() - datetime.timedelta(weeks=2)
        query = UserPresence.objects.filter(
            timestamp__gte=two_weeks_ago
        ).values(
            'client__name',
            'status',
            'timestamp',
            'user_profile__email',
            'user_profile__id',
            'user_profile__enable_offline_push_notifications',
        )
        query = query_for_ids(
            query=query,
            user_ids=user_profile_ids,
            field='user_profile_id'
        )
        presence_rows = list(query)
        # Users with at least one push device registered.
        mobile_query = PushDeviceToken.objects.distinct(
            'user_id'
        ).values_list(
            'user_id',
            flat=True
        )
        mobile_query = query_for_ids(
            query=mobile_query,
            user_ids=user_profile_ids,
            field='user_id'
        )
        mobile_user_ids = set(mobile_query)
        return UserPresence.get_status_dicts_for_rows(presence_rows, mobile_user_ids)
    @staticmethod
    def get_status_dicts_for_rows(presence_rows, mobile_user_ids):
        # type: (List[Dict[str, Any]], Set[int]) -> Dict[Text, Dict[Any, Any]]
        """Group presence rows by email -> {client_name: info, 'aggregated': info}."""
        info_row_dct = defaultdict(list) # type: DefaultDict[Text, List[Dict[str, Any]]]
        for row in presence_rows:
            email = row['user_profile__email']
            client_name = row['client__name']
            status = UserPresence.status_to_string(row['status'])
            dt = row['timestamp']
            timestamp = datetime_to_timestamp(dt)
            push_enabled = row['user_profile__enable_offline_push_notifications']
            has_push_devices = row['user_profile__id'] in mobile_user_ids
            pushable = (push_enabled and has_push_devices)
            info = dict(
                client=client_name,
                status=status,
                dt=dt,
                timestamp=timestamp,
                pushable=pushable,
            )
            info_row_dct[email].append(info)
        user_statuses = dict() # type: Dict[str, Dict[str, Any]]
        for email, info_rows in info_row_dct.items():
            # Note that datetime values have sub-second granularity, which is
            # mostly important for avoiding test flakes, but it's also technically
            # more precise for real users.
            by_time = lambda row: row['dt']
            most_recent_info = max(info_rows, key=by_time)
            # We don't send datetime values to the client.
            for r in info_rows:
                del r['dt']
            client_dict = {info['client']: info for info in info_rows}
            user_statuses[email] = client_dict
            # The word "aggregated" here is possibly misleading.
            # It's really just the most recent client's info.
            user_statuses[email]['aggregated'] = dict(
                client=most_recent_info['client'],
                status=most_recent_info['status'],
                timestamp=most_recent_info['timestamp'],
            )
        return user_statuses
    @staticmethod
    def to_presence_dict(client_name, status, dt, push_enabled=False,
                         has_push_devices=False):
        # type: (Text, int, datetime.datetime, bool, bool) -> Dict[str, Any]
        """Build one client's presence dict in the wire format."""
        presence_val = UserPresence.status_to_string(status)
        timestamp = datetime_to_timestamp(dt)
        return dict(
            client=client_name,
            status=presence_val,
            timestamp=timestamp,
            pushable=(push_enabled and has_push_devices),
        )
    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Serialize this row; push info is omitted (defaults to not pushable)."""
        return UserPresence.to_presence_dict(
            self.client.name,
            self.status,
            self.timestamp
        )
    @staticmethod
    def status_from_string(status):
        # type: (NonBinaryStr) -> Optional[int]
        """Inverse of status_to_string; None for an unrecognized string."""
        if status == 'active':
            status_val = UserPresence.ACTIVE # type: Optional[int] # See https://github.com/python/mypy/issues/2611
        elif status == 'idle':
            status_val = UserPresence.IDLE
        else:
            status_val = None
        return status_val
    class Meta(object):
        unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
    """Marks a stream as a default for new users joining the realm."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream
    class Meta(object):
        unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
    """A named group of streams new users can subscribe to in one action."""
    MAX_NAME_LENGTH = 60
    name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: Text
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    streams = models.ManyToManyField('Stream') # type: Manager
    class Meta(object):
        unique_together = ("realm", "name")
    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Serialize the group name plus each member stream's dict."""
        return dict(name=self.name, streams=[stream.to_dict() for stream in self.streams.all()])
def get_default_stream_groups(realm):
    # type: (Realm) -> List[DefaultStreamGroup]
    """All DefaultStreamGroups configured for this realm."""
    groups = DefaultStreamGroup.objects.filter(realm=realm)
    return groups
class AbstractScheduledJob(models.Model):
    """Base class for queued jobs that should run at scheduled_timestamp."""
    scheduled_timestamp = models.DateTimeField(db_index=True) # type: datetime.datetime
    # JSON representation of arguments to consumer
    data = models.TextField() # type: Text
    class Meta(object):
        abstract = True
class ScheduledEmail(AbstractScheduledJob):
    """An email queued for future delivery (welcome, digest, reminders)."""
    # Exactly one of user or address should be set. These are used to
    # filter the set of ScheduledEmails.
    user = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE) # type: UserProfile
    # Just the address part of a full "name <address>" email address
    address = models.EmailField(null=True, db_index=True) # type: Text
    # Valid types are below
    WELCOME = 1
    DIGEST = 2
    INVITATION_REMINDER = 3
    type = models.PositiveSmallIntegerField() # type: int
    def __str__(self):
        # type: () -> Text
        return u"<ScheduledEmail: %s %s %s>" % (self.type, self.user or self.address,
                                                self.scheduled_timestamp)
# Map from email template names to ScheduledEmail.type values.  Note that
# both day-1 and day-2 followups are WELCOME emails.
EMAIL_TYPES = {
    'followup_day1': ScheduledEmail.WELCOME,
    'followup_day2': ScheduledEmail.WELCOME,
    'digest': ScheduledEmail.DIGEST,
    'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class RealmAuditLog(ModelReprMixin, models.Model):
    """Append-only audit trail of administrative/user events in a realm."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    acting_user = models.ForeignKey(UserProfile, null=True, related_name='+', on_delete=CASCADE) # type: Optional[UserProfile]
    modified_user = models.ForeignKey(UserProfile, null=True, related_name='+', on_delete=CASCADE) # type: Optional[UserProfile]
    modified_stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]
    event_last_message_id = models.IntegerField(null=True) # type: Optional[int]
    event_type = models.CharField(max_length=40) # type: Text
    event_time = models.DateTimeField(db_index=True) # type: datetime.datetime
    # If True, event_time is an overestimate of the true time. Can be used
    # by migrations when introducing a new event_type.
    backfilled = models.BooleanField(default=False) # type: bool
    extra_data = models.TextField(null=True) # type: Optional[Text]
    def __unicode__(self):
        # type: () -> Text
        if self.modified_user is not None:
            return u"<RealmAuditLog: %s %s %s>" % (self.modified_user, self.event_type, self.event_time)
        if self.modified_stream is not None:
            return u"<RealmAuditLog: %s %s %s>" % (self.modified_stream, self.event_type, self.event_time)
        # Fixed: this literal lacked the u prefix, so on Python 2 it returned
        # str while the branches above (and the declared Text type) are unicode.
        return u"<RealmAuditLog: %s %s %s>" % (self.realm, self.event_type, self.event_time)
class UserHotspot(models.Model):
    """Records that a user has seen a given onboarding hotspot."""
    user = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    hotspot = models.CharField(max_length=30) # type: Text
    timestamp = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    class Meta(object):
        unique_together = ("user", "hotspot")
class CustomProfileField(models.Model):
    """A realm-defined extra profile field (e.g. "Job title")."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    name = models.CharField(max_length=100) # type: Text
    # Field type ids; FIELD_TYPE_DATA maps each to (display name, validator, converter).
    INTEGER = 1
    FLOAT = 2
    SHORT_TEXT = 3
    LONG_TEXT = 4
    FIELD_TYPE_DATA = [
        # Type, Name, Validator, Converter
        (INTEGER, u'Integer', check_int, int),
        (FLOAT, u'Float', check_float, float),
        (SHORT_TEXT, u'Short Text', check_short_string, str),
        (LONG_TEXT, u'Long Text', check_string, str),
    ] # type: List[Tuple[int, Text, Callable[[str, Any], str], Callable[[Any], Any]]]
    # Derived lookup tables keyed by the type id.
    FIELD_VALIDATORS = {item[0]: item[2] for item in FIELD_TYPE_DATA} # type: Dict[int, Callable[[str, Any], str]]
    FIELD_CONVERTERS = {item[0]: item[3] for item in FIELD_TYPE_DATA} # type: Dict[int, Callable[[Any], Any]]
    FIELD_TYPE_CHOICES = [(item[0], item[1]) for item in FIELD_TYPE_DATA] # type: List[Tuple[int, Text]]
    field_type = models.PositiveSmallIntegerField(choices=FIELD_TYPE_CHOICES,
                                                  default=SHORT_TEXT) # type: int
    class Meta(object):
        unique_together = ('realm', 'name')
    def as_dict(self):
        # type: () -> Dict[str, Union[int, Text]]
        """Serialize id/name/type for clients."""
        return {
            'id': self.id,
            'name': self.name,
            'type': self.field_type,
        }
def custom_profile_fields_for_realm(realm_id):
    # type: (int) -> List[CustomProfileField]
    """Custom profile fields for the realm, ordered alphabetically by name."""
    fields = CustomProfileField.objects.filter(realm=realm_id)
    return fields.order_by('name')
class CustomProfileFieldValue(models.Model):
    """One user's value for one CustomProfileField."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    field = models.ForeignKey(CustomProfileField, on_delete=CASCADE) # type: CustomProfileField
    value = models.TextField() # type: Text
    class Meta(object):
        unique_together = ('user_profile', 'field')
# Interface names for Services.  An interface provides additional
# functionality like parsing a message to obtain the query URL, the data to
# be sent to that URL, and parsing of the response.
GENERIC_INTERFACE = u'GenericService'
SLACK_INTERFACE = u'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
    """Configuration for an outgoing-webhook bot or an embedded bot.

    See the block comment above for how each field is used per bot type.
    """
    name = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH) # type: Text
    # Bot user corresponding to the Service. The bot_type of this user
    # deterines the type of service. If non-bot services are added later,
    # user_profile can also represent the owner of the Service.
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    base_url = models.TextField() # type: Text
    token = models.TextField() # type: Text
    # Interface / API version of the service.
    interface = models.PositiveSmallIntegerField(default=1) # type: int
    # Valid interfaces are {generic, zulip_bot_service, slack}
    GENERIC = 1
    SLACK = 2
    ALLOWED_INTERFACE_TYPES = [
        GENERIC,
        SLACK,
    ]
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _interfaces = {
        GENERIC: GENERIC_INTERFACE,
        SLACK: SLACK_INTERFACE,
    } # type: Dict[int, Text]
    def interface_name(self):
        # type: () -> Text
        """Human-readable interface name for this service."""
        # Raises KeyError if invalid
        return self._interfaces[self.interface]
def get_realm_outgoing_webhook_services_name(realm):
    # type: (Realm) -> List[Any]
    """Names of every outgoing-webhook bot Service in the realm."""
    services = Service.objects.filter(
        user_profile__realm=realm, user_profile__is_bot=True,
        user_profile__bot_type=UserProfile.OUTGOING_WEBHOOK_BOT)
    return list(services.values('name'))
def get_bot_services(user_profile_id):
    # type: (str) -> List[Service]
    """All Services attached to the bot with this user id.

    NOTE(review): the annotation says str, but the value is matched against
    the integer pk (user_profile__id); confirm call sites and consider int.
    """
    return list(Service.objects.filter(user_profile__id=user_profile_id))
def get_service_profile(user_profile_id, service_name):
    # type: (str, str) -> Service
    """The named Service belonging to this bot user.

    NOTE(review): user_profile_id is annotated str but compared to the
    integer pk — same concern as get_bot_services; confirm call sites.
    """
    return Service.objects.get(user_profile__id=user_profile_id, name=service_name)
class BotUserStateData(models.Model):
    """Arbitrary per-bot key/value storage (used by embedded bots)."""
    bot_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    key = models.TextField(db_index=True) # type: Text
    value = models.TextField() # type: Text
    class Meta(object):
        unique_together = ("bot_profile", "key")
def get_bot_state(bot_profile, key):
    # type: (UserProfile, Text) -> Text
    """Stored value for `key`; raises BotUserStateData.DoesNotExist if absent."""
    entry = BotUserStateData.objects.get(bot_profile=bot_profile, key=key)
    return entry.value
def set_bot_state(bot_profile, key, value):
    # type: (UserProfile, Text, Text) -> None
    """Create or overwrite the state entry `key` for this bot."""
    obj, created = BotUserStateData.objects.get_or_create(
        bot_profile=bot_profile, key=key, defaults={'value': value})
    if created:
        return
    # Entry already existed: overwrite its value.
    obj.value = value
    obj.save()
def remove_bot_state(bot_profile, key):
    # type: (UserProfile, Text) -> None
    """Delete the state entry `key`; raises BotUserStateData.DoesNotExist if absent."""
    # delete()'s (count, per-model-counts) result was previously bound to two
    # unused locals; discard it instead.
    BotUserStateData.objects.get(bot_profile=bot_profile, key=key).delete()
def is_key_in_bot_state(bot_profile, key):
    # type: (UserProfile, Text) -> bool
    """Whether a state entry with this key exists for the bot."""
    matches = BotUserStateData.objects.filter(bot_profile=bot_profile, key=key)
    return matches.exists()
def get_bot_state_size(bot_profile, key=None):
    # type: (UserProfile, Optional[Text]) -> int
    """Total storage (key + value lengths) used by this bot, or by one key."""
    if key is not None:
        try:
            entry = BotUserStateData.objects.get(bot_profile=bot_profile, key=key)
        except BotUserStateData.DoesNotExist:
            return 0
        return len(key) + len(entry.value)
    # No key given: sum key+value lengths over every entry in the database.
    totals = BotUserStateData.objects.filter(bot_profile=bot_profile) \
        .annotate(key_size=Length('key'), value_size=Length('value')) \
        .aggregate(sum=Sum(F('key_size') + F('value_size')))
    return totals['sum'] or 0
| 41.117796 | 145 | 0.676138 |
ace2f8dbb21abcb0d5b7714c9133bedf7ebe4a7b | 5,787 | py | Python | src/search_vip_resource.py | spencercjh/myFirstPython | 89b1744e5dd9c5008646eefd65144819bed06b9d | [
"Apache-2.0"
] | null | null | null | src/search_vip_resource.py | spencercjh/myFirstPython | 89b1744e5dd9c5008646eefd65144819bed06b9d | [
"Apache-2.0"
] | null | null | null | src/search_vip_resource.py | spencercjh/myFirstPython | 89b1744e5dd9c5008646eefd65144819bed06b9d | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import time
from multiprocessing import Pool
from const import get_one_book_all_chapters_url_front, server_host, get_one_book_all_chapters_url_end, count_process, \
file_location
from http_request import get_one_book_all_chapters, get_all_books, sc_ftqq_send_message
from judge_free import judge_free
# Verbose log format: level, time, logger name, function, line number, message.
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
# Module logger additionally mirrors output to ../log/search_vip.log.
LOGGER = logging.getLogger("search_vip")
handler = logging.FileHandler("../log/search_vip.log")
LOGGER.addHandler(handler)
def save_one_chapter_source(book_id_inner, chapter_id):
    """Append the delete-URL for one paid chapter to saveUrl.txt."""
    LOGGER.info("进程 %s 存储删除URL:id为" % os.getpid() + book_id_inner + "的id为" + chapter_id + "章节")
    url_parts = (server_host, get_one_book_all_chapters_url_front, book_id_inner,
                 get_one_book_all_chapters_url_end, chapter_id)
    url = "".join(str(part) for part in url_parts)
    with open(file_location + 'saveUrl.txt', 'a', encoding="utf-8") as url_file:
        url_file.write(url + "\n")
def traverse_one_book(book_id_inner):
    """Scan every chapter of one book, queueing paid chapters for deletion.

    Returns (all_chapters_accounted_for, paid_count, free_count,
    total_chapter_count, book_id) so the caller can verify the traversal.
    """
    delete_chapter_count = 0
    free_chapter_count = 0
    LOGGER.info("开始遍历图书" + book_id_inner)
    try:
        one_book_all_chapters_result = get_one_book_all_chapters(book_id_inner)
    except Exception as e:
        LOGGER.error(e)
        # 某一本书的所有章节请求失败,保存出错的bookId
        LOGGER.error("图书:" + book_id_inner + "章节遍历出错")
        with open(file_location + "saveErrorBook.txt", 'a', encoding="utf-8") as save_error_book_id_file:
            save_error_book_id_file.write(
                server_host + get_one_book_all_chapters_url_front + book_id_inner + get_one_book_all_chapters_url_end + "\n")
        # Bug fix: previously execution fell through and parsed a stale (or
        # undefined) module-global response from an earlier book.  Report
        # failure for this book instead.
        return False, 0, 0, 0, book_id_inner
    one_book_all_chapters_json_result = json.loads(one_book_all_chapters_result)
    one_book_all_chapters = one_book_all_chapters_json_result['data']
    previous = False
    for chapter in one_book_all_chapters:
        zh_source_url = chapter['zhSourceURL']
        chapter_id = chapter['id']
        chapter_name = chapter['name']
        LOGGER.info("开始遍历图书" + book_id_inner + "的章节" + chapter_id + "章节名:" + chapter_name)
        # url判空: chapters with no source URL are treated as free.
        if zh_source_url is None or len(zh_source_url) == 0:
            free_chapter_count += 1
            continue
        # 如果是收费章节,就删除对应的网站资源,如果前一章是收费章节,那当前章节就一定是收费章节
        try:
            if previous:
                # Everything after the first paid chapter is paid too.
                delete_chapter_count += 1
                save_one_chapter_source(book_id_inner, chapter_id)
            elif judge_free(zh_source_url) is False:
                delete_chapter_count += 1
                previous = True
                save_one_chapter_source(book_id_inner, chapter_id)
            else:
                free_chapter_count += 1
        except Exception as e:
            LOGGER.error(e)
            # 某一章的页面请求失败,保存出错的bookId和chapterId
            LOGGER.error("图书:" + book_id_inner + "第" + chapter_id + "章节遍历出错")
            with open(file_location + "saveErrorChapter.txt", 'a', encoding="utf-8") as save_error_book_id_file:
                save_error_book_id_file.write(
                    server_host + get_one_book_all_chapters_url_front + book_id_inner +
                    get_one_book_all_chapters_url_end +
                    chapter_id + "\n")
    total = len(one_book_all_chapters)
    return ((delete_chapter_count + free_chapter_count) == total,
            delete_chapter_count, free_chapter_count, total, book_id_inner)
def remove_file():
    """Best-effort cleanup of the previous run's log and output files."""
    paths = ('../log/search_vip.log',
             file_location + 'saveUrl.txt',
             file_location + 'saveChapterId.txt',
             file_location + "saveErrorChapter.txt",
             file_location + "saveErrorBook.txt",
             file_location + "errorTraverseBook.txt")
    for path in paths:
        # Bug fix: a single failure (e.g. the first file missing) previously
        # aborted the whole cleanup, leaving stale output files behind.
        try:
            os.remove(path)
        except OSError as e:
            LOGGER.debug(e)
def search_all_vip_chapter():
    """Entry point: scan all books for paid chapters using a process pool."""
    start_time = time.time()
    remove_file()
    try:
        all_books_http_result = get_all_books()
    except Exception as e:
        LOGGER.error(e)
        LOGGER.error("超时,解决网络问题后再起一次!")
        # Bug fix: previously execution continued and json.loads([]) crashed.
        return
    json_result = json.loads(all_books_http_result)
    book_id_list = [book['id'] for book in json_result['data']]
    # 多进程:一本书一个进程
    pool = Pool(count_process)
    # Bug fix: the old code called .get() immediately after each apply_async,
    # which blocked until that task finished and serialized the pool to one
    # task at a time.  Submit everything first, then collect results.
    async_results = [(book_id, pool.apply_async(traverse_one_book, args=(book_id,)))
                     for book_id in book_id_list]
    pool.close()
    for book_id, async_result in async_results:
        result = async_result.get()
        if not result[0]:
            with open(file_location + "errorTraverseBook.txt", 'a', encoding='utf-8') as error_traverse_book_file:
                error_traverse_book_file.write(str(result) + "\n")
            LOGGER.error("遍历结果不正确" + str(result))
        else:
            LOGGER.info("图书:" + result[4] + "付费章节搜索成功")
    pool.join()
    LOGGER.info("\n\n全部需要删除URL查找成功")
    save_chapter_id()
    LOGGER.info("\n\n存储chapterId成功")
    # Bug fix: hours/minutes were hard-coded to 0 and the total second count
    # was printed in the seconds slot; split the elapsed time properly.
    elapsed = int(time.time() - start_time)
    minutes, seconds = divmod(elapsed, 60)
    hours, minutes = divmod(minutes, 60)
    LOGGER.info("\n Complete time cost {:>02d}:{:>02d}:{:>02d}".format(hours, minutes, seconds))
    sc_ftqq_send_message("搜索收费章节完成", "耗时" + str(elapsed) + "秒")
def save_chapter_id():
    """Derive saveChapterId.txt from saveUrl.txt (everything after 'chapters/')."""
    marker = "chapters/"
    with open(file_location + 'saveUrl.txt', 'r', encoding="utf-8") as url_file, \
            open(file_location + 'saveChapterId.txt', 'w', encoding="utf-8") as id_file:
        for url in url_file:
            id_file.write(url[url.find(marker) + len(marker):])
# Allow running this scraper directly as a script.
if __name__ == '__main__':
    search_all_vip_chapter()
| 41.934783 | 132 | 0.674443 |
ace2f8f30b5925e1e78f513b4a44b3aecf71f7c4 | 232 | py | Python | zoomus/components/__init__.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | 2 | 2020-03-14T14:47:18.000Z | 2020-04-06T23:20:54.000Z | zoomus/components/__init__.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | null | null | null | zoomus/components/__init__.py | appfluence/zoomus | a14e1f08700b9dad89f00b0d5c2a73a24d421c78 | [
"Apache-2.0"
] | 1 | 2022-03-04T11:54:56.000Z | 2022-03-04T11:54:56.000Z | """Zoom.us REST API Python Client Components"""
from __future__ import absolute_import
from . import (
meeting,
recording,
report,
user,
webinar)
__author__ = "Patrick R. Schmid"
__email__ = "prschmid@act.md"
| 16.571429 | 47 | 0.685345 |
ace2f96b3ed96ef107a052e8ab3a15fd669c6d46 | 5,204 | py | Python | luvdis/rom.py | arantonitis/luvdis | 53e5f4aa1314ecafbb8b464409d57ae178a1cb1f | [
"MIT"
] | 17 | 2020-06-02T18:14:10.000Z | 2021-09-24T16:56:28.000Z | luvdis/rom.py | arantonitis/luvdis | 53e5f4aa1314ecafbb8b464409d57ae178a1cb1f | [
"MIT"
] | 4 | 2020-03-31T17:23:43.000Z | 2021-04-23T06:40:16.000Z | luvdis/rom.py | aarant/luvdis | 53e5f4aa1314ecafbb8b464409d57ae178a1cb1f | [
"MIT"
] | 1 | 2020-05-28T05:16:19.000Z | 2020-05-28T05:16:19.000Z | """ GBA ROM class & tools. """
import sys
import pickle
import pkg_resources
import xml.etree.ElementTree as ET
from hashlib import sha1, md5
from io import BytesIO
from luvdis.common import eprint, warn
from luvdis.disasm import disasm
DB_F = pkg_resources.resource_stream('luvdis', 'gba-db.pickle')  # Pickled ROM database shipped with the package
ROM_DB = None  # Lazily-loaded (by_serial, by_md5, by_sha1) tuple; populated by ROM.info
class ROMInfo:
    """ GBA ROM information record.
    Attributes:
        name (str): ROM title/name.
        size (int): ROM size in bytes.
        md5 (bytes): This ROM's md5 hash.
        sha1 (bytes): This ROM's sha1 hash.
        serial (str): This ROM's serial/game code.
        status (str): ROM/dump status. 'verified' means a dump is good.
    """
    __slots__ = ('name', 'size', 'md5', 'sha1', 'serial', 'status')
    def __init__(self, name, size, md5, sha1, serial, status):
        self.name = name
        self.size = size
        self.md5 = md5
        self.sha1 = sha1
        self.serial = serial
        self.status = status
    def __str__(self):
        return '{} #{} ({})'.format(self.name, self.serial, self.status)
class ROM:
    """ GBA ROM representation.
    See https://problemkaputt.de/gbatek.htm#gbacartridgeheader
    Attributes:
        size (int): Size of the ROM in bytes.
    Args:
        path (str): Path to ROM.
        detect (bool): Whether to attempt & display ROM detection. Defaults to `True`.
    """
    def __init__(self, path, detect=True):
        # False = "info not yet computed"; the info property caches None for
        # ROMs not found in the database.
        self._info = False
        with open(path, 'rb') as f:
            self.buffer = f.read()
        if len(self.buffer) > 0x02000000: # 32 MiB
            warn(f'ROM size {len(self.buffer)/2**20:.2f} MiB larger than 32 MiB address space.')
        self.size = len(self.buffer)
        # Seekable view over the same bytes, used by read() and dist().
        self.f = BytesIO(self.buffer)
        if detect:
            info = self.info
            if info:
                status = '✔' if info.status == 'verified' else f'({info.status})'
                eprint(f"ROM detected: '{info.name}' {status}")
                if info.status != 'verified': # Bad dump
                    digest = ''.join('%02X' % b for b in info.sha1)
                    warn(f'Unverified/bad dump! sha1: {digest}')
            else:
                eprint(f"ROM unknown: '{self.title}' {self.game_code}-{self.maker_code}")
    @property
    def title(self):
        """ Title/name of this ROM. """
        # 12 bytes at header offset 0x0A0, NUL-truncated.
        b = self.readb(0x0A0, 12)
        index = b.find(0)
        if index != -1:
            b = b[:index]
        return b.decode('ascii')
    @property
    def game_code(self):
        """ Game code of this ROM. """
        b = self.readb(0x0AC, 4)
        return b.decode('ascii')
    @property
    def maker_code(self):
        """ Maker code of this ROM. """
        b = self.readb(0x0B0, 2)
        return b.decode('ascii')
    @property
    def info(self):
        """ ROM info, if available. Otherwise `None`. """
        global ROM_DB, DB_F
        if self._info is not False: # Cache info value
            return self._info
        if ROM_DB is None:
            # Load the packaged database on first use.
            ROM_DB = pickle.load(DB_F)
        by_serial, by_md5, by_sha1 = ROM_DB
        h = sha1()
        h.update(self.buffer)
        digest = h.digest()
        info0 = by_sha1.get(digest, None)
        h = md5()
        h.update(self.buffer)
        digest = h.digest()
        info1 = by_md5.get(digest, None)
        # Require the sha1 and md5 lookups to agree on the same record.
        self._info = info0 if info0 is info1 else None
        return self._info
    def read(self, addr, size=1, safe=True):
        """ Read a little-endian integer of any size at an address.
        Args:
            addr (int): Address to read.
            size (int): Size of integer in bytes.
            safe (bool): Maintain original cursor position. Defaults to `True`.
        """
        if safe:
            cursor = self.f.tell()
        # Addresses are masked to the 24-bit cartridge offset.
        self.f.seek(addr & 0xffffff)
        b = self.f.read(size)
        if safe:
            self.f.seek(cursor)
        return int.from_bytes(b, 'little', signed=False)
    def readb(self, addr, n): # Read n bytes at address
        addr &= 0xffffff
        return self.buffer[addr:addr+n]
    def dist(self, addr=0x08000000, count=None): # Disassemble ROM
        """Yield disassembled instructions starting at addr (all, or `count` of them)."""
        self.f.seek(addr & 0xffffff)
        if count is None:
            yield from disasm(self.f, addr)
        else:
            yield from disasm(self.f, addr, count)
def make_rom_db(path): # Build db from XML
    """Parse a No-Intro style XML dat at `path` into gba-db.pickle in the cwd."""
    tree = ET.parse(path)
    by_serial = {}
    by_md5 = {}
    by_sha1 = {} # Serial -> ROMInfo
    for game in tree.findall('game'):
        for rom in game.findall('rom'):
            name = rom.get('name')
            if name.endswith('.gba'):
                name = name[:-4]
            print(name)
            size = int(rom.get('size'))
            serial = rom.get('serial')
            if serial is None: # Skip games without serial numbers
                continue
            info = ROMInfo(name, size,
                           bytes.fromhex(rom.get('md5')),
                           bytes.fromhex(rom.get('sha1')),
                           serial.upper(), rom.get('status'))
            by_serial[info.serial] = info
            by_md5[info.md5] = info
            by_sha1[info.sha1] = info
    with open('gba-db.pickle', 'wb') as f:
        pickle.dump((by_serial, by_md5, by_sha1), f)
ace2f97c9623377cdcbd0855c3d8a30781faf0d7 | 5,511 | py | Python | dm_control/suite/cheetah_distractor.py | dingyiming0427/dm_control | 4e9c3a0c91002ac49308faf6c61aa3ddad2ef548 | [
"Apache-2.0"
] | null | null | null | dm_control/suite/cheetah_distractor.py | dingyiming0427/dm_control | 4e9c3a0c91002ac49308faf6c61aa3ddad2ef548 | [
"Apache-2.0"
] | null | null | null | dm_control/suite/cheetah_distractor.py | dingyiming0427/dm_control | 4e9c3a0c91002ac49308faf6c61aa3ddad2ef548 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cheetah Domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import random
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10

# Running speed above which reward is 1.
_RUN_SPEED = 10

# Registry that the @SUITE.add() factories below publish into.
SUITE = containers.TaggedTasks()
def get_model_and_assets():
  """Returns a tuple containing the model XML string and a dict of assets."""
  model_xml = common.read_model('cheetah_distractor.xml')
  return model_xml, common.ASSETS
@SUITE.add()
def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
  """Returns an environment running the standard Cheetah task."""
  physics = Physics.from_xml_string(*get_model_and_assets())
  task = Cheetah(random=random)
  if environment_kwargs is None:
    environment_kwargs = {}
  return control.Environment(
      physics, task, time_limit=time_limit, **environment_kwargs)
@SUITE.add()
def run_linear(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
  """Returns the run task with linearly drifting distractors."""
  physics = Physics.from_xml_string(*get_model_and_assets())
  task = Cheetah(random=random, distractor_style=1)
  if environment_kwargs is None:
    environment_kwargs = {}
  return control.Environment(
      physics, task, time_limit=time_limit, **environment_kwargs)
class Physics(mujoco.Physics):
  """Physics simulation with additional features for the Cheetah domain."""

  def speed(self):
    """Returns the horizontal speed of the Cheetah."""
    # First component of the torso subtree linear-velocity sensor is the
    # forward (x) velocity.
    return self.named.data.sensordata['torso_subtreelinvel'][0]
class Cheetah(base.Task):
  """A `Task` to train a running Cheetah, with two moving visual distractors."""

  def __init__(self, random=random, distractor_style=0):
    """Initializes the task.

    Args:
      random: forwarded to `base.Task`.
        NOTE(review): the default is the stdlib `random` *module*, not the
        conventional None; every factory in this file passes the argument
        explicitly, so the default path looks untested -- confirm before
        relying on it.
      distractor_style: 0 -> distractors teleport to fresh random positions
        on every observation; 1 -> distractors drift along piecewise-linear
        paths (see `get_observation`).
    """
    self._distractor_style = distractor_style
    self._step_size = 0.5
    self.sample_new_dir()
    # Horizontal offsets of the two distractors relative to the torso.
    self.x1 = np.random.uniform(-1, 1)
    self.x2 = np.random.uniform(-1, 1)
    super(Cheetah, self).__init__(random=random)

  def initialize_episode(self, physics):
    """Sets the state of the environment at the start of each episode."""
    # The indexing below assumes that all joints have a single DOF.
    assert physics.model.nq == physics.model.njnt
    is_limited = physics.model.jnt_limited == 1
    lower, upper = physics.model.jnt_range[is_limited].T
    physics.data.qpos[is_limited] = self.random.uniform(lower, upper)

    # Stabilize the model before the actual simulation.
    for _ in range(200):
      physics.step()

    physics.data.time = 0
    self._timeout_progress = 0
    # Place the distractors near the cheetah's starting position.
    cheetah_x = physics.data.qpos[0]
    physics.named.data.qpos['dis1x'] = cheetah_x + self.x1
    physics.named.data.qpos['dis1y'] = np.random.uniform(1, 2)
    physics.named.data.qpos['dis2x'] = cheetah_x + self.x2
    physics.named.data.qpos['dis2y'] = np.random.uniform(1, 2)
    super(Cheetah, self).initialize_episode(physics)

  def sample_new_dir(self):
    """Draws a fresh drift direction (unit vector * step size) per distractor."""
    dirs = np.random.uniform(-1, 1, size=(2, 2))
    # Normalize rows; the epsilon avoids division by zero for a zero draw.
    dirs = dirs / (np.linalg.norm(dirs, axis=1, keepdims=True) + 1e-8)
    self._current_dir = dirs * self._step_size

  def get_observation(self, physics):
    """Returns an observation of the state, ignoring horizontal position."""
    obs = collections.OrderedDict()
    # Ignores horizontal position to maintain translational invariance.
    obs['position'] = physics.data.qpos[1:].copy()
    obs['velocity'] = physics.velocity()
    # NOTE(review): mutating qpos here moves the distractors as a side
    # effect of *observing* -- presumably intentional for this distractor
    # benchmark, but worth confirming against the physics stepping order.
    cheetah_x = physics.data.qpos[0]
    if self._distractor_style == 0:
      # Teleport both distractors to random positions around the torso.
      physics.named.data.qpos['dis1x'] = cheetah_x + np.random.uniform(-2, 2)
      physics.named.data.qpos['dis1y'] = np.random.uniform(0, 3)
      physics.named.data.qpos['dis2x'] = cheetah_x + np.random.uniform(-2, 2)
      physics.named.data.qpos['dis2y'] = np.random.uniform(0, 3)
    elif self._distractor_style == 1:
      # Occasionally re-sample the drift direction, then take one drift step,
      # clamping x offsets to [-3, 3] and heights to [0, 3].
      if random.random() < 0.15:
        self.sample_new_dir()
      self.x1 = np.clip(self.x1 + self._current_dir[0, 0], -3, 3)
      self.x2 = np.clip(self.x2 + self._current_dir[1, 0], -3, 3)
      physics.named.data.qpos['dis1x'] = cheetah_x + self.x1
      physics.named.data.qpos['dis1y'] = np.clip(physics.named.data.qpos['dis1y'] + self._current_dir[0, 1], 0, 3)
      physics.named.data.qpos['dis2x'] = cheetah_x + self.x2
      physics.named.data.qpos['dis2y'] = np.clip(physics.named.data.qpos['dis2y'] + self._current_dir[1, 1], 0, 3)
    return obs

  def get_reward(self, physics):
    """Returns a reward to the agent."""
    # Linear ramp: 0 at zero speed, 1 at _RUN_SPEED and above.
    return rewards.tolerance(physics.speed(),
                             bounds=(_RUN_SPEED, float('inf')),
                             margin=_RUN_SPEED,
                             value_at_margin=0,
                             sigmoid='linear')
| 37.489796 | 114 | 0.687897 |
ace2f9eaa4f0c44f7c91b722b151ef23f2f26a9a | 22,271 | py | Python | wbb/modules/admin.py | Kingswibu/Fujiwarabot | 059019f811e961e6ce0fae19b9c22a153f8b3267 | [
"MIT"
] | 2 | 2022-01-22T11:20:57.000Z | 2022-01-22T11:37:41.000Z | wbb/modules/admin.py | Kingswibu/Fujiwarabot | 059019f811e961e6ce0fae19b9c22a153f8b3267 | [
"MIT"
] | null | null | null | wbb/modules/admin.py | Kingswibu/Fujiwarabot | 059019f811e961e6ce0fae19b9c22a153f8b3267 | [
"MIT"
] | 3 | 2022-01-23T18:00:50.000Z | 2022-03-22T15:10:46.000Z | """
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
from pyrogram import filters
from pyrogram.types import CallbackQuery, ChatPermissions, Message
from wbb import BOT_ID, SUDOERS, app
from wbb.core.decorators.errors import capture_err
from wbb.core.keyboard import ikb
from wbb.utils.dbfunctions import (add_warn, get_warn, int_to_alpha,
remove_warns, save_filter)
from wbb.utils.functions import (extract_user, extract_user_and_reason,
time_converter)
# Module metadata consumed by the bot's help system.
__MODULE__ = "Admin"
__HELP__ = """/ban - Ban A User
/dban - Delete the replied message banning its sender
/tban - Ban A User For Specific Time
/unban - Unban A User
/warn - Warn A User
/dwarn - Delete the replied message warning its sender
/rmwarns - Remove All Warning of A User
/warns - Show Warning Of A User
/kick - Kick A User
/dkick - Delete the replied message kicking its sender
/purge - Purge Messages
/del - Delete Replied Message
/promote - Promote A Member
/fullpromote - Promote A Member With All Rights
/demote - Demote A Member
/pin - Pin A Message
/mute - Mute A User
/tmute - Mute A User For Specific Time
/unmute - Unmute A User
/ban_ghosts - Ban Deleted Accounts
/report | @admins | @admin - Report A Message To Admins."""
async def member_permissions(chat_id: int, user_id: int):
    """Return the names of the admin permissions `user_id` holds in `chat_id`.

    Returns an empty list when the chat member cannot be fetched.
    """
    try:
        member = await app.get_chat_member(chat_id, user_id)
    except Exception:
        return []
    permission_names = (
        "can_post_messages",
        "can_edit_messages",
        "can_delete_messages",
        "can_restrict_members",
        "can_promote_members",
        "can_change_info",
        "can_invite_users",
        "can_pin_messages",
        "can_manage_voice_chats",
    )
    # Same order and truthiness semantics as the original if-chain.
    return [name for name in permission_names if getattr(member, name)]
from wbb.core.decorators.permissions import adminsOnly
async def list_admins(chat_id: int):
    """Return the user ids of all administrators of `chat_id`."""
    admin_ids = []
    async for member in app.iter_chat_members(chat_id, filter="administrators"):
        admin_ids.append(member.user.id)
    return admin_ids
async def current_chat_permissions(chat_id):
    """Return the names of the default member permissions enabled in `chat_id`."""
    chat = await app.get_chat(chat_id)
    perm = chat.permissions
    permission_names = (
        "can_send_messages",
        "can_send_media_messages",
        "can_send_stickers",
        "can_send_animations",
        "can_send_games",
        "can_use_inline_bots",
        "can_add_web_page_previews",
        "can_send_polls",
        "can_change_info",
        "can_invite_users",
        "can_pin_messages",
    )
    # Same order and truthiness semantics as the original if-chain.
    return [name for name in permission_names if getattr(perm, name)]
# Get List Of Members In A Chat
async def list_members(group_id):
    """Return the user ids of every member of `group_id`."""
    member_ids = []
    async for member in app.iter_chat_members(group_id):
        member_ids.append(member.user.id)
    return member_ids
# Purge Messages
@app.on_message(filters.command("purge") & ~filters.edited & ~filters.private)
@adminsOnly("can_delete_messages")
async def purgeFunc(_, message: Message):
    """Delete everything from the replied-to message up to the /purge command.

    Telegram caps bulk deletion at 100 messages per API call, so ids are
    flushed in batches of 100.
    """
    await message.delete()
    if not message.reply_to_message:
        return await message.reply_text("Reply to a message to purge from.")
    chat_id = message.chat.id
    message_ids = []
    # Half-open range: includes the replied message, excludes the command
    # message (already deleted above).
    for message_id in range(
        message.reply_to_message.message_id,
        message.message_id,
    ):
        message_ids.append(message_id)

        # Max message deletion limit is 100
        if len(message_ids) == 100:
            await app.delete_messages(
                chat_id=chat_id,
                message_ids=message_ids,
                revoke=True,  # For both sides
            )

            # To delete more than 100 messages, start again
            message_ids = []

    # Delete if any messages left
    if message_ids:
        await app.delete_messages(
            chat_id=chat_id,
            message_ids=message_ids,
            revoke=True,
        )
# Kick members
@app.on_message(
    filters.command(["kick", "dkick"]) & ~filters.edited & ~filters.private
)
@adminsOnly("can_restrict_members")
async def kickFunc(_, message: Message):
    """Kick the targeted user (``/kick``); ``/dkick`` also deletes the
    replied-to message. The bot itself, sudoers and admins are protected."""
    user_id, reason = await extract_user_and_reason(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    if user_id == BOT_ID:
        return await message.reply_text(
            "I can't kick myself, i can leave if you want."
        )
    if user_id in SUDOERS:
        return await message.reply_text("You Wanna Kick The Elevated One?")
    if user_id in (await list_admins(message.chat.id)):
        return await message.reply_text(
            "I can't kick an admin, You know the rules, so do i."
        )
    mention = (await app.get_users(user_id)).mention
    msg = f"""
**Kicked User:** {mention}
**Kicked By:** {message.from_user.mention if message.from_user else 'Anon'}
**Reason:** {reason or 'No Reason Provided.'}"""
    if message.command[0][0] == "d":
        await message.reply_to_message.delete()
    # kick_member bans the user; the unban below turns it into a plain kick.
    await message.chat.kick_member(user_id)
    await message.reply_text(msg)
    await asyncio.sleep(1)
    await message.chat.unban_member(user_id)
# Ban members
@app.on_message(
    filters.command(["ban", "dban", "tban"])
    & ~filters.edited
    & ~filters.private
)
@adminsOnly("can_restrict_members")
async def banFunc(_, message: Message):
    """Ban a user: ``/ban`` (permanent), ``/dban`` (also delete the replied
    message), ``/tban <time> [reason]`` (temporary, e.g. ``3d``).

    NOTE(review): for ``/tban`` with no arguments `reason` is None, so
    ``reason.split`` below raises AttributeError *before* the try block --
    confirm and guard upstream.
    """
    user_id, reason = await extract_user_and_reason(message, sender_chat=True)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    if user_id == BOT_ID:
        return await message.reply_text(
            "I can't ban myself, i can leave if you want."
        )
    if user_id in SUDOERS:
        return await message.reply_text(
            "You Wanna Ban The Elevated One?, RECONSIDER!"
        )
    if user_id in (await list_admins(message.chat.id)):
        return await message.reply_text(
            "I can't ban an admin, You know the rules, so do i."
        )
    try:
        mention = (await app.get_users(user_id)).mention
    except IndexError:
        # Target is an anonymous sender chat rather than a user account.
        mention = (
            message.reply_to_message.sender_chat.title
            if message.reply_to_message
            else "Anon"
        )
    msg = (
        f"**Banned User:** {mention}\n"
        f"**Banned By:** {message.from_user.mention if message.from_user else 'Anon'}\n"
    )
    if message.command[0][0] == "d":
        await message.reply_to_message.delete()
    if message.command[0] == "tban":
        split = reason.split(None, 1)
        time_value = split[0]
        temp_reason = split[1] if len(split) > 1 else ""
        temp_ban = await time_converter(message, time_value)
        msg += f"**Banned For:** {time_value}\n"
        if temp_reason:
            msg += f"**Reason:** {temp_reason}"
        try:
            # Only durations with at most two digits before the unit pass.
            if len(time_value[:-1]) < 3:
                await message.chat.kick_member(user_id, until_date=temp_ban)
                await message.reply_text(msg)
            else:
                await message.reply_text("You can't use more than 99")
        except AttributeError:
            pass
        return
    if reason:
        msg += f"**Reason:** {reason}"
    await message.chat.kick_member(user_id)
    await message.reply_text(msg)
# Unban members
@app.on_message(filters.command("unban") & ~filters.edited & ~filters.private)
@adminsOnly("can_restrict_members")
async def unbanFunc(_, message: Message):
    """Unban a user given as ``/unban <username|id>`` or via reply."""
    # we don't need reasons for unban, also, we
    # don't need to get "text_mention" entity, because
    # normal users won't get text_mention if the the user
    # they want to unban is not in the group.
    if len(message.command) == 2:
        user = message.text.split(None, 1)[1]
    elif len(message.command) == 1 and message.reply_to_message:
        user = message.reply_to_message.from_user.id
    else:
        return await message.reply_text(
            "Provide a username or reply to a user's message to unban."
        )
    await message.chat.unban_member(user)
    umention = (await app.get_users(user)).mention
    await message.reply_text(f"Unbanned! {umention}")
# Delete messages
@app.on_message(filters.command("del") & ~filters.edited & ~filters.private)
@adminsOnly("can_delete_messages")
async def deleteFunc(_, message: Message):
    """Delete the replied-to message, then the /del command message itself."""
    target = message.reply_to_message
    if not target:
        return await message.reply_text("Reply To A Message To Delete It")
    await target.delete()
    await message.delete()
# Promote Members
@app.on_message(
    filters.command(["promote", "fullpromote"])
    & ~filters.edited
    & ~filters.private
)
@adminsOnly("can_promote_members")
async def promoteFunc(_, message: Message):
    """Promote the targeted user.

    ``/fullpromote`` mirrors every right the bot itself holds;
    ``/promote`` grants only a limited subset.

    Fix: the original resolved the user's mention via ``app.get_users``
    *before* validating `user_id`, so an unresolvable target crashed
    instead of producing the friendly "can't find that user" reply.
    """
    user_id = await extract_user(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    umention = (await app.get_users(user_id)).mention
    bot = await app.get_chat_member(message.chat.id, BOT_ID)
    if user_id == BOT_ID:
        return await message.reply_text("I can't promote myself.")
    if not bot.can_promote_members:
        return await message.reply_text("I don't have enough permissions")
    if message.command[0][0] == "f":
        # /fullpromote: copy every right the bot holds onto the user.
        await message.chat.promote_member(
            user_id=user_id,
            can_change_info=bot.can_change_info,
            can_invite_users=bot.can_invite_users,
            can_delete_messages=bot.can_delete_messages,
            can_restrict_members=bot.can_restrict_members,
            can_pin_messages=bot.can_pin_messages,
            can_promote_members=bot.can_promote_members,
            can_manage_chat=bot.can_manage_chat,
            can_manage_voice_chats=bot.can_manage_voice_chats,
        )
        return await message.reply_text(f"Fully Promoted! {umention}")

    # /promote: limited subset of rights only.
    await message.chat.promote_member(
        user_id=user_id,
        can_change_info=False,
        can_invite_users=bot.can_invite_users,
        can_delete_messages=bot.can_delete_messages,
        can_restrict_members=False,
        can_pin_messages=False,
        can_promote_members=False,
        can_manage_chat=bot.can_manage_chat,
        can_manage_voice_chats=bot.can_manage_voice_chats,
    )
    await message.reply_text(f"Promoted! {umention}")
# Demote Member
@app.on_message(filters.command("demote") & ~filters.edited & ~filters.private)
@adminsOnly("can_promote_members")
async def demote(_, message: Message):
    """Strip every admin right from the targeted user."""
    user_id = await extract_user(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    if user_id == BOT_ID:
        return await message.reply_text("I can't demote myself.")
    if user_id in SUDOERS:
        return await message.reply_text(
            "You wanna demote the elevated one?, RECONSIDER!"
        )
    # "Promoting" with every right set to False expresses a demote.
    await message.chat.promote_member(
        user_id=user_id,
        can_change_info=False,
        can_invite_users=False,
        can_delete_messages=False,
        can_restrict_members=False,
        can_pin_messages=False,
        can_promote_members=False,
        can_manage_chat=False,
        can_manage_voice_chats=False,
    )
    umention = (await app.get_users(user_id)).mention
    await message.reply_text(f"Demoted! {umention}")
# Pin Messages
@app.on_message(filters.command("pin") & ~filters.edited & ~filters.private)
@adminsOnly("can_pin_messages")
async def pin(_, message: Message):
    """Silently pin the replied-to message and store a ``~pinned`` filter
    linking to it so the pin can be recalled later."""
    if not message.reply_to_message:
        return await message.reply_text("Reply to a message to pin it.")
    r = message.reply_to_message
    await r.pin(disable_notification=True)
    await message.reply(
        f"**Pinned [this]({r.link}) message.**",
        disable_web_page_preview=True,
    )
    msg = "Please check the pinned message: ~ " + f"[Check, {r.link}]"
    filter_ = dict(type="text", data=msg)
    await save_filter(message.chat.id, "~pinned", filter_)
# Mute members
@app.on_message(
    filters.command(["mute", "tmute"]) & ~filters.edited & ~filters.private
)
@adminsOnly("can_restrict_members")
async def mute(_, message: Message):
    """Mute a user: ``/mute`` (indefinite) or ``/tmute <time> [reason]``.

    A mute is a restriction with an empty ChatPermissions set; an inline
    "Unmute" button is attached to the confirmation.

    NOTE(review): ``/tmute`` with no arguments leaves `reason` as None, so
    ``reason.split`` below raises AttributeError before the try block --
    confirm and guard upstream.
    """
    user_id, reason = await extract_user_and_reason(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    if user_id == BOT_ID:
        return await message.reply_text("I can't mute myself.")
    if user_id in SUDOERS:
        return await message.reply_text(
            "You wanna mute the elevated one?, RECONSIDER!"
        )
    if user_id in (await list_admins(message.chat.id)):
        return await message.reply_text(
            "I can't mute an admin, You know the rules, so do i."
        )
    mention = (await app.get_users(user_id)).mention
    # Callback data encodes the target so the unmute button knows whom to lift.
    keyboard = ikb({"🚨 Unmute 🚨": f"unmute_{user_id}"})
    msg = (
        f"**Muted User:** {mention}\n"
        f"**Muted By:** {message.from_user.mention if message.from_user else 'Anon'}\n"
    )
    if message.command[0] == "tmute":
        split = reason.split(None, 1)
        time_value = split[0]
        temp_reason = split[1] if len(split) > 1 else ""
        temp_mute = await time_converter(message, time_value)
        msg += f"**Muted For:** {time_value}\n"
        if temp_reason:
            msg += f"**Reason:** {temp_reason}"
        try:
            # Only durations with at most two digits before the unit pass.
            if len(time_value[:-1]) < 3:
                await message.chat.restrict_member(
                    user_id,
                    permissions=ChatPermissions(),
                    until_date=temp_mute,
                )
                await message.reply_text(msg, reply_markup=keyboard)
            else:
                await message.reply_text("You can't use more than 99")
        except AttributeError:
            pass
        return
    if reason:
        msg += f"**Reason:** {reason}"
    await message.chat.restrict_member(user_id, permissions=ChatPermissions())
    await message.reply_text(msg, reply_markup=keyboard)
# Unmute members
@app.on_message(filters.command("unmute") & ~filters.edited & ~filters.private)
@adminsOnly("can_restrict_members")
async def unmute(_, message: Message):
    """Lift a mute from the targeted user."""
    user_id = await extract_user(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    # NOTE(review): relies on unban_member clearing restrict_member
    # restrictions as well -- confirm against the Pyrogram docs.
    await message.chat.unban_member(user_id)
    umention = (await app.get_users(user_id)).mention
    await message.reply_text(f"Unmuted! {umention}")
# Ban deleted accounts
@app.on_message(filters.command("ban_ghosts") & ~filters.private)
@adminsOnly("can_restrict_members")
async def ban_deleted_accounts(_, message: Message):
    """Ban every deleted ("ghost") account in the chat.

    Fix: the success counter is incremented only when the kick call
    actually succeeds; previously failed kicks were also counted, so the
    reported number could overstate the bans performed.
    """
    chat_id = message.chat.id
    deleted_users = []
    async for i in app.iter_chat_members(chat_id):
        if i.user.is_deleted:
            deleted_users.append(i.user.id)
    if deleted_users:
        banned_users = 0
        for deleted_user in deleted_users:
            try:
                await message.chat.kick_member(deleted_user)
                banned_users += 1
            except Exception:
                # Best-effort: skip members we cannot ban.
                pass
        await message.reply_text(f"Banned {banned_users} Deleted Accounts")
    else:
        await message.reply_text("There are no deleted accounts in this chat")
@app.on_message(
    filters.command(["warn", "dwarn"]) & ~filters.edited & ~filters.private
)
@adminsOnly("can_restrict_members")
async def warn_user(_, message: Message):
    """Warn the targeted user (``/dwarn`` also deletes the replied message).

    The third warning (existing warns >= 2) bans the user and clears the
    counter; otherwise the count is incremented and an inline "Remove Warn"
    button is attached.
    """
    user_id, reason = await extract_user_and_reason(message)
    chat_id = message.chat.id
    if not user_id:
        return await message.reply_text("I can't find that user.")
    if user_id == BOT_ID:
        return await message.reply_text(
            "I can't warn myself, i can leave if you want."
        )
    if user_id in SUDOERS:
        return await message.reply_text(
            "You Wanna Warn The Elevated One?, RECONSIDER!"
        )
    if user_id in (await list_admins(chat_id)):
        return await message.reply_text(
            "I can't warn an admin, You know the rules, so do i."
        )
    if user_id not in (await list_members(chat_id)):
        return await message.reply_text("This user isn't here.")
    # Fetch the user profile and the stored warn count concurrently.
    user, warns = await asyncio.gather(
        app.get_users(user_id),
        get_warn(chat_id, await int_to_alpha(user_id)),
    )
    mention = user.mention
    keyboard = ikb({"🚨 Remove Warn 🚨": f"unwarn_{user_id}"})
    warns = warns["warns"] if warns else 0
    if message.command[0][0] == "d":
        await message.reply_to_message.delete()
    if warns >= 2:
        # This is the third strike: ban and reset the stored count.
        await message.chat.kick_member(user_id)
        await message.reply_text(
            f"Number of warns of {mention} exceeded, BANNED!"
        )
        await remove_warns(chat_id, await int_to_alpha(user_id))
    else:
        warn = {"warns": warns + 1}
        msg = f"""
**Warned User:** {mention}
**Warned By:** {message.from_user.mention if message.from_user else 'Anon'}
**Reason:** {reason or 'No Reason Provided.'}
**Warns:** {warns + 1}/3"""
        await message.reply_text(msg, reply_markup=keyboard)
        await add_warn(chat_id, await int_to_alpha(user_id), warn)
@app.on_callback_query(filters.regex("unwarn_"))
async def remove_warning(_, cq: CallbackQuery):
    """Inline-button handler: decrement the warn count of the user encoded
    in the ``unwarn_<user_id>`` callback data, after an admin check."""
    from_user = cq.from_user
    chat_id = cq.message.chat.id
    permissions = await member_permissions(chat_id, from_user.id)
    permission = "can_restrict_members"
    if permission not in permissions:
        return await cq.answer(
            "You don't have enough permissions to perform this action.\n"
            + f"Permission needed: {permission}",
            show_alert=True,
        )
    # NOTE(review): user_id is a *string* here, while the command handlers
    # pass ints to int_to_alpha -- confirm both key forms hit the same
    # database record.
    user_id = cq.data.split("_")[1]
    warns = await get_warn(chat_id, await int_to_alpha(user_id))
    if warns:
        warns = warns["warns"]
    if not warns or warns == 0:
        return await cq.answer("User has no warnings.")
    warn = {"warns": warns - 1}
    await add_warn(chat_id, await int_to_alpha(user_id), warn)
    # Strike through the original warn message and record who lifted it.
    text = cq.message.text.markdown
    text = f"~~{text}~~\n\n"
    text += f"__Warn removed by {from_user.mention}__"
    await cq.message.edit(text)
# Rmwarns
@app.on_message(
    filters.command("rmwarns") & ~filters.edited & ~filters.private
)
@adminsOnly("can_restrict_members")
async def remove_warnings(_, message: Message):
    """Clear all warnings of the user whose message was replied to."""
    if not message.reply_to_message:
        return await message.reply_text(
            "Reply to a message to remove a user's warnings."
        )
    user_id = message.reply_to_message.from_user.id
    mention = message.reply_to_message.from_user.mention
    chat_id = message.chat.id
    warns = await get_warn(chat_id, await int_to_alpha(user_id))
    if warns:
        warns = warns["warns"]
    if warns == 0 or not warns:
        await message.reply_text(f"{mention} have no warnings.")
    else:
        await remove_warns(chat_id, await int_to_alpha(user_id))
        await message.reply_text(f"Removed warnings of {mention}.")
# Warns
@app.on_message(filters.command("warns") & ~filters.edited & ~filters.private)
@capture_err
async def check_warns(_, message: Message):
    """Report how many warnings (out of 3) the targeted user has."""
    user_id = await extract_user(message)
    if not user_id:
        return await message.reply_text("I can't find that user.")
    warns = await get_warn(message.chat.id, await int_to_alpha(user_id))
    mention = (await app.get_users(user_id)).mention
    if warns:
        warns = warns["warns"]
    else:
        return await message.reply_text(f"{mention} has no warnings.")
    return await message.reply_text(f"{mention} has {warns}/3 warnings.")
# Report
@app.on_message(
    (
        filters.command("report")
        | filters.command(["admins", "admin"], prefixes="@")
    )
    & ~filters.edited
    & ~filters.private
)
@capture_err
async def report_user(_, message):
    """Report the replied-to message to the chat admins.

    Admins are notified via invisible mentions (U+2063 separators) appended
    to the report text, which trigger their notifications without showing
    a list of names.
    """
    if not message.reply_to_message:
        return await message.reply_text(
            "Reply to a message to report that user."
        )
    if message.reply_to_message.from_user.id == message.from_user.id:
        return await message.reply_text("Why are you reporting yourself ?")
    list_of_admins = await list_admins(message.chat.id)
    if message.reply_to_message.from_user.id in list_of_admins:
        return await message.reply_text(
            "Do you know that the user you are replying is an admin ?"
        )
    user_mention = message.reply_to_message.from_user.mention
    text = f"Reported {user_mention} to admins!"
    admin_data = await app.get_chat_members(
        chat_id=message.chat.id, filter="administrators"
    )  # will it giv floods ?
    for admin in admin_data:
        if admin.user.is_bot or admin.user.is_deleted:
            # return bots or deleted admins
            continue
        # Invisible character link: pings the admin without visible text.
        text += f"[\u2063](tg://user?id={admin.user.id})"
    await message.reply_to_message.reply_text(text)
| 34.001527 | 88 | 0.665888 |
ace2fb6e65b81af5ec4299e4712ce9ae0babc7e1 | 1,449 | py | Python | BOJ/graph_boj/tree_order.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/tree_order.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/graph_boj/tree_order.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # BOJ 1991
import sys
si = sys.stdin.readline
"""
tree = [
None,
"A",
"B",
"C",
"D",
None,
"E",
"F",
None,
None,
None,
None,
None,
None,
None,
"G",
]
"""
# Number of nodes. The tree is stored as a 1-indexed implicit binary heap:
# node i's children live at indices 2*i and 2*i+1 (root at index 1).
n = int(si())
tree = [None] * (2 ** n)
for _ in range(n):
    root, c1, c2 = si().split()
    # NOTE(review): assumes the first input line describes the root node
    # ("A" in BOJ 1991) -- confirm the judge guarantees that ordering.
    if not tree[1]:
        tree[1] = root
        if 2 < len(tree) and c1 != ".":
            tree[2] = c1
        if 3 < len(tree) and c2 != ".":
            tree[3] = c2
    else:
        # Linear scan for the parent's slot, then attach its children.
        # "." marks a missing child; bounds checks avoid IndexError.
        for i in range(len(tree)):
            if tree[i] == root:
                if 2 * i < len(tree) and c1 != ".":
                    tree[2 * i] = c1
                if 2 * i + 1 < len(tree) and c2 != ".":
                    tree[2 * i + 1] = c2
def pre_order(root):
    """Print the pre-order traversal (node, left, right), no newlines."""
    print(tree[root], end="")
    for child in (root * 2, root * 2 + 1):
        if child < len(tree) and tree[child]:
            pre_order(child)
def inorder(root):
    """Print the in-order traversal (left, node, right), no newlines."""
    left, right = root * 2, root * 2 + 1
    if left < len(tree) and tree[left]:
        inorder(left)
    print(tree[root], end="")
    if right < len(tree) and tree[right]:
        inorder(right)
def post_order(root):
    """Print the post-order traversal (left, right, node), no newlines."""
    left, right = root * 2, root * 2 + 1
    if left < len(tree) and tree[left]:
        post_order(left)
    if right < len(tree) and tree[right]:
        post_order(right)
    print(tree[root], end="")
# Emit the traversals, one per line, starting from the root at index 1.
pre_order(1)
print()
inorder(1)
print()
post_order(1) | 19.849315 | 55 | 0.449275 |
ace2fc1eea395594bdb0ad6e073f86f71a7db724 | 799 | py | Python | django/conf/locale/km/formats.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.5/django/conf/locale/km/formats.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.5/django/conf/locale/km/formats.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Khmer (km) locale formats; commented-out names fall back to Django's
# global defaults.
DATE_FORMAT = 'j ខែ F ឆ្នាំ Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j ខែ F ឆ្នាំ Y, G:i:s'
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
SHORT_DATETIME_FORMAT = 'j M Y, G:i:s'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# European-style number punctuation: comma decimal mark, dot for thousands.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
| 31.96 | 77 | 0.73592 |
ace2fe0696bc8079214bf9c58bd05207d32065e3 | 3,659 | py | Python | VideoAnalytics/FaceRedactorEventBased/AzureFunction/EventGrid_AMSJob/__init__.py | harmke/media-services-v3-python | dbb67ae9c131b52afa9596e654491560f9f33115 | [
"MIT"
] | null | null | null | VideoAnalytics/FaceRedactorEventBased/AzureFunction/EventGrid_AMSJob/__init__.py | harmke/media-services-v3-python | dbb67ae9c131b52afa9596e654491560f9f33115 | [
"MIT"
] | null | null | null | VideoAnalytics/FaceRedactorEventBased/AzureFunction/EventGrid_AMSJob/__init__.py | harmke/media-services-v3-python | dbb67ae9c131b52afa9596e654491560f9f33115 | [
"MIT"
] | null | null | null | import json
import logging
import os
from datetime import datetime, timedelta
from urllib.parse import quote
import adal
from msrestazure.azure_active_directory import MSIAuthentication, AdalAuthentication
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices
from azure.mgmt.media.models import (
Asset,
Job,
JobInputHttp,
JobOutputAsset)
import azure.functions as func
from azure.storage.filedatalake import DataLakeServiceClient, FileSasPermissions, generate_file_sas
def main(event: func.EventGridEvent):
    """Event Grid trigger: submit an AMS face-redaction job for the blob
    that raised the event.

    Steps: log the event, parse the blob URL out of it, authenticate to
    Azure Media Services, create an output Asset, mint a one-hour read-only
    user-delegation SAS for the input blob, and submit a job against the
    ``faceredact`` transform with that SAS URL as an HTTP input.
    """
    result = json.dumps({
        'id': event.id,
        'data': event.get_json(),
        'topic': event.topic,
        'subject': event.subject,
        'event_type': event.event_type,
    })
    logging.info('Python EventGrid trigger processed an event: %s', result)

    # Decompose https://{account}.blob.core.windows.net/{container}/{blob}?...
    blob_url = event.get_json().get('url')
    blob_name = blob_url.split("/")[-1].split("?")[0]
    origin_container_name = blob_url.split("/")[-2].split("?")[0]
    storage_account_name = blob_url.split("//")[1].split(".")[0]

    # Deployment configuration from the Function App's application settings.
    ams_account_name = os.getenv('ACCOUNTNAME')
    resource_group_name = os.getenv('RESOURCEGROUP')
    subscription_id = os.getenv('SUBSCRIPTIONID')
    client_id = os.getenv('AZURE_CLIENT_ID')
    client_secret = os.getenv('AZURE_CLIENT_SECRET')
    TENANT_ID = os.getenv('AZURE_TENANT_ID')
    storage_blob_url = 'https://' + storage_account_name + '.blob.core.windows.net/'
    transform_name = 'faceredact'
    LOGIN_ENDPOINT = AZURE_PUBLIC_CLOUD.endpoints.active_directory
    RESOURCE = AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id

    # NOTE(review): timestamps use naive utcnow(), and the ':' characters in
    # these names may violate AMS naming rules -- confirm against the service.
    out_asset_name = 'faceblurringOutput_' + datetime.utcnow().strftime("%m-%d-%Y_%H:%M:%S")
    out_alternate_id = 'faceblurringOutput_' + datetime.utcnow().strftime("%m-%d-%Y_%H:%M:%S")
    out_description = 'Redacted video with blurred faces'

    # Service-principal (adal-based) auth for the AMS management client.
    context = adal.AuthenticationContext(LOGIN_ENDPOINT + "/" + TENANT_ID)
    credentials = AdalAuthentication(context.acquire_token_with_client_credentials, RESOURCE, client_id, client_secret)
    client = AzureMediaServices(credentials, subscription_id)

    # Output asset that will receive the redacted video.
    output_asset = Asset(alternate_id=out_alternate_id,
                         description=out_description)
    client.assets.create_or_update(
        resource_group_name, ams_account_name, out_asset_name, output_asset)

    # One-hour read-only user-delegation SAS so AMS can fetch the input blob.
    token_credential = DefaultAzureCredential()
    datalake_service_client = DataLakeServiceClient(account_url=storage_blob_url,
                                                    credential=token_credential)
    delegation_key = datalake_service_client.get_user_delegation_key(
        key_start_time=datetime.utcnow(), key_expiry_time=datetime.utcnow() + timedelta(hours=1))
    sas_token = generate_file_sas(account_name=storage_account_name, file_system_name=origin_container_name, directory_name="",
                                  file_name=blob_name, credential=delegation_key, permission=FileSasPermissions(read=True),
                                  expiry=datetime.utcnow() + timedelta(hours=1), protocol="https")
    sas_url = "{}?{}".format(quote(blob_url, safe='/:'), sas_token)

    # Submit the transform job with the SAS URL as an HTTP job input.
    job_name = 'Faceblurring-job_' + datetime.utcnow().strftime("%m-%d-%Y_%H:%M:%S")
    job_input = JobInputHttp(label="Video_asset", files=[sas_url])
    job_output = JobOutputAsset(asset_name=out_asset_name)
    job_parameters = Job(input=job_input, outputs=[job_output])
    client.jobs.create(resource_group_name, ams_account_name,
                       transform_name, job_name, parameters=job_parameters)
| 45.7375 | 127 | 0.720142 |
ace2fe600cb4e41dc1d11304b1deeac487a0acd5 | 10,217 | bzl | Python | bazel/envoy_test.bzl | td-arash/envoy | acb884768f257c32af4a17cc5b00b4fb274e30af | [
"Apache-2.0"
] | null | null | null | bazel/envoy_test.bzl | td-arash/envoy | acb884768f257c32af4a17cc5b00b4fb274e30af | [
"Apache-2.0"
] | null | null | null | bazel/envoy_test.bzl | td-arash/envoy | acb884768f257c32af4a17cc5b00b4fb274e30af | [
"Apache-2.0"
] | null | null | null | load("@rules_python//python:defs.bzl", "py_binary")
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.
# Envoy test targets. This includes both test library and test binary targets.
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load(":envoy_binary.bzl", "envoy_cc_binary")
load(":envoy_library.bzl", "tcmalloc_external_deps")
load(
":envoy_internal.bzl",
"envoy_copts",
"envoy_external_dep_path",
"envoy_linkstatic",
"envoy_select_force_libcpp",
"envoy_stdlib_deps",
"tcmalloc_external_dep",
)
# Envoy C++ related test infrastructure (that want gtest, gmock, but may be
# relied on by envoy_cc_test_library) should use this function.
def _envoy_cc_test_infrastructure_library(
        name,
        srcs = [],
        hdrs = [],
        data = [],
        external_deps = [],
        deps = [],
        repository = "",
        tags = [],
        include_prefix = None,
        copts = [],
        **kargs):
    """Declares a testonly cc_library with googletest (gtest/gmock) attached.

    Args:
        name: target name.
        srcs, hdrs, data: forwarded to cc_library.
        external_deps: dependency names resolved via envoy_external_dep_path().
        deps: ordinary Bazel deps.
        repository: repository prefix ("" when built from within Envoy).
        tags, include_prefix, copts: forwarded to cc_library.
        **kargs: any additional cc_library attributes.
    """

    # Add implicit tcmalloc external dependency(if available) in order to enable
    # CPU and heap profiling in tests.
    deps += tcmalloc_external_deps(repository)

    cc_library(
        name = name,
        srcs = srcs,
        hdrs = hdrs,
        data = data,
        copts = envoy_copts(repository, test = True) + copts,
        testonly = 1,
        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [
            envoy_external_dep_path("googletest"),
        ],
        tags = tags,
        include_prefix = include_prefix,
        # alwayslink = 1 keeps every object file in the link (e.g. static test
        # registrations); linkstatic keeps the library statically linked.
        alwayslink = 1,
        linkstatic = envoy_linkstatic(),
        **kargs
    )
# Compute the test linkopts based on various options.
def _envoy_test_linkopts():
    """Returns the linker options for test binaries, selected per platform."""
    return select({
        "@envoy//bazel:apple": [
            # See note here: https://luajit.org/install.html
            "-pagezero_size 10000",
            "-image_base 100000000",
        ],
        "@envoy//bazel:windows_x86_64": [
            "-DEFAULTLIB:advapi32.lib",
            "-DEFAULTLIB:ws2_32.lib",
            "-DEFAULTLIB:iphlpapi.lib",
            "-WX",
        ],
        # TODO(mattklein123): It's not great that we universally link against the following libs.
        # In particular, -latomic and -lrt are not needed on all platforms. Make this more granular.
        "//conditions:default": ["-pthread", "-lrt", "-ldl"],
    }) + envoy_select_force_libcpp([], ["-lstdc++fs", "-latomic"])
# Envoy C++ fuzz test targets. These are not included in coverage runs.
def envoy_cc_fuzz_test(
        name,
        corpus,
        dictionaries = [],
        repository = "",
        size = "medium",
        deps = [],
        tags = [],
        **kwargs):
    """Declares a C++ fuzz test plus its corpus tarball and oss-fuzz binary.

    Args:
        name: base target name; also derives "<name>_lib", "<name>_corpus",
            "<name>_corpus_tar" and "<name>_driverless".
        corpus: either a label (starting with "//", ":" or "@") or a source
            directory to be globbed into a corpus filegroup.
        dictionaries: optional fuzzing dictionaries bundled into the tarball.
        repository: repository prefix ("" when built from within Envoy).
        size, deps, tags, **kwargs: forwarded to the generated targets.
    """

    # A bare directory path gets globbed into an implicit corpus filegroup;
    # anything that already looks like a label is used as-is.
    if not (corpus.startswith("//") or corpus.startswith(":") or corpus.startswith("@")):
        corpus_name = name + "_corpus"
        corpus = native.glob([corpus + "/**"])
        native.filegroup(
            name = corpus_name,
            srcs = corpus,
        )
    else:
        corpus_name = corpus

    # Tarball of corpus (+ dictionaries) consumed by external fuzzing infra.
    tar_src = [corpus_name]
    if dictionaries:
        tar_src += dictionaries
    pkg_tar(
        name = name + "_corpus_tar",
        srcs = tar_src,
        testonly = 1,
    )

    fuzz_copts = ["-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION"]
    test_lib_name = name + "_lib"
    envoy_cc_test_library(
        name = test_lib_name,
        deps = deps + envoy_stdlib_deps() + [
            repository + "//test/fuzz:fuzz_runner_lib",
        ],
        repository = repository,
        tags = tags,
        **kwargs
    )

    # Regular test target: runs the fuzzer over the checked-in corpus, or as a
    # true libFuzzer target when built with --config=libfuzzer.
    cc_test(
        name = name,
        copts = fuzz_copts + envoy_copts("@envoy", test = True),
        linkopts = _envoy_test_linkopts() + select({
            "@envoy//bazel:libfuzzer": ["-fsanitize=fuzzer"],
            "//conditions:default": [],
        }),
        linkstatic = envoy_linkstatic(),
        args = select({
            "@envoy//bazel:libfuzzer_coverage": ["$(locations %s)" % corpus_name],
            "@envoy//bazel:libfuzzer": [],
            "//conditions:default": ["$(locations %s)" % corpus_name],
        }),
        data = [corpus_name],
        # No fuzzing on macOS or Windows
        deps = select({
            "@envoy//bazel:apple": [repository + "//test:dummy_main"],
            "@envoy//bazel:windows_x86_64": [repository + "//test:dummy_main"],
            "@envoy//bazel:libfuzzer": [
                ":" + test_lib_name,
            ],
            "//conditions:default": [
                ":" + test_lib_name,
                repository + "//test/fuzz:main",
            ],
        }),
        size = size,
        tags = ["fuzz_target"] + tags,
    )

    # This target exists only for
    # https://github.com/google/oss-fuzz/blob/master/projects/envoy/build.sh. It won't yield
    # anything useful on its own, as it expects to be run in an environment where the linker options
    # provide a path to FuzzingEngine.
    cc_binary(
        name = name + "_driverless",
        copts = fuzz_copts + envoy_copts("@envoy", test = True),
        linkopts = ["-lFuzzingEngine"] + _envoy_test_linkopts(),
        linkstatic = 1,
        testonly = 1,
        deps = [":" + test_lib_name],
        tags = ["manual"] + tags,
    )
# Envoy C++ test targets should be specified with this function.
def envoy_cc_test(
        name,
        srcs = [],
        data = [],
        # List of pairs (Bazel shell script target, shell script args)
        # NOTE(review): the comment above appears orphaned from a removed
        # parameter; it does not describe `repository` below -- confirm intent.
        repository = "",
        external_deps = [],
        deps = [],
        tags = [],
        args = [],
        copts = [],
        shard_count = None,
        coverage = True,
        local = False,
        size = "medium",
        flaky = False):
    """Declares an Envoy cc_test with Envoy copts, test main and gtest wired in.

    Args:
        name, srcs, data: forwarded to cc_test.
        repository: repository prefix ("" when built from within Envoy).
        external_deps: dependency names resolved via envoy_external_dep_path().
        deps, tags, args, copts: forwarded to cc_test.
        shard_count, local, size, flaky: forwarded to cc_test.
        coverage: when False the target is tagged "nocoverage" and excluded
            from coverage runs.
    """
    coverage_tags = tags + ([] if coverage else ["nocoverage"])
    cc_test(
        name = name,
        srcs = srcs,
        data = data,
        copts = envoy_copts(repository, test = True) + copts,
        linkopts = _envoy_test_linkopts(),
        linkstatic = envoy_linkstatic(),
        malloc = tcmalloc_external_dep(repository),
        deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [
            repository + "//test:main",
        ],
        # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51
        # 2 - by default, mocks act as StrictMocks.
        args = args + ["--gmock_default_mock_behavior=2"],
        tags = coverage_tags,
        local = local,
        shard_count = shard_count,
        size = size,
        flaky = flaky,
    )
# Envoy C++ test related libraries (that want gtest, gmock) should be specified
# with this function.
def envoy_cc_test_library(
        name,
        srcs = [],
        hdrs = [],
        data = [],
        external_deps = [],
        deps = [],
        repository = "",
        tags = [],
        include_prefix = None,
        copts = [],
        **kargs):
    """Declares a test helper library (wants gtest/gmock).

    Thin wrapper over _envoy_cc_test_infrastructure_library that adds the
    common test printers dependency and public visibility.
    """
    deps = deps + [
        repository + "//test/test_common:printers_includes",
    ]

    # Same as envoy_cc_library
    srcs += select({
        "@envoy//bazel:compdb_build": ["@envoy//bazel/external:empty.cc"],
        "//conditions:default": [],
    })

    _envoy_cc_test_infrastructure_library(
        name,
        srcs,
        hdrs,
        data,
        external_deps,
        deps,
        repository,
        tags,
        include_prefix,
        copts,
        visibility = ["//visibility:public"],
        **kargs
    )
# Envoy test binaries should be specified with this function.
def envoy_cc_test_binary(
        name,
        tags = [],
        **kargs):
    """Declares a testonly Envoy binary with test linkopts applied."""
    envoy_cc_binary(
        name,
        testonly = 1,
        linkopts = _envoy_test_linkopts(),
        # Included in the compilation database build.
        tags = tags + ["compilation_db_dep"],
        **kargs
    )
# Envoy benchmark binaries should be specified with this function.
def envoy_cc_benchmark_binary(
        name,
        deps = [],
        **kargs):
    """Declares a benchmark binary: a test binary linked to the benchmark main."""
    envoy_cc_test_binary(
        name,
        deps = deps + ["//test/benchmark:main"],
        **kargs
    )
# Tests to validate that Envoy benchmarks run successfully should be specified with this function.
def envoy_benchmark_test(
        name,
        benchmark_binary,
        data = [],
        tags = [],
        **kargs):
    """Declares an sh_test that validates a benchmark binary runs successfully.

    The wrapper script receives the package-relative path of the binary;
    the target is excluded from coverage ("nocoverage").
    """
    native.sh_test(
        name = name,
        srcs = ["//bazel:test_for_benchmark_wrapper.sh"],
        data = [":" + benchmark_binary] + data,
        args = ["%s/%s" % (native.package_name(), benchmark_binary)],
        tags = tags + ["nocoverage"],
        **kargs
    )
# Envoy Python test binaries should be specified with this function.
def envoy_py_test_binary(
        name,
        external_deps = [],
        deps = [],
        **kargs):
    """Declares a Python test binary, resolving external_deps like the C++ rules."""
    py_binary(
        name = name,
        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps],
        **kargs
    )
# Envoy C++ mock targets should be specified with this function.
def envoy_cc_mock(name, **kargs):
    """Declares a library of gmock mocks; alias for envoy_cc_test_library."""
    envoy_cc_test_library(name = name, **kargs)
# Envoy shell tests that need to be included in coverage run should be specified with this function.
def envoy_sh_test(
        name,
        srcs = [],
        data = [],
        coverage = True,
        cc_binary = [],
        tags = [],
        **kargs):
    """Declares a shell test, optionally wrapped as a cc_test for coverage.

    When coverage is True the shell script is embedded into a generated C++
    test runner (so it participates in coverage runs); otherwise a plain
    sh_test tagged "nocoverage" is emitted.
    """
    if coverage:
        if cc_binary == []:
            fail("cc_binary is required for coverage-enabled test.")
        # Generate a C++ source that executes the shell script.
        test_runner_cc = name + "_test_runner.cc"
        native.genrule(
            name = name + "_gen_test_runner",
            srcs = srcs,
            outs = [test_runner_cc],
            cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@",
            tools = ["//bazel:gen_sh_test_runner.sh"],
        )
        envoy_cc_test(
            name = name,
            srcs = [test_runner_cc],
            data = srcs + data + cc_binary,
            tags = tags,
            deps = ["//test/test_common:environment_lib"] + cc_binary,
            **kargs
        )
    else:
        native.sh_test(
            name = name,
            srcs = ["//bazel:sh_test_wrapper.sh"],
            data = srcs + data + cc_binary,
            args = srcs,
            tags = tags + ["nocoverage"],
            **kargs
        )
| 31.436923 | 125 | 0.557796 |
ace2fee687ffaf705cbe293fc6863e1bdc39872d | 717 | py | Python | app/tests/analyzer/test_kundt.py | apardyl/kraksat-receiver | 9d9f17853e2a19c657a096f0dc395142df29cc98 | [
"MIT"
] | 1 | 2019-10-01T19:04:28.000Z | 2019-10-01T19:04:28.000Z | app/tests/analyzer/test_kundt.py | apardyl/kraksat-receiver | 9d9f17853e2a19c657a096f0dc395142df29cc98 | [
"MIT"
] | null | null | null | app/tests/analyzer/test_kundt.py | apardyl/kraksat-receiver | 9d9f17853e2a19c657a096f0dc395142df29cc98 | [
"MIT"
] | null | null | null | import unittest
import os
from app.analyzer.kundt import Kundt
class KundtTest(unittest.TestCase):
def test_basic(self):
points = []
for i in range(0, 1000, 10):
points.append((i, i))
speed_of_sound = Kundt.speed_of_sound(points)
self.assertIsNotNone(speed_of_sound)
def test_with_real_data(self):
data = open(os.path.dirname(__file__) + '/capture8.csv')
points = []
for line in data:
i, y, x = line.strip().split(sep=';')
points.append((float(x.replace(',', '.')), float(y)))
data.close()
speed_of_sound = Kundt.speed_of_sound(points)
self.assertAlmostEqual(340, speed_of_sound, delta=5)
| 29.875 | 65 | 0.610879 |
ace2ff74c5b54468707bc1c7db212c09891c718e | 31,674 | py | Python | tests/functional/test_download.py | fungi/pip | 3120988cff5c245a64f4f2e726112da5a77b941e | [
"MIT"
] | null | null | null | tests/functional/test_download.py | fungi/pip | 3120988cff5c245a64f4f2e726112da5a77b941e | [
"MIT"
] | 1 | 2022-03-07T01:14:16.000Z | 2022-03-07T01:14:16.000Z | tests/functional/test_download.py | fungi/pip | 3120988cff5c245a64f4f2e726112da5a77b941e | [
"MIT"
] | 1 | 2021-09-27T11:14:58.000Z | 2021-09-27T11:14:58.000Z | import os.path
import shutil
import textwrap
from hashlib import sha256
from typing import List
import pytest
from pip._internal.cli.status_codes import ERROR
from pip._internal.utils.urls import path_to_url
from tests.conftest import MockServer, ScriptFactory
from tests.lib import PipTestEnvironment, TestData, create_really_basic_wheel
from tests.lib.path import Path
from tests.lib.server import file_response
def fake_wheel(data: TestData, wheel_path: str) -> None:
wheel_name = os.path.basename(wheel_path)
name, version, rest = wheel_name.split("-", 2)
wheel_data = create_really_basic_wheel(name, version)
data.packages.joinpath(wheel_path).write_bytes(wheel_data)
@pytest.mark.network
def test_download_if_requested(script: PipTestEnvironment) -> None:
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip("download", "-d", "pip_downloads", "INITools==0.1")
result.did_create(Path("scratch") / "pip_downloads" / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
@pytest.mark.network
def test_basic_download_setuptools(script: PipTestEnvironment) -> None:
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip("download", "setuptools")
setuptools_prefix = str(Path("scratch") / "setuptools")
assert any(path.startswith(setuptools_prefix) for path in result.files_created)
def test_download_wheel(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
"download", "--no-index", "-f", data.packages, "-d", ".", "meta"
)
result.did_create(Path("scratch") / "meta-1.0-py2.py3-none-any.whl")
result.did_not_create(script.site_packages / "piptestpackage")
@pytest.mark.network
def test_single_download_from_requirements_file(script: PipTestEnvironment) -> None:
"""
It should support download (in the scratch path) from PyPI from a
requirements file
"""
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
result.did_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
@pytest.mark.network
def test_basic_download_should_download_dependencies(
script: PipTestEnvironment,
) -> None:
"""
It should download dependencies (in the scratch path)
"""
result = script.pip("download", "Paste[openid]==1.7.5.1", "-d", ".")
result.did_create(Path("scratch") / "Paste-1.7.5.1.tar.gz")
openid_tarball_prefix = str(Path("scratch") / "python-openid-")
assert any(path.startswith(openid_tarball_prefix) for path in result.files_created)
result.did_not_create(script.site_packages / "openid")
def test_download_wheel_archive(script: PipTestEnvironment, data: TestData) -> None:
"""
It should download a wheel archive path
"""
wheel_filename = "colander-0.9.9-py2.py3-none-any.whl"
wheel_path = "/".join((data.find_links, wheel_filename))
result = script.pip("download", wheel_path, "-d", ".", "--no-deps")
result.did_create(Path("scratch") / wheel_filename)
def test_download_should_download_wheel_deps(
script: PipTestEnvironment, data: TestData
) -> None:
"""
It should download dependencies for wheels(in the scratch path)
"""
wheel_filename = "colander-0.9.9-py2.py3-none-any.whl"
dep_filename = "translationstring-1.1.tar.gz"
wheel_path = "/".join((data.find_links, wheel_filename))
result = script.pip(
"download", wheel_path, "-d", ".", "--find-links", data.find_links, "--no-index"
)
result.did_create(Path("scratch") / wheel_filename)
result.did_create(Path("scratch") / dep_filename)
@pytest.mark.network
def test_download_should_skip_existing_files(script: PipTestEnvironment) -> None:
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
result.did_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
# adding second package to test-req.txt
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
python-openid==2.2.5
"""
)
)
# only the second package should be downloaded
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
openid_tarball_prefix = str(Path("scratch") / "python-openid-")
assert any(path.startswith(openid_tarball_prefix) for path in result.files_created)
result.did_not_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
result.did_not_create(script.site_packages / "openid")
@pytest.mark.network
def test_download_vcs_link(script: PipTestEnvironment) -> None:
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
"download", "-d", ".", "git+https://github.com/pypa/pip-test-package.git"
)
result.did_create(Path("scratch") / "pip-test-package-0.1.1.zip")
result.did_not_create(script.site_packages / "piptestpackage")
def test_only_binary_set_then_download_specific_platform(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
def test_no_deps_set_then_download_specific_platform(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--no-deps",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
def test_download_specific_platform_fails(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
expect_error=True,
)
assert "--only-binary=:all:" in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--no-binary=fake",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
expect_error=True,
)
assert "--only-binary=:all:" in result.stderr
def test_download_specify_platform(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_9_x86_64",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl")
fake_wheel(data, "fake-2.0-py2.py3-none-linux_x86_64.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_10_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl")
# OSX platform wheels are not backward-compatible.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_8_x86_64",
"fake",
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake==1",
expect_error=True,
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake==2",
)
result.did_create(Path("scratch") / "fake-2.0-py2.py3-none-linux_x86_64.whl")
# Test with multiple supported platforms specified.
data.reset()
fake_wheel(data, "fake-3.0-py2.py3-none-linux_x86_64.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"manylinux1_x86_64",
"--platform",
"linux_x86_64",
"--platform",
"any",
"fake==3",
)
result.did_create(Path("scratch") / "fake-3.0-py2.py3-none-linux_x86_64.whl")
class TestDownloadPlatformManylinuxes:
"""
"pip download --platform" downloads a .whl archive supported for
manylinux platforms.
"""
@pytest.mark.parametrize(
"platform",
[
"linux_x86_64",
"manylinux1_x86_64",
"manylinux2010_x86_64",
"manylinux2014_x86_64",
],
)
def test_download_universal(
self, platform: str, script: PipTestEnvironment, data: TestData
) -> None:
"""
Universal wheels are returned even for specific platforms.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
@pytest.mark.parametrize(
"wheel_abi,platform",
[
("manylinux1_x86_64", "manylinux1_x86_64"),
("manylinux1_x86_64", "manylinux2010_x86_64"),
("manylinux2010_x86_64", "manylinux2010_x86_64"),
("manylinux1_x86_64", "manylinux2014_x86_64"),
("manylinux2010_x86_64", "manylinux2014_x86_64"),
("manylinux2014_x86_64", "manylinux2014_x86_64"),
],
)
def test_download_compatible_manylinuxes(
self,
wheel_abi: str,
platform: str,
script: PipTestEnvironment,
data: TestData,
) -> None:
"""
Earlier manylinuxes are compatible with later manylinuxes.
"""
wheel = f"fake-1.0-py2.py3-none-{wheel_abi}.whl"
fake_wheel(data, wheel)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / wheel)
def test_explicit_platform_only(
self, data: TestData, script: PipTestEnvironment
) -> None:
"""
When specifying the platform, manylinux1 needs to be the
explicit platform--it won't ever be added to the compatible
tags.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-linux_x86_64.whl")
script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
def test_download__python_version(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"27",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"33",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-py2-none-any.whl")
fake_wheel(data, "fake-2.0-py3-none-any.whl")
# No py3 provided for version 1.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake==1.0",
expect_error=True,
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"26",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-2.0-py3-none-any.whl")
def make_wheel_with_python_requires(
    script: PipTestEnvironment, package_name: str, python_requires: str
) -> Path:
    """
    Build a wheel whose metadata declares the given ``python_requires``.

    :return: the path to the built wheel file.
    """
    package_dir = script.scratch_path / package_name
    package_dir.mkdir()
    # Minimal setup.py carrying only name, version and the Requires-Python
    # constraint under test.
    setup_py = textwrap.dedent(
        f"""\
        from setuptools import setup
        setup(name='{package_name}',
              python_requires='{python_requires}',
              version='1.0')
        """
    )
    package_dir.joinpath("setup.py").write_text(setup_py)
    script.run("python", "setup.py", "bdist_wheel", "--universal", cwd=package_dir)
    wheel_name = f"{package_name}-1.0-py2.py3-none-any.whl"
    return package_dir / "dist" / wheel_name
@pytest.mark.usefixtures("with_wheel")
def test_download__python_version_used_for_python_requires(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Test that --python-version is used for the Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==3.2",
)
wheel_dir = os.path.dirname(wheel_path)
def make_args(python_version: str) -> List[str]:
return [
"download",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
python_version,
"mypackage==1.0",
]
args = make_args("33")
result = script.pip(*args, expect_error=True)
expected_err = (
"ERROR: Package 'mypackage' requires a different Python: "
"3.3.0 not in '==3.2'"
)
assert expected_err in result.stderr, f"stderr: {result.stderr}"
# Now try with a --python-version that satisfies the Requires-Python.
args = make_args("32")
script.pip(*args) # no exception
@pytest.mark.usefixtures("with_wheel")
def test_download_ignore_requires_python_dont_fail_with_wrong_python(
script: PipTestEnvironment,
) -> None:
"""
Test that --ignore-requires-python ignores Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==999",
)
wheel_dir = os.path.dirname(wheel_path)
result = script.pip(
"download",
"--ignore-requires-python",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"mypackage==1.0",
)
result.did_create(Path("scratch") / "mypackage-1.0-py2.py3-none-any.whl")
def test_download_specify_abi(script: PipTestEnvironment, data: TestData) -> None:
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"fake_abi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"none",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--abi",
"cp27m",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"none",
"fake",
expect_error=True,
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-otherabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"--abi",
"otherabi",
"--abi",
"none",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-otherabi-fake_platform.whl")
def test_download_specify_implementation(
script: PipTestEnvironment, data: TestData
) -> None:
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
data.reset()
fake_wheel(data, "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"2",
"fake",
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(
    script: PipTestEnvironment,
) -> None:
    """
    Test download exit status code when no requirements specified
    """
    # With no requirement given, pip must fail with ERROR and a clear message.
    result = script.pip("download", expect_error=True)
    assert "You must give at least one requirement to download" in result.stderr
    assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(
    script: PipTestEnvironment,
) -> None:
    """
    Test download exit status code when blank requirements file specified
    """
    script.scratch_path.joinpath("blank.txt").write_text("\n")
    # A requirements file with no requirements is not an error: the call
    # must succeed (script.pip raises on a non-zero exit by default).
    script.pip("download", "-r", "blank.txt")
def test_download_prefer_binary_when_tarball_higher_than_wheel(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_tarball_higher_than_wheel_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"--no-index",
"-f",
data.packages,
"-d",
".",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
source>0.9
"""
)
)
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_prefer_binary_when_wheel_doesnt_satisfy_req_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source>0.9
"""
)
)
result = script.pip(
"download",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_download_prefer_binary_when_only_tarball_exists(
script: PipTestEnvironment, data: TestData
) -> None:
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_when_only_tarball_exists_req_file(
script: PipTestEnvironment, data: TestData
) -> None:
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
@pytest.fixture(scope="session")
def shared_script(
tmpdir_factory: pytest.TempdirFactory, script_factory: ScriptFactory
) -> PipTestEnvironment:
tmpdir = Path(str(tmpdir_factory.mktemp("download_shared_script")))
script = script_factory(tmpdir.joinpath("workspace"))
return script
def test_download_file_url(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
shared_script.pip(
"download",
"-d",
str(download_dir),
"--no-index",
path_to_url(str(simple_pkg)),
)
assert downloaded_path.exists()
assert simple_pkg.read_bytes() == downloaded_path.read_bytes()
def test_download_file_url_existing_ok_download(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
downloaded_path_bytes = downloaded_path.read_bytes()
digest = sha256(downloaded_path_bytes).hexdigest()
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
url = "{}#sha256={}".format(path_to_url(simple_pkg), digest)
shared_script.pip("download", "-d", str(download_dir), url)
assert downloaded_path_bytes == downloaded_path.read_bytes()
def test_download_file_url_existing_bad_download(
shared_script: PipTestEnvironment, shared_data: TestData, tmpdir: Path
) -> None:
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
digest = sha256(simple_pkg_bytes).hexdigest()
url = "{}#sha256={}".format(path_to_url(simple_pkg), digest)
shared_script.pip("download", "-d", str(download_dir), url)
assert simple_pkg_bytes == downloaded_path.read_bytes()
def test_download_http_url_bad_hash(
shared_script: PipTestEnvironment,
shared_data: TestData,
tmpdir: Path,
mock_server: MockServer,
) -> None:
"""
If already-downloaded file has bad checksum, re-download.
"""
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
digest = sha256(simple_pkg_bytes).hexdigest()
mock_server.set_responses([file_response(simple_pkg)])
mock_server.start()
base_address = f"http://{mock_server.host}:{mock_server.port}"
url = f"{base_address}/simple-1.0.tar.gz#sha256={digest}"
shared_script.pip("download", "-d", str(download_dir), url)
assert simple_pkg_bytes == downloaded_path.read_bytes()
mock_server.stop()
requests = mock_server.get_requests()
assert len(requests) == 1
assert requests[0]["PATH_INFO"] == "/simple-1.0.tar.gz"
assert requests[0]["HTTP_ACCEPT_ENCODING"] == "identity"
def test_download_editable(
script: PipTestEnvironment, data: TestData, tmpdir: Path
) -> None:
"""
Test 'pip download' of editables in requirement file.
"""
editable_path = str(data.src / "simplewheel-1.0").replace(os.path.sep, "/")
requirements_path = tmpdir / "requirements.txt"
requirements_path.write_text("-e " + editable_path + "\n")
download_dir = tmpdir / "download_dir"
script.pip(
"download", "--no-deps", "-r", str(requirements_path), "-d", str(download_dir)
)
downloads = os.listdir(download_dir)
assert len(downloads) == 1
assert downloads[0].endswith(".zip")
| 27.164666 | 88 | 0.573562 |
ace2ff8a7d50fff54eb082db058f8cb675b8abd6 | 12,364 | py | Python | numba/core/utils.py | ssikdar1/numba | 700d38e4f5004d84966c260d5c9050d0fb637f5d | [
"BSD-2-Clause"
] | null | null | null | numba/core/utils.py | ssikdar1/numba | 700d38e4f5004d84966c260d5c9050d0fb637f5d | [
"BSD-2-Clause"
] | null | null | null | numba/core/utils.py | ssikdar1/numba | 700d38e4f5004d84966c260d5c9050d0fb637f5d | [
"BSD-2-Clause"
] | null | null | null | import atexit
import builtins
import functools
import os
import operator
import threading
import timeit
import math
import sys
import traceback
import weakref
from types import ModuleType
from collections.abc import Mapping
import numpy as np
from inspect import signature as pysignature # noqa: F401
from inspect import Signature as pySignature # noqa: F401
from inspect import Parameter as pyParameter # noqa: F401
from numba.core.config import (PYVERSION, MACHINE_BITS, # noqa: F401
DEVELOPER_MODE) # noqa: F401
INT_TYPES = (int,)
longint = int
get_ident = threading.get_ident
intern = sys.intern
file_replace = os.replace
asbyteint = int
# ------------------------------------------------------------------------------
# Start: Originally from `numba.six` under the following license
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def iteritems(d, **kw):
return iter(d.items(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
get_function_globals = operator.attrgetter("__globals__")
# End: Originally from `numba.six` under the following license
# ------------------------------------------------------------------------------
def erase_traceback(exc_value):
"""
Erase the traceback and hanging locals from the given exception instance.
"""
if exc_value.__traceback__ is not None:
traceback.clear_frames(exc_value.__traceback__)
return exc_value.with_traceback(None)
# Mapping between operator module functions and the corresponding built-in
# operators.
BINOPS_TO_OPERATORS = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'//': operator.floordiv,
'/': operator.truediv,
'%': operator.mod,
'**': operator.pow,
'&': operator.and_,
'|': operator.or_,
'^': operator.xor,
'<<': operator.lshift,
'>>': operator.rshift,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'is': operator.is_,
'is not': operator.is_not,
# This one has its args reversed!
'in': operator.contains
}
INPLACE_BINOPS_TO_OPERATORS = {
'+=': operator.iadd,
'-=': operator.isub,
'*=': operator.imul,
'//=': operator.ifloordiv,
'/=': operator.itruediv,
'%=': operator.imod,
'**=': operator.ipow,
'&=': operator.iand,
'|=': operator.ior,
'^=': operator.ixor,
'<<=': operator.ilshift,
'>>=': operator.irshift,
}
UNARY_BUITINS_TO_OPERATORS = {
'+': operator.pos,
'-': operator.neg,
'~': operator.invert,
'not': operator.not_,
'is_true': operator.truth
}
OPERATORS_TO_BUILTINS = {
operator.add: '+',
operator.iadd: '+=',
operator.sub: '-',
operator.isub: '-=',
operator.mul: '*',
operator.imul: '*=',
operator.floordiv: '//',
operator.ifloordiv: '//=',
operator.truediv: '/',
operator.itruediv: '/=',
operator.mod: '%',
operator.imod: '%=',
operator.pow: '**',
operator.ipow: '**=',
operator.and_: '&',
operator.iand: '&=',
operator.or_: '|',
operator.ior: '|=',
operator.xor: '^',
operator.ixor: '^=',
operator.lshift: '<<',
operator.ilshift: '<<=',
operator.rshift: '>>',
operator.irshift: '>>=',
operator.eq: '==',
operator.ne: '!=',
operator.lt: '<',
operator.le: '<=',
operator.gt: '>',
operator.ge: '>=',
operator.is_: 'is',
operator.is_not: 'is not',
# This one has its args reversed!
operator.contains: 'in',
# Unary
operator.pos: '+',
operator.neg: '-',
operator.invert: '~',
operator.not_: 'not',
operator.truth: 'is_true',
}
BINOPS_TO_OPERATORS['@'] = operator.matmul
INPLACE_BINOPS_TO_OPERATORS['@='] = operator.imatmul
_shutting_down = False
def _at_shutdown():
global _shutting_down
_shutting_down = True
def shutting_down(globals=globals):
"""
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
# At shutdown, the attribute may have been cleared or set to None.
v = globals().get('_shutting_down')
return v is True or v is None
# weakref.finalize registers an exit function that runs all finalizers for
# which atexit is True. Some of these finalizers may call shutting_down() to
# check whether the interpreter is shutting down. For this to behave correctly,
# we need to make sure that _at_shutdown is called before the finalizer exit
# function. Since atexit operates as a LIFO stack, we first contruct a dummy
# finalizer then register atexit to ensure this ordering.
weakref.finalize(lambda: None, lambda: None)
atexit.register(_at_shutdown)
class ConfigOptions(object):
OPTIONS = {}
def __init__(self):
self._values = self.OPTIONS.copy()
def set(self, name, value=True):
if name not in self.OPTIONS:
raise NameError("Invalid flag: %s" % name)
self._values[name] = value
def unset(self, name):
self.set(name, False)
def _check_attr(self, name):
if name not in self.OPTIONS:
raise AttributeError("Invalid flag: %s" % name)
def __getattr__(self, name):
self._check_attr(name)
return self._values[name]
def __setattr__(self, name, value):
if name.startswith('_'):
super(ConfigOptions, self).__setattr__(name, value)
else:
self._check_attr(name)
self._values[name] = value
def __repr__(self):
return "Flags(%s)" % ', '.join('%s=%s' % (k, v)
for k, v in self._values.items()
if v is not False)
def copy(self):
copy = type(self)()
copy._values = self._values.copy()
return copy
def __eq__(self, other):
return (isinstance(other, ConfigOptions) and
other._values == self._values)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(sorted(self._values.items())))
class SortedMap(Mapping):
"""Immutable
"""
def __init__(self, seq):
self._values = []
self._index = {}
for i, (k, v) in enumerate(sorted(seq)):
self._index[k] = i
self._values.append((k, v))
def __getitem__(self, k):
i = self._index[k]
return self._values[i][1]
def __len__(self):
return len(self._values)
def __iter__(self):
return iter(k for k, v in self._values)
class UniqueDict(dict):
def __setitem__(self, key, value):
if key in self:
raise AssertionError("key already in dictionary: %r" % (key,))
super(UniqueDict, self).__setitem__(key, value)
# Django's cached_property
# see https://docs.djangoproject.com/en/dev/ref/utils/#django.utils.functional.cached_property # noqa: E501
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.name = name or func.__name__
def __get__(self, instance, type=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
def runonce(fn):
@functools.wraps(fn)
def inner():
if not inner._ran:
res = fn()
inner._result = res
inner._ran = True
return inner._result
inner._ran = False
return inner
def bit_length(intval):
"""
Return the number of bits necessary to represent integer `intval`.
"""
assert isinstance(intval, INT_TYPES)
if intval >= 0:
return len(bin(intval)) - 2
else:
return len(bin(-intval - 1)) - 2
def stream_list(lst):
"""
Given a list, return an infinite iterator of iterators.
Each iterator iterates over the list from the last seen point up to
the current end-of-list.
In effect, each iterator will give the newly appended elements from the
previous iterator instantiation time.
"""
def sublist_iterator(start, stop):
return iter(lst[start:stop])
start = 0
while True:
stop = len(lst)
yield sublist_iterator(start, stop)
start = stop
class BenchmarkResult(object):
def __init__(self, func, records, loop):
self.func = func
self.loop = loop
self.records = np.array(records) / loop
self.best = np.min(self.records)
def __repr__(self):
name = getattr(self.func, "__name__", self.func)
args = (name, self.loop, self.records.size, format_time(self.best))
return "%20s: %10d loops, best of %d: %s per loop" % args
def format_time(tm):
units = "s ms us ns ps".split()
base = 1
for unit in units[:-1]:
if tm >= base:
break
base /= 1000
else:
unit = units[-1]
return "%.1f%s" % (tm / base, unit)
def benchmark(func, maxsec=1):
timer = timeit.Timer(func)
number = 1
result = timer.repeat(1, number)
# Too fast to be measured
while min(result) / number == 0:
number *= 10
result = timer.repeat(3, number)
best = min(result) / number
if best >= maxsec:
return BenchmarkResult(func, result, number)
# Scale it up to make it close the maximum time
max_per_run_time = maxsec / 3 / number
number = max(max_per_run_time / best / 3, 1)
# Round to the next power of 10
number = int(10 ** math.ceil(math.log10(number)))
records = timer.repeat(3, number)
return BenchmarkResult(func, records, number)
RANGE_ITER_OBJECTS = (builtins.range,)
# A dummy module for dynamically-generated functions
_dynamic_modname = '<dynamic>'
_dynamic_module = ModuleType(_dynamic_modname)
_dynamic_module.__builtins__ = builtins
def chain_exception(new_exc, old_exc):
"""Set the __cause__ attribute on *new_exc* for explicit exception
chaining. Returns the inplace modified *new_exc*.
"""
if DEVELOPER_MODE:
new_exc.__cause__ = old_exc
return new_exc
| 28.292906 | 110 | 0.630136 |
ace2ffa7753c89281c67fda7ce69a3a8ff174fba | 3,402 | py | Python | scripts/get-lantz.py | cycomanic/lantz | f2da006a5cf1a232c1576412cd63e565768ab4e1 | [
"BSD-3-Clause"
] | null | null | null | scripts/get-lantz.py | cycomanic/lantz | f2da006a5cf1a232c1576412cd63e565768ab4e1 | [
"BSD-3-Clause"
] | null | null | null | scripts/get-lantz.py | cycomanic/lantz | f2da006a5cf1a232c1576412cd63e565768ab4e1 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if not sys.version_info >= (3, 2, 1):
print('Lantz requires Python >= 3.2.1')
sys.exit(1)
import os
import time
import platform
import argparse
import subprocess
import urllib.request
import concurrent.futures
if platform.architecture()[0] != '32bit' or not sys.platform.startswith('win'):
print('Only 32bit Python running on Windows is currently supported by get-lantz.py')
sys.exit(2)
parser = argparse.ArgumentParser('Get Lantz!')
parser.add_argument('-e', '--editable', action='store_true',
help='Install Lantz as an editable package')
args = parser.parse_args()
URLS = {'setuptools': ('distribute_setup.py', 'http://python-distribute.org/{}'),
'pip': ('get-pip.py', 'https://raw.github.com/pypa/pip/master/contrib/{}'),
'PyQt4': ('PyQt-Py3.2-x86-gpl-4.9.4-1.exe', 'http://www.riverbankcomputing.co.uk/static/Downloads/PyQt4/{}'),
'numpy': ('numpy-1.6.2-win32-superpack-python3.2.exe', 'http://sourceforge.net/projects/numpy/files/NumPy/1.6.2/{}/download'),
'scipy': ('scipy-0.10.1-win32-superpack-python3.2.exe', 'http://sourceforge.net/projects/scipy/files/scipy/0.10.1/{}/download'),
'git': ('Git-1.7.11-preview20120710.exe', 'http://msysgit.googlecode.com/files/{}'),
'matplotlib': ('', ''),
'visa': ('visa520full.exe', 'http://ftp.ni.com/support/softlib/visa/NI-VISA/5.2/win/{}')}
if not args.editable:
del URLS['git']
def download(filename, url):
if os.path.exists(filename):
print('File found {}'.format(filename))
return
if '{}' in url:
url = url.format(filename)
start = time.time()
print('Downloading {}'.format(filename))
urllib.request.urlretrieve(url, filename)
print('Downloaded {} in {:.2f} secs'.format(filename, time.time() - start))
print(' Checking '.center(20, '-'))
INSTALL = []
for check in ('setuptools', 'pip', 'PyQt4', 'numpy', 'scipy'):
try:
__import__(check)
print('No need to install {}'.format(check))
except ImportError:
INSTALL.append(check)
print('Adding {} to install list'.format(check))
if args.editable:
try:
subprocess.call(['git', '--version'])
print('No need to install git')
except Exception as e:
print('Adding git to install list')
INSTALL.append('git')
INSTALL.append('visa')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
print('Working directory: {}'.format(os.getcwd()))
print(' Downloading '.center(20, '-'))
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
futs = [executor.submit(download, *filename_url)
for filename, filename_url in URLS.items()
if filename in INSTALL]
concurrent.futures.wait(futs)
print(' Installing '.center(20, '-'))
for key in ('setuptools', 'pip', ):
if key in INSTALL:
subprocess.call([sys.executable, URLS[key][0]])
for key in ('PyQt4', 'numpy', 'scipy', 'git', 'visa'):
if key in INSTALL:
subprocess.call([URLS[key][0], ])
PIP = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'pip')
REQS = ['colorama', 'pyserial', 'sphinx', 'pyyaml']
if args.editable:
subprocess.call([PIP, 'install', ] + REQS)
subprocess.call([PIP, 'install', '-e', 'lantz'])
else:
subprocess.call([PIP, 'install', ] + REQS + ['lantz'])
| 31.794393 | 136 | 0.635509 |
ace3001e147df3ddf4af790731cd29cfa2c295da | 9,198 | py | Python | calamari_ocr/scripts/eval.py | timothydereuse/calamari | eba8e9c35d2c301319cc9cb15d25124460aee2db | [
"Apache-2.0"
] | null | null | null | calamari_ocr/scripts/eval.py | timothydereuse/calamari | eba8e9c35d2c301319cc9cb15d25124460aee2db | [
"Apache-2.0"
] | null | null | null | calamari_ocr/scripts/eval.py | timothydereuse/calamari | eba8e9c35d2c301319cc9cb15d25124460aee2db | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
import tfaip.util.logging
from argparse import ArgumentParser
import os
import json
import numpy as np
from calamari_ocr.ocr.dataset.params import DATA_GENERATOR_CHOICES
from paiargparse import PAIArgumentParser, pai_dataclass, pai_meta
from tfaip.data.pipeline.definitions import PipelineMode
from calamari_ocr.ocr import SavedCalamariModel
from calamari_ocr.ocr.dataset.datareader.base import CalamariDataGeneratorParams
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.ocr.evaluator import EvaluatorParams
from calamari_ocr.utils import glob_all, split_all_ext
logger = tfaip.util.logging.logger(__name__)
def print_confusions(r, n_confusions):
# sort descending
if n_confusions != 0 and r["total_sync_errs"] > 0:
total_percent = 0
keys = sorted(r["confusion"].items(), key=lambda item: -item[1])
print("{:8s} {:8s} {:8s} {:10s}".format("GT", "PRED", "COUNT", "PERCENT"))
for i, ((gt, pred), count) in enumerate(keys):
gt_fmt = "{" + gt + "}"
pred_fmt = "{" + pred + "}"
if i == n_confusions:
break
percent = count * max(len(gt), len(pred)) / r["total_sync_errs"]
print("{:8s} {:8s} {:8d} {:10.2%}".format(gt_fmt, pred_fmt, count, percent))
total_percent += percent
print("The remaining but hidden errors make up {:.2%}".format(1.0 - total_percent))
def print_worst_lines(r, gt_samples, n_worst_lines):
if len(r["single"]) != len(gt_samples):
raise Exception("Mismatch in number of predictions and gt files")
sorted_lines = sorted(zip(r["single"], gt_samples), key=lambda a: -a[0][1])
if n_worst_lines < 0:
n_worst_lines = len(gt_samples)
if n_worst_lines > 0:
print("{:60s} {:4s} {:3s} {:3s} {}".format("GT FILE", "LEN", "ERR", "SER", "CONFUSIONS"))
for (len_gt, errs, sync_errs, confusion, gt_pred), sample in sorted_lines[:n_worst_lines]:
print("{:60s} {:4d} {:3d} {:3d} {}".format(sample["id"][-60:], len_gt, errs, sync_errs, confusion))
def write_xlsx(xlsx_file, eval_datas):
logger.info("Writing xlsx file to {}".format(xlsx_file))
import xlsxwriter
workbook = xlsxwriter.Workbook(xlsx_file)
for eval_data in eval_datas:
prefix = eval_data["prefix"]
r = eval_data["results"]
gt_files = eval_data["gt_files"]
# all files
ws = workbook.add_worksheet("{} - per line".format(prefix))
for i, heading in enumerate(
[
"GT FILE",
"GT",
"PRED",
"LEN",
"ERR",
"CER",
"REL. ERR",
"SYNC ERR",
"CONFUSIONS",
]
):
ws.write(0, i, heading)
sorted_lines = sorted(zip(r["single"], gt_files), key=lambda a: -a[0][1])
all_cs = []
for i, ((len_gt, errs, sync_errs, confusion, (gt, pred)), gt_file) in enumerate(sorted_lines):
ws.write(i + 1, 0, gt_file)
ws.write(i + 1, 1, gt.strip())
ws.write(i + 1, 2, pred.strip())
ws.write(i + 1, 3, len_gt)
ws.write(i + 1, 4, errs)
ws.write(i + 1, 5, errs / max(len(gt), len(pred), 1))
ws.write(i + 1, 6, errs / r["total_char_errs"] if r["total_char_errs"] > 0 else 0)
ws.write(i + 1, 7, sync_errs)
ws.write(i + 1, 8, "{}".format(confusion))
all_cs.append(errs / max(len(gt), len(pred), 1))
# total confusions
ws = workbook.add_worksheet("{} - global".format(prefix))
for i, heading in enumerate(["GT", "PRED", "COUNT", "PERCENT"]):
ws.write(0, i, heading)
keys = sorted(r["confusion"].items(), key=lambda item: -item[1])
for i, ((gt, pred), count) in enumerate(keys):
gt_fmt = "{" + gt + "}"
pred_fmt = "{" + pred + "}"
percent = count * max(len(gt), len(pred)) / r["total_sync_errs"]
ws.write(i + 1, 0, gt_fmt)
ws.write(i + 1, 1, pred_fmt)
ws.write(i + 1, 2, count)
ws.write(i + 1, 3, percent)
# histogram of cers
hsl = "{} - histogram".format(prefix)
ws = workbook.add_worksheet(hsl)
ws.write_row("A1", ["Class", "Count"])
hist, bin_edges = np.histogram(all_cs, bins="auto")
ws.write_column("A2", bin_edges)
ws.write_column("B2", hist)
chart = workbook.add_chart({"type": "column"})
chart.add_series(
{
"name": "CER hist",
"categories": "='{}'!$A$2:$A${}".format(hsl, 2 + len(bin_edges)),
"values": "='{}'!$B$2:$B${}".format(hsl, 2 + len(bin_edges)),
}
)
chart.set_title({"name": "CER distribution"})
chart.set_x_axis({"name": "CER"})
chart.set_y_axis({"name": "Amount"})
ws.insert_chart("D2", chart, {"x_offset": 25, "y_offset": 10})
workbook.close()
@pai_dataclass
@dataclass
class EvalArgs:
gt: CalamariDataGeneratorParams = field(
default_factory=FileDataParams,
metadata=pai_meta(help="GT", mode="flat", choices=DATA_GENERATOR_CHOICES),
)
pred: Optional[CalamariDataGeneratorParams] = field(
default=None,
metadata=pai_meta(
help="Optional prediction dataset",
mode="flat",
choices=DATA_GENERATOR_CHOICES,
),
)
n_confusions: int = field(
default=10,
metadata=pai_meta(
help="Only print n most common confusions. Defaults to 10, use -1 for all.",
mode="flat",
),
)
n_worst_lines: int = field(
default=0,
metadata=pai_meta(help="Print the n worst recognized text lines with its error", mode="flat"),
)
xlsx_output: Optional[str] = field(
default=None,
metadata=pai_meta(help="Optionally write a xlsx file with the evaluation results", mode="flat"),
)
non_existing_file_handling_mode: str = field(
default="error",
metadata=pai_meta(
mode="flat",
choices=["error", "skip", "empty"],
help="How to handle non existing .pred.txt files. Possible modes: skip, empty, error. "
"'Skip' will simply skip the evaluation of that file (not counting it to errors). "
"'Empty' will handle this file as would it be empty (fully checking for errors)."
"'Error' will throw an exception if a file is not existing. This is the default behaviour.",
),
)
skip_empty_gt: bool = field(
default=False,
metadata=pai_meta(help="Ignore lines of the gt that are empty.", mode="flat"),
)
checkpoint: Optional[str] = field(
default=None,
metadata=pai_meta(
help="Specify an optional checkpoint to parse the text preprocessor (for the gt txt files)",
mode="flat",
),
)
evaluator: EvaluatorParams = field(
default_factory=EvaluatorParams,
metadata=pai_meta(
mode="flat",
fix_dc=True,
),
)
def run():
main(parse_args())
def parse_args(args=None):
parser = PAIArgumentParser()
parser.add_root_argument("root", EvalArgs, ignore=["gt.images", "pred.images"])
return parser.parse_args(args=args).root
def main(args: EvalArgs):
# Local imports (imports that require tensorflow)
from calamari_ocr.ocr.scenario import CalamariScenario
from calamari_ocr.ocr.dataset.data import Data
from calamari_ocr.ocr.evaluator import Evaluator
if args.checkpoint:
saved_model = SavedCalamariModel(args.checkpoint, auto_update=True)
trainer_params = CalamariScenario.trainer_cls().params_cls().from_dict(saved_model.dict)
data_params = trainer_params.scenario.data
else:
data_params = Data.default_params()
data = Data(data_params)
pred_data = args.pred if args.pred is not None else args.gt.to_prediction()
evaluator = Evaluator(args.evaluator, data=data)
evaluator.preload_gt(gt_dataset=args.gt)
r = evaluator.run(gt_dataset=args.gt, pred_dataset=pred_data)
# TODO: More output
print("Evaluation result")
print("=================")
print("")
print(
"Got mean normalized label error rate of {:.2%} ({} errs, {} total chars, {} sync errs)".format(
r["avg_ler"], r["total_char_errs"], r["total_chars"], r["total_sync_errs"]
)
)
# sort descending
print_confusions(r, args.n_confusions)
samples = data.create_pipeline(evaluator.params.setup, args.gt).reader().samples()
print_worst_lines(r, samples, args.n_worst_lines)
if args.xlsx_output:
write_xlsx(
args.xlsx_output,
[
{
"prefix": "evaluation",
"results": r,
"gt_files": [s["id"] for s in samples],
}
],
)
return r
if __name__ == "__main__":
run()
| 34.449438 | 111 | 0.586432 |
ace301f69de100b70c202edbfe0fbc0270d04a70 | 389 | py | Python | multiml_htautau/task/metrics.py | UTokyo-ICEPP/multiml_htautau | 5f926c2291a55f57419aa0130d07e2a793fc7353 | [
"Apache-2.0"
] | null | null | null | multiml_htautau/task/metrics.py | UTokyo-ICEPP/multiml_htautau | 5f926c2291a55f57419aa0130d07e2a793fc7353 | [
"Apache-2.0"
] | null | null | null | multiml_htautau/task/metrics.py | UTokyo-ICEPP/multiml_htautau | 5f926c2291a55f57419aa0130d07e2a793fc7353 | [
"Apache-2.0"
] | null | null | null | from multiml.agent.metric import BaseMetric
class CustomMSEMetric(BaseMetric):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._name = 'custom_mse'
def calculate(self):
y_true, y_pred = self.get_true_pred_data()
from .loss import Tau4vecCalibLoss_np
return Tau4vecCalibLoss_np(pt_scale=1e-2, use_pxyz=True)(y_true, y_pred)
| 27.785714 | 80 | 0.694087 |
ace3033025a1c13f7639411c4deb98d5208a291b | 13,828 | py | Python | tamiltts/vasikka.py | CRE2525/open-tamil | ffc02509f7b8a6a17644c85799a475a8ba623954 | [
"MIT"
] | 1 | 2021-08-03T19:35:18.000Z | 2021-08-03T19:35:18.000Z | tamiltts/vasikka.py | CRE2525/open-tamil | ffc02509f7b8a6a17644c85799a475a8ba623954 | [
"MIT"
] | null | null | null | tamiltts/vasikka.py | CRE2525/open-tamil | ffc02509f7b8a6a17644c85799a475a8ba623954 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# This Python file uses the following encoding: utf-8
# Port of Prof. Vasu Renganathan's Tamil TTS to Python
# This file is released under terms of MIT License
# (C) 2017 - Ezhil Language Foundation
import tamil
import os
import sys
import re
import time
import codecs
from pprint import pprint
_DEBUG = False
# Syllable to AF mapper
class Syllable2AF(object):
numeral_digits = [
"saiphar",
"onru",
"irandu",
"muunru",
"naanku",
"ainthu",
"aaru",
"eezu",
"ettu",
"onpathu",
"pattu",
]
AudioMap = {
".": "stop",
u" ": "space1",
u" ": "space",
u"அ": "a",
u"ஆ": "aa",
u"இ": "i",
u"ஈ": "ii",
u"உ": "u",
u"ஊ": "uu",
u"எ": "e",
u"ஏ": "ee",
u"ஐ": "ai",
u"ஒ": "o",
u"ஓ": "oo",
u"ஔ": "au",
u"ஃ": "space1",
u"க": "ka",
u"கா": "kaa",
u"கி": "ki",
u"கீ": "kii",
u"கு": "ku",
u"கூ": "kuu",
u"கெ": "ke",
u"கே": "kee",
u"கை": "kai",
u"கொ": "ko",
u"கோ": "koo",
u"கௌ": "kau",
u"க்": "k",
u"க்ஷ": "ksha",
u"க்ஷா": "kshaa",
u"க்ஷி": "kshi",
u"க்ஷீ": "kshii",
u"க்ஷு": "kshu",
u"க்ஷூ": "kshuu",
u"க்ஷெ": "kshe",
u"க்ஷே": "kshee",
u"க்ஷை": "kshai",
u"க்ஷொ": "ksho",
u"க்ஷோ": "kshoo",
u"க்ஷௌ": "kshau",
u"ங": "nga",
u"ஙா": "ngaa",
u"ஙி": "ngi",
u"ஙீ": "ngii",
u"ஙு": "ngu",
u"ஙூ": "nguu",
u"ஙெ": "nge",
u"ஙே": "ngee",
u"ஙை": "ngai",
u"ஙொ": "ngo",
u"ஙோ": "ngoo",
u"ஙௌ": "ngau",
u"ங்": "ng",
u"ச": "ca",
u"சா": "caa",
u"சி": "ci",
u"சீ": "cii",
u"சு": "cu",
u"சூ": "cuu",
u"செ": "ce",
u"சே": "cee",
u"சை": "cai",
u"சொ": "co",
u"சோ": "coo",
u"சௌ": "cau",
u"ச்": "c",
u"ஜ": "ja",
u"ஜா": "jaa",
u"ஜி": "ji",
u"ஜீ": "jii",
u"ஜு": "ju",
u"ஜூ": "juu",
u"ஜெ": "je",
u"ஜே": "jee",
u"ஜை": "jai",
u"ஜொ": "jo",
u"ஜோ": "joo",
u"ஜௌ": "jau",
u"ஜ்": "j",
u"ஞ": "nja",
u"ஞா": "njaa",
u"ஞி": "nji",
u"ஞீ": "njii",
u"ஞு": "nju",
u"ஞூ": "njuu",
u"ஞெ": "nje",
u"ஞே": "njee",
u"ஞை": "njai",
u"ஞொ": "njo",
u"ஞோ": "njoo",
u"ஞௌ": "njau",
u"ஞ்": "nj",
u"ட": "ta",
u"டா": "taa",
u"டி": "ti",
u"டீ": "tii",
u"டு": "tu",
u"டூ": "tuu",
u"டெ": "te",
u"டே": "tee",
u"டை": "tai",
u"டொ": "to",
u"டோ": "too",
u"டௌ": "tau",
u"ட்": "t",
u"ண": "nnna",
u"ணா": "nnnaa",
u"ணி": "nnni",
u"ணீ": "nnnii",
u"ணு": "nnnu",
u"ணூ": "nnnuu",
u"ணெ": "nnne",
u"ணே": "nnnee",
u"ணை": "nnnai",
u"ணொ": "nnno",
u"ணோ": "nnnoo",
u"ணௌ": "nnnau",
u"ண்": "nnn",
u"த": "tha",
u"தா": "thaa",
u"தி": "thi",
u"தீ": "thii",
u"து": "thu",
u"தூ": "thuu",
u"தெ": "the",
u"தே": "thee",
u"தை": "thai",
u"தொ": "tho",
u"தோ": "thoo",
u"தௌ": "thau",
u"த்": "th",
u"ந": "na",
u"நா": "naa",
u"நி": "ni",
u"நீ": "nii",
u"நு": "nu",
u"நூ": "nuu",
u"நெ": "ne",
u"நே": "nee",
u"நை": "nai",
u"நொ": "no",
u"நோ": "noo",
u"நௌ": "nau",
u"ந்": "n",
u"ன": "nna",
u"னா": "nnaa",
u"னி": "nni",
u"னீ": "nnii",
u"னு": "nnu",
u"னூ": "nnuu",
u"னெ": "nne",
u"னே": "nnee",
u"னை": "nnai",
u"னொ": "nno",
u"னோ": "nnoo",
u"னௌ": "nnau",
u"ன்": "nn",
u"ப": "pa",
u"பா": "paa",
u"பி": "pi",
u"பீ": "pii",
u"பு": "pu",
u"பூ": "puu",
u"பெ": "pe",
u"பே": "pee",
u"பை": "pai",
u"பொ": "po",
u"போ": "poo",
u"பௌ": "pau",
u"ப்": "p",
u"ம": "ma",
u"மா": "maa",
u"மி": "mi",
u"மீ": "mii",
u"மு": "mu",
u"மூ": "muu",
u"மெ": "me",
u"மே": "mee",
u"மை": "mai",
u"மொ": "mo",
u"மோ": "moo",
u"மௌ": "mau",
u"ம்": "m",
u"ய": "ya",
u"யா": "yaa",
u"யி": "yi",
u"யீ": "yii",
u"யு": "yu",
u"யூ": "yuu",
u"யெ": "ye",
u"யே": "yee",
u"யை": "yai",
u"யொ": "yo",
u"யோ": "yoo",
u"யௌ": "yau",
u"ய்": "y",
u"ர": "ra",
u"ரா": "raa",
u"ரி": "ri",
u"ரீ": "rii",
u"ரு": "ru",
u"ரூ": "ruu",
u"ரெ": "re",
u"ரே": "ree",
u"ரை": "rai",
u"ரொ": "ro",
u"ரோ": "roo",
u"ரௌ": "rau",
u"ர்": "r",
u"ற": "rra",
u"றா": "rraa",
u"றி": "rri",
u"றீ": "rrii",
u"று": "rru",
u"றூ": "rruu",
u"றெ": "rre",
u"றே": "rree",
u"றை": "rrai",
u"றொ": "rro",
u"றோ": "rroo",
u"றௌ": "rrau",
u"ற்": "rr",
u"ல": "la",
u"லா": "laa",
u"லி": "li",
u"லீ": "lii",
u"லு": "lu",
u"லூ": "luu",
u"லெ": "le",
u"லே": "lee",
u"லை": "lai",
u"லொ": "lo",
u"லோ": "loo",
u"லௌ": "lau",
u"ல்": "l",
u"ள": "lla",
u"ளா": "llaa",
u"ளி": "lli",
u"ளீ": "llii",
u"ளு": "llu",
u"ளூ": "lluu",
u"ளெ": "lle",
u"ளே": "llee",
u"ளை": "llai",
u"ளொ": "llo",
u"ளோ": "lloo",
u"ளௌ": "llau",
u"ள்": "ll",
u"ழ": "za",
u"ழா": "zaa",
u"ழி": "zi",
u"ழீ": "zii",
u"ழு": "zu",
u"ழூ": "zuu",
u"ழெ": "ze",
u"ழே": "zee",
u"ழை": "zai",
u"ழொ": "zo",
u"ழோ": "zoo",
u"ழௌ": "zau",
u"ழ்": "z",
u"வ": "va",
u"வா": "vaa",
u"வி": "vi",
u"வீ": "vii",
u"வு": "vu",
u"வூ": "vuu",
u"வெ": "ve",
u"வே": "vee",
u"வை": "vai",
u"வொ": "vo",
u"வோ": "voo",
u"வௌ": "vau",
u"வ்": "v",
u"ஶ": "space1",
u"ஶா": "space1",
u"ஶி": "space1",
u"ஶீ": "space1",
u"ஶு": "space1",
u"ஶூ": "space1",
u"ஶெ": "space1",
u"ஶே": "space1",
u"ஶை": "space1",
u"ஶொ": "space1",
u"ஶோ": "space1",
u"ஶௌ": "space1",
u"ஷ": "sha",
u"ஷா": "shaa",
u"ஷி": "shi",
u"ஷீ": "shii",
u"ஷு": "shu",
u"ஷூ": "shuu",
u"ஷெ": "she",
u"ஷே": "shee",
u"ஷை": "shai",
u"ஷொ": "sho",
u"ஷோ": "shoo",
u"ஷௌ": "shau",
u"ஷ்": "sh",
u"ஸ": "sa",
u"ஸா": "saa",
u"ஸி": "si",
u"ஸீ": "sii",
u"ஸு": "su",
u"ஸூ": "suu",
u"ஸெ": "se",
u"ஸே": "see",
u"ஸை": "sai",
u"ஸொ": "so",
u"ஸோ": "soo",
u"ஸௌ": "sau",
u"ஸ்": "s",
u"ஹ": "ha",
u"ஹா": "haa",
u"ஹி": "hi",
u"ஹீ": "hii",
u"ஹு": "hu",
u"ஹூ": "huu",
u"ஹெ": "he",
u"ஹே": "hee",
u"ஹை": "hai",
u"ஹொ": "ho",
u"ஹோ": "hoo",
u"ஹௌ": "hau",
u"ஹ்": "h",
}
def __init__(self):
super(Syllable2AF, self).__init__()
@staticmethod
def syllable_mapper_uyir_mei(in_syllable):
data = Syllable2AF.AudioMap.get(in_syllable, None)
if not data:
# convert digits
try:
digit = int(in_syllable)
return Syllable2AF.numeral_digits[digit]
except ValueError as vex:
data = "space" # bad literal, _, +, -, (, ) etc.
return data
# make this number go to 247+
if _DEBUG:
print(
"filled = %d/%d"
% (
len(list(filter(len, Syllable2AF.AudioMap.values()))),
len(Syllable2AF.AudioMap.values()),
)
)
# text to be output as audio
class SubjectText(object):
    """Tamil text prepared for concatenative synthesis.

    On construction the text is split into syllables, each syllable is
    mapped to an audio-clip base name, and a stop-softening pass is
    applied to the resulting mapping.
    """
    def __init__(self, text):
        super(SubjectText, self).__init__()
        self.text = text
        self.filename = "<text>"
        self.syllables = []
        self.audiomapping = ["space", "space1"] # ambient start
        self._map_to_syllables()
        self._build_audio_mapping()
        self._soften_stops()
    def reset(self):
        # Empty the mapping in place (callers may hold a reference).
        while len(self.audiomapping) > 0:
            self.audiomapping.pop()
    def _map_to_syllables(self):
        # Keep only Tamil letters plus whitespace and basic punctuation.
        tamil_or_spc = lambda x: tamil.utf8.istamil(x) or x in [
            " ",
            "\r",
            "\n",
            ".",
            ",",
            ";",
            "?",
            "!",
        ]
        self.syllables = filter(tamil_or_spc, tamil.utf8.get_letters(self.text))
    def _build_audio_mapping(self):
        # NOTE(review): self.syllables is a one-shot filter iterator in
        # Python 3; it is fully consumed here.
        REPEAT_SPACE = 2
        for syllable in self.syllables:
            # syll = AudioMap.get(syllable,"space")
            # syll = syll != '' and syll or "space"
            syll = Syllable2AF.syllable_mapper_uyir_mei(syllable)
            self.audiomapping.append(syll)
            if syll in ["space1", "space"]:
                # Stretch pauses: each space clip is emitted REPEAT_SPACE
                # extra times.
                for i in range(0, REPEAT_SPACE):
                    self.audiomapping.append(syll)
    # port of 'soften stops' subroutine from Prof. Vasu's code.
    # soften intervocalic and after nasal stop consonants
    def _soften_stops(self):
        # In-place rewrite of audiomapping: stop consonants (k/c/th/t/p)
        # are replaced by their softened counterparts depending on the
        # previous (and sometimes next) clip name.
        for pos in range(1, len(self.audiomapping)):
            curr_s = self.audiomapping[pos]
            prev_s = self.audiomapping[pos - 1]
            next_s = (
                (pos < len(self.audiomapping) - 1)
                and self.audiomapping[pos + 1]
                or None
            )
            if curr_s.startswith("k"):
                if prev_s.startswith("ng"):
                    curr_s = "ng" + curr_s[1:]
                elif curr_s != "k" and prev_s != "k" and prev_s != "space":
                    curr_s = "h" + curr_s[1:]
            elif curr_s.startswith("c"):
                if prev_s == "nj":
                    curr_s = "j" + curr_s[1:]
                elif curr_s != "c" and prev_s != "c" and prev_s != "space":
                    curr_s = "s" + curr_s[1:]
            elif curr_s.startswith("th"):
                if prev_s in ["n", "u"] or (
                    curr_s != "th" and prev_s != "th" and prev_s != "space"
                ):
                    if _DEBUG:
                        curr_old = curr_s
                    curr_s = "dh" + curr_s[2:]
                    if _DEBUG:
                        print(
                            "Softening %s%s -> %s%s"
                            % (prev_s, curr_old, prev_s, curr_s)
                        )
                elif next_s and next_s.find("space") >= 0:
                    curr_s = "dh" + curr_s[2:]
            elif curr_s.startswith("t"):
                if prev_s == "nnn" or (
                    curr_s != "t" and prev_s != "space" and prev_s != "t"
                ):
                    curr_s = "d" + curr_s[1:]
            elif curr_s.startswith("p"):
                if prev_s == "m" or (
                    curr_s != "p" and prev_s != "p" and prev_s != "space"
                ):
                    curr_s = "b" + curr_s[1:]
            self.audiomapping[pos] = curr_s
    def get_audiofile_order(self):
        """Return the ordered list of clip base names, e.g. ['va','na','ka','m']."""
        # ['va','na','ka','m']
        if _DEBUG:
            pprint(self.audiomapping)
        return self.audiomapping
# driver class
class ConcatennativeTTS(object):
    """Driver: renders a SubjectText by concatenating per-syllable clips."""
    TARGETDIR = os.path.join(os.path.split(__file__)[0], "tamilsound")
    FMT = "mp3"
    def __init__(self, text, outputfile):
        super(ConcatennativeTTS, self).__init__()
        self.outputfile = outputfile
        self.subject_text = SubjectText(text)
    def run(self):
        self._mergeaudio()  # write to outputfile
    def _mergeaudio(self):
        """Concatenate the mapped syllable clips into self.outputfile."""
        clip_paths = []
        for syllable_name in self.subject_text.get_audiofile_order():
            clip_paths.append(
                os.path.join(
                    ConcatennativeTTS.TARGETDIR,
                    syllable_name + "." + ConcatennativeTTS.FMT,
                )
            )
        failed = []
        with open(self.outputfile, "wb") as dest:
            for clip in clip_paths:
                try:
                    with open(clip, "rb") as src:
                        dest.write(src.read())
                except IOError as ioe:
                    # Missing clip: warn and keep going; the output simply
                    # lacks that syllable.
                    if _DEBUG:
                        failed.append(os.path.basename(clip).split(".")[0])
                    print(
                        'Warning: cannot synthesize syllable "%s" with error \n\t[%s]'
                        % (os.path.basename(clip).split(".")[0], str(ioe))
                    )
        if _DEBUG and len(failed) >= 1:
            pprint(set(failed))
        return
if __name__ == u"__main__":
    # CLI entry point: driver.py <output audio file> <input UTF-8 Tamil text>
    if len(sys.argv) < 3:
        print("Usage: ")
        print("driver.py $AUDIO_OUTPUTFILENAME $TAMIL_TEXTFILENAME")
        sys.exit(-1)
    filename_in = sys.argv[2]
    outputfile = sys.argv[1]
    start = time.time()
    with codecs.open(filename_in, "r", "UTF-8") as fp:
        data = fp.read()
    tts = ConcatennativeTTS(data, outputfile)
    tts.run()
    lapsed = time.time() - start
    print(u"Tamil speech output to %s in %g(s)" % (outputfile, lapsed))
    sys.exit(0)
| 25.607407 | 88 | 0.361368 |
ace30339a8f22a441f459cea1e3983e81f37caa7 | 18,054 | py | Python | tensorflow/python/saved_model/save_test.py | xykong1958/tensorflow | d8fe10aae9e6be9cd49ab7e68c1ca4989f0be42b | [
"Apache-2.0"
] | 3 | 2016-08-20T04:02:24.000Z | 2019-04-21T06:18:41.000Z | tensorflow/python/saved_model/save_test.py | xiyihong/tensorflow | f90532431c3785166cff35ff427b652fe460f60b | [
"Apache-2.0"
] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/python/saved_model/save_test.py | xiyihong/tensorflow | f90532431c3785166cff35ff427b652fe460f60b | [
"Apache-2.0"
] | 1 | 2019-10-31T09:22:30.000Z | 2019-10-31T09:22:30.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
class _ModelWithOptimizer(util.Checkpoint):
  """Minimal trackable model: one Dense layer trained with Adam."""
  def __init__(self):
    self.dense = core.Dense(1)
    self.optimizer = adam.Adam(0.01)
  @def_function.function(
      input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
                       tensor_spec.TensorSpec([None], dtypes.float32)))
  def call(self, x, y):
    # One optimization step on MSE loss; returns the pre-step loss.
    with backprop.GradientTape() as tape:
      loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
    trainable_variables = self.dense.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, trainable_variables))
    return {"loss": loss}
def _import_and_infer(
    save_dir, inputs,
    signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
  """Load a SavedModel into a TF 1.x-style graph and run one signature."""
  graph = ops.Graph()
  with graph.as_default(), session_lib.Session() as session:
    meta_graph = loader.load(session, [tag_constants.SERVING], save_dir)
    signature = meta_graph.signature_def[signature_key]
    assert set(inputs.keys()) == set(signature.inputs.keys())
    # Resolve each named input/output to its tensor in the loaded graph.
    feed_dict = {
        graph.get_tensor_by_name(signature.inputs[name].name): value
        for name, value in inputs.items()
    }
    fetches = {
        name: graph.get_tensor_by_name(info.name)
        for name, info in signature.outputs.items()
    }
    return session.run(fetches, feed_dict=feed_dict)
class SaveTest(test.TestCase):
  """Tests for tf.saved_model.save with trackable objects and functions."""
  def test_method_save_signature(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, root.f)
    self.assertEqual(
        {"output_0": 2.},
        _import_and_infer(save_dir, {"x": 1.}))
  def test_method_save_concrete(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda z: {"out": 2. * z})
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        {"non_default_key": root.f.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.float32))})
    self.assertEqual(
        {"out": 2.},
        _import_and_infer(
            save_dir, {"z": 1.}, signature_key="non_default_key"))
  def test_version_information_included(self):
    root = tracking.AutoTrackable()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    saved_model_proto = loader_impl.parse_saved_model(save_dir)
    self.assertEqual(
        versions.__version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version)
    self.assertEqual(
        versions.__git_version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version)
  def test_non_concrete_error(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: 2. * x)
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "Expected a TensorFlow function"):
      save.save(root, save_dir, root.f)
  def test_captures_unreachable_variable(self):
    root = tracking.AutoTrackable()
    unreachable_variable = variables.Variable([5.0, 2.0])
    root.reachable_variable = variables.Variable([1.0, 3.0])
    @def_function.function
    def increase_variable(x):
      return 2 * unreachable_variable * x + root.reachable_variable
    root.f = increase_variable
    self.assertAllEqual([101.0, 83.0],
                        root.f(constant_op.constant([10.0, 20.0])).numpy())
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(KeyError, "not reachable from root"):
      save.save(root, save_dir)
  def test_nested_inputs(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x[0],
        input_signature=([tensor_spec.TensorSpec(None, dtypes.float32),
                          tensor_spec.TensorSpec(None, dtypes.float32)],))
    root.f([constant_op.constant(1.), constant_op.constant(1.)])
    # Concrete functions must always have uniquely named Tensor inputs. Save
    # relies on this.
    with self.assertRaisesRegexp(
        ValueError, "two arguments named 'x'"):
      root.f.get_concrete_function()
  def test_nested_outputs(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "non-flat outputs"):
      save.save(root, save_dir, to_save)
  def test_nested_dict_outputs(self):
    root = util.Checkpoint(
        f=def_function.function(
            lambda x: {"a": 2. * x, "b": (3. * x, 4. * x)}))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "dictionary containing non-Tensor value"):
      save.save(root, save_dir, to_save)
  def test_variable(self):
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(
        lambda x: root.v1 * root.v2 * x)
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, to_save)
    self.assertAllEqual({"output_0": 12.},
                        _import_and_infer(save_dir, {"x": 2.}))
  def test_optimizer(self):
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    model = _ModelWithOptimizer()
    first_loss = model.call(x, y)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir, model.call)
    second_loss = model.call(x, y)
    self.assertNotEqual(first_loss, second_loss)
    self.assertAllClose(
        second_loss,
        _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]}))
  def test_single_method_default_signature(self):
    model = _ModelWithOptimizer()
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    model.call(x, y)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
    self.assertIn("loss",
                  _import_and_infer(save_dir,
                                    {"x": [[3., 4.]], "y": [2.]}))
  def test_single_function_default_signature(self):
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3., input_signature=())
    model.f()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
    self.assertAllClose({"output_0": 3.},
                        _import_and_infer(save_dir, {}))
  def test_single_function_no_signature(self):
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3.)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
  def test_find_default_save_function(self):
    class ObjWithDefaultSignature(util.Checkpoint):
      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def _default_save_signature(self, x):
        return x + x + 1
    obj = ObjWithDefaultSignature()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(obj, save_dir)
    self.assertAllClose(
        {"output_0": 7.}, _import_and_infer(save_dir, {"x": 3.}))
  def test_docstring(self):
    class Adder(util.Checkpoint):
      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def add(self, x):
        return x + x + 1.
    to_save = Adder()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 7.},
                        _import_and_infer(save_dir, {"x": 3.}))
  def test_datastructures(self):
    class HasDatastructures(util.Checkpoint):
      def __init__(self):
        self.a = [1.]
        self.a.append(variables.Variable(2.))
        self.b = {"a": variables.Variable(3.)}
      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def add(self, x):
        return x + math_ops.add_n(self.a) + self.b["a"]
    to_save = HasDatastructures()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 10.},
                        _import_and_infer(save_dir, {"x": 4.}))
  def test_default_attr_stripping(self):
    class Complex(util.Checkpoint):
      @def_function.function(input_signature=[])
      def __call__(self):
        return math_ops.complex(
            constant_op.constant(1.),
            constant_op.constant(2.),
            name="complex")
    to_save = Complex()
    to_save()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    graph = ops.Graph()
    with graph.as_default(), self.session(graph) as session:
      loader.load(session, [tag_constants.SERVING], save_dir)
      func, = [f for name, f in graph._functions.items() if "call" in name]
      complex_node, = [
          node for node in func.definition.node_def if node.op == "Complex"]
      # Default-valued attrs should be stripped from the exported graph.
      self.assertNotIn("T", complex_node.attr)
      self.assertNotIn("Tout", complex_node.attr)
  def test_signature_attribute_reserved(self):
    root = util.Checkpoint(signatures=variables.Variable(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(ValueError, "del obj.signatures"):
      save.save(root, save_dir)
    del root.signatures
    save.save(root, save_dir)
  def test_function_with_captured_dataset(self):
    class HasDataset(module.Module):
      def __init__(self):
        super(HasDataset, self).__init__()
        self.dataset = (
            dataset_ops.Dataset.range(5)
            .map(lambda x: x ** 2))
      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int64)
        for element in self.dataset:
          current_sum += x * element
        return current_sum
    root = HasDataset()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root, save_dir,
        signatures=root.__call__.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.int64)))
    self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)},
                        _import_and_infer(save_dir, {"x": 3}))
class AssetTests(test.TestCase):
  """Tests for asset files (e.g. vocab files) referenced by SavedModels."""
  def setUp(self):
    super(AssetTests, self).setUp()
    # A small vocabulary file used as the asset in each test.
    self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
    with open(self._vocab_path, "w") as f:
      f.write("alpha\nbeta\ngamma\n")
  def test_asset_path_returned(self):
    root = tracking.AutoTrackable()
    root.path = tracking.TrackableAsset(self._vocab_path)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    root.get_asset = def_function.function(lambda: root.path.asset_path)
    save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    file_io.rename(save_dir, second_dir)
    imported_path = _import_and_infer(second_dir, {})["output_0"]
    self.assertIn(compat.as_str_any(second_dir),
                  compat.as_str_any(imported_path))
  def test_table(self):
    initializer = lookup_ops.TextFileInitializer(
        self._vocab_path,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
    root = util.Checkpoint(table=lookup_ops.HashTable(
        initializer, default_value=-1))
    root.table_user = def_function.function(
        root.table.lookup,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
    self.assertEqual(
        2,
        self.evaluate(root.table_user(constant_op.constant("gamma"))))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    file_io.delete_file(self._vocab_path)
    self.assertAllClose(
        {"output_0": [2, 0]},
        _import_and_infer(save_dir, {"keys": ["gamma", "alpha"]}))
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    # Asset paths should track the location the SavedModel is loaded from.
    file_io.rename(save_dir, second_dir)
    self.assertAllClose(
        {"output_0": [2, 1]},
        _import_and_infer(second_dir, {"keys": ["gamma", "beta"]}))
  def test_unused_asset(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.asset = tracking.TrackableAsset(self._vocab_path)
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, export_dir)
    self.assertAllClose(
        {"output_0": [0.2]},
        _import_and_infer(export_dir, {"x": [0.1]}))
  def test_sensible_function_building_exception(self):
    root = util.Checkpoint(v=variables.Variable(2.))
    root.f = def_function.function(
        lambda x: 2. * root.v,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")
    @def_function.function
    def _calls_save():
      save.save(root, export_dir)
    # Saving from inside a tf.function is unsupported and should raise.
    with self.assertRaisesRegexp(AssertionError, "tf.function"):
      _calls_save()
class _ModelWithOptimizerUsingDefun(util.Checkpoint):
  """Like _ModelWithOptimizer but wrapped with function.defun (see note)."""
  def __init__(self):
    self.dense = core.Dense(1)
    self.optimizer = adam.Adam(0.01)
  # Using defun due to control flow v2 cycles, b/121159261. def_function uses
  # conds to gate variable initialization and so triggers cond reference cycles,
  # but the thing being wrapped here does not use cond itself.
  @function.defun(
      input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
                       tensor_spec.TensorSpec([None], dtypes.float32)),
  )
  def call(self, x, y):
    # One optimization step on MSE loss; returns the pre-step loss.
    with backprop.GradientTape() as tape:
      loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
    trainable_variables = self.dense.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, trainable_variables))
    return {"loss": loss}
class MemoryTests(test.TestCase):
  """Checks that saving does not create Python reference cycles."""
  def setUp(self):
    self._model = _ModelWithOptimizerUsingDefun()
  @test_util.assert_no_garbage_created
  def test_no_reference_cycles(self):
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    self._model.call(x, y)
    if sys.version_info[0] < 3:
      # TODO(allenl): debug reference cycles in Python 2.x
      self.skipTest("This test only works in Python 3+. Reference cycles are "
                    "created in older Python versions.")
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(self._model, save_dir, self._model.call)
if __name__ == "__main__":
  # Run the test suite when this file is executed directly.
  test.main()
| 38.088608 | 80 | 0.687493 |
ace30414f8575801588dc1cb26c378eec30873c1 | 6,448 | py | Python | src/miniut/log.py | JuanS3/utilities | ac155baf1ef4b9e59b145eff2ed59c5db3e17a2f | [
"MIT"
] | null | null | null | src/miniut/log.py | JuanS3/utilities | ac155baf1ef4b9e59b145eff2ed59c5db3e17a2f | [
"MIT"
] | null | null | null | src/miniut/log.py | JuanS3/utilities | ac155baf1ef4b9e59b145eff2ed59c5db3e17a2f | [
"MIT"
] | null | null | null | import os
import logging
import functools
from datetime import datetime as dt
from miniut.exceptions import RestoreLog
from miniut import config as cfg
FOLDER_LOGS_DEFAULT: str = 'Logs'
# --- Module-level logger state; configured by init() and mutated by the
# --- helper functions below.
__folder_logs: str = FOLDER_LOGS_DEFAULT
__log: Optional[logging.Logger] = None
__log_name: str = 'logging.log'
__log_ok: bool = True
__log_aux: str = ''
__lvl: str = ''
__STANDARD_LVL: str = ' '
__LVL_INDENT: int = 2
# Localized banner fragments, keyed by language code (see miniut.config).
_START_LANGS = {cfg.ENG : 'START',
                cfg.ESP : 'INICIA',
                }
_END_LANGS = {cfg.ENG : 'END',
              cfg.ESP : 'TERMINA',
              }
_RESTORED_LOG_LANGS = {cfg.ENG : 'RESTORED',
                       cfg.ESP : 'RECONSTRUIDO',
                       }
_RESTORE_EXCEPT_LANGS = {cfg.ENG : 'Impossible to restore log',
                         cfg.ESP : 'No ha sido posible reconstruir el log',
                         }
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~ decorators ~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def block(message_block: Union[str, dict]):
    """Decorator factory: wrap a function in a named start/end log block.

    Parameters
    ----------
    message_block : Union[str, dict]
        Either the block title itself, or a mapping from language code
        (see ``miniut.config``) to a localized title.

    Note: the original annotation was ``str or dict``, which evaluates to
    just ``str`` at runtime; ``Union[str, dict]`` expresses the intent.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Resolve the localized title lazily, at call time.
            if isinstance(message_block, dict):
                message = message_block[cfg.lang()]
            else:
                message = message_block
            start_block(message)
            value = func(*args, **kwargs)
            end_block(message)
            return value
        return wrapped
    return decorator
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~ functions ~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def init(log_name: str = 'logging',
         folder_log: str = FOLDER_LOGS_DEFAULT,
         time: bool = True
         ) -> None:
    """Set up the module-wide file logger.

    Parameters
    ----------
    log_name : str
        Base name of the logging file.
    folder_log : str, optional
        Directory where the logging file is stored, by default 'Logs'.
    time : bool, optional
        When True the file name carries a '%Y%m%d-%H%M%S' timestamp,
        by default True.
    """
    global __log_name, __folder_logs, __log, __lvl
    __lvl = ''
    timestamp: str = dt.now().strftime('%Y%m%d-%H%M%S') if time else ''
    __log_name = f'{log_name} - {timestamp}.log'
    __folder_logs = folder_log
    if not os.path.exists(__folder_logs):
        os.makedirs(__folder_logs)
    formatter = logging.Formatter('%(asctime)-8s - %(levelname)-8s - %(message)s')
    file_handler = logging.FileHandler(f'{__folder_logs}/{__log_name}', encoding='UTF-8')
    file_handler.setFormatter(formatter)
    __log = logging.getLogger(__log_name)
    __log.setLevel(logging.DEBUG)
    __log.addHandler(file_handler)
def get_folder_log() -> str:
    """Return the directory currently receiving log files."""
    return __folder_logs
def get_log_name() -> str:
    """Return the file name of the current log."""
    return __log_name
def _add_lvl() -> None:
    """Increase the message indentation by one level."""
    global __lvl
    __lvl += __STANDARD_LVL * __LVL_INDENT
def _sub_lvl() -> None:
    """Decrease the message indentation by one level."""
    global __lvl
    __lvl = __lvl[:len(__lvl) - __LVL_INDENT]
def _bad_log() -> None:
    """
    Indicate the log has an error and should be restored.

    Clears the module flag checked by close(); when False, close()
    rebuilds the file from the in-memory buffer.
    """
    global __log_ok
    __log_ok = False
def start_block(message: str) -> None:
    """Open a named block: log a localized START banner, then indent.

    Parameters
    ----------
    message : str
        The title of the block
    """
    banner = f'# {_START_LANGS[cfg.lang()]} {message.upper()} #'
    info(banner)
    _add_lvl()
def end_block(message: str) -> None:
    """Close a named block: unindent, then log a localized END banner.

    Parameters
    ----------
    message : str
        The title of the block
    """
    _sub_lvl()
    banner = f'# {_END_LANGS[cfg.lang()]} {message.upper()} #'
    info(banner)
def _message_log_aux(type_message: str, msg: str) -> None:
    """Mirror a log entry into the in-memory buffer used for recovery.

    Parameters
    ----------
    type_message : str
        Level label such as 'INFO', 'WARNING', 'ERROR'.
    msg : str
        The message displayed in the log.
    """
    global __log_aux
    stamp = dt.now().strftime('%Y-%m-%d %H:%M:%S')
    __log_aux = __log_aux + f'{stamp} - {type_message:<10} - {msg}\n'
def info(message: str) -> None:
    """
    Information message into the log (mirrored to the recovery buffer).

    Parameters
    ----------
    message : str
        The message to display in the log
    """
    msg = f'{__lvl}{message}'
    try:
        __log.info(msg)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt; only real failures should mark
        # the log for reconstruction.
        _bad_log()
    finally:
        _message_log_aux(type_message='INFO', msg=msg)
def warning(message: str) -> None:
    """
    Warning message into the log (mirrored to the recovery buffer).

    Parameters
    ----------
    message : str
        The message to display in the log
    """
    msg = f'{__lvl}{message}'
    try:
        __log.warning(msg)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        _bad_log()
    finally:
        _message_log_aux(type_message='WARNING', msg=msg)
def critical(message: str) -> None:
    """
    Critical message to display in the log (mirrored to the recovery buffer).

    Parameters
    ----------
    message : str
        The message to display in the log
    """
    msg = f'{__lvl}{message}'
    try:
        __log.critical(msg)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        _bad_log()
    finally:
        _message_log_aux(type_message='CRITICAL', msg=msg)
def error(message: str) -> None:
    """
    Error message to display in the log, highlighted with '>>> ... <<<'.

    Parameters
    ----------
    message : str
        The message to display in the log
    """
    msg = f'{__lvl}>>> {message} <<<'
    try:
        __log.error(msg)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt.
        _bad_log()
    finally:
        _message_log_aux(type_message='ERROR', msg=msg)
def _restore_log() -> None:
    """
    Try to restore the log file from the in-memory buffer.

    Raises
    ------
    RestoreLog
        In case the log file cannot be restored
    """
    log_file_name = f'{__log_name[:-4]} - {_RESTORED_LOG_LANGS[cfg.lang()]}.log'
    try:
        with open(f'{__folder_logs}/{log_file_name}', 'w', encoding='UTF-8') as f:
            f.write(__log_aux)
    except Exception as e:
        # Chain the original failure so the traceback keeps its cause.
        raise RestoreLog(message=_RESTORE_EXCEPT_LANGS[cfg.lang()],
                         error=str(e)
                         ) from e
def close() -> None:
    """
    If the log file had any problem to write then try to restore it.
    """
    # __log_ok is cleared by _bad_log() whenever a write failed.
    if not __log_ok:
        _restore_log()
| 23.97026 | 85 | 0.546836 |
ace304390cad8d22239df8f883e2120cec144e2b | 234 | py | Python | src/rest_framework_jwt/__init__.py | drozdowsky/django-rest-framework-jwt | ad88ffbb4a23d11ce06c5e4edaeb886aad05401f | [
"MIT"
] | null | null | null | src/rest_framework_jwt/__init__.py | drozdowsky/django-rest-framework-jwt | ad88ffbb4a23d11ce06c5e4edaeb886aad05401f | [
"MIT"
] | null | null | null | src/rest_framework_jwt/__init__.py | drozdowsky/django-rest-framework-jwt | ad88ffbb4a23d11ce06c5e4edaeb886aad05401f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__title__ = 'drf-jwt'
__version__ = '1.13.0'
__author__ = 'José Padilla, Styria Digital'
__license__ = 'MIT'
__copyright__ = '2014-2017 Blimp LLC, 2018 Styria Digital'
# Version synonym kept for callers that import VERSION directly.
VERSION = __version__
| 21.272727 | 58 | 0.709402 |
ace304b930a6c2b42ceb2c393b2d767ee83fef7a | 6,301 | py | Python | 1WayGAN_Train.py | jerofad/deep-photo-enhancer | 8f004fd5c6a4191d983baff985da76b254eb31eb | [
"MIT"
] | 44 | 2020-03-22T16:05:02.000Z | 2022-03-27T07:17:21.000Z | 1WayGAN_Train.py | jerofad/deep-photo-enhancer | 8f004fd5c6a4191d983baff985da76b254eb31eb | [
"MIT"
] | 6 | 2020-03-23T18:57:10.000Z | 2022-03-12T00:19:51.000Z | 1WayGAN_Train.py | jerofad/deep-photo-enhancer | 8f004fd5c6a4191d983baff985da76b254eb31eb | [
"MIT"
] | 10 | 2020-04-07T12:48:42.000Z | 2021-12-23T07:17:09.000Z | import torch.optim as optim
from torchvision.utils import save_image
from datetime import datetime
from libs.compute import *
from libs.constant import *
from libs.model import *
if __name__ == "__main__":
    # Fine-tune a pretrained one-way GAN generator against a discriminator,
    # periodically checkpointing and dumping sample images, then evaluate
    # PSNR on the test set.
    start_time = datetime.now()
    # Creating generator and discriminator
    generator = Generator()
    generator = nn.DataParallel(generator)
    generator.load_state_dict(torch.load('./gan1_pretrain_50_12.pth'))
    generator.train()
    discriminator = Discriminator()
    discriminator = nn.DataParallel(discriminator)
    if torch.cuda.is_available():
        generator.cuda(device=device)
        discriminator.cuda(device=device)
    # Loading Training and Test Set Data
    trainLoader1, trainLoader2, trainLoader_cross, testLoader = data_loader()
    # MSE Loss and Optimizer
    criterion = nn.MSELoss()
    optimizer_g = optim.Adam(generator.parameters(), lr=LEARNING_RATE, betas=(BETA1, BETA2))
    optimizer_d = optim.Adam(discriminator.parameters(), lr=LEARNING_RATE, betas=(BETA1, BETA2))
    learning_rate = LEARNING_RATE
    # Training Network
    # NOTE(review): dataiter.next() is the pre-1.0 DataLoader iterator API;
    # newer PyTorch uses next(dataiter) -- confirm the pinned torch version.
    dataiter = iter(testLoader)
    gt_test, data_test = dataiter.next()
    input_test, dummy = data_test
    testInput = Variable(input_test.type(Tensor_gpu))
    batches_done = 0
    generator_loss = []
    discriminator_loss = []
    for epoch in range(NUM_EPOCHS_TRAIN):
        # Decay both optimizers' learning rate once per epoch.
        for param_group in optimizer_d.param_groups:
            param_group['lr'] = adjustLearningRate(learning_rate, epoch_num=epoch, decay_rate=DECAY_RATE)
        for param_group in optimizer_g.param_groups:
            param_group['lr'] = adjustLearningRate(learning_rate, epoch_num=epoch, decay_rate=DECAY_RATE)
        for i, (data, gt1) in enumerate(trainLoader_cross, 0):
            input, dummy = data
            groundTruth, dummy = gt1
            trainInput = Variable(input.type(Tensor_gpu)) # X
            real_imgs = Variable(groundTruth.type(Tensor_gpu)) # Y
            # TRAIN GENERATOR
            optimizer_g.zero_grad()
            fake_imgs = generator(trainInput) # Y'
            gLoss = computeGeneratorLoss(trainInput, fake_imgs, discriminator, criterion)
            gLoss.backward(retain_graph=True)
            optimizer_g.step()
            # TRAIN DISCRIMINATOR
            optimizer_d.zero_grad()
            # Real Images
            realValid = discriminator(real_imgs) # D_Y
            # Fake Images
            fakeValid = discriminator(fake_imgs) # D_Y'
            gradientPenalty = computeGradientPenaltyFor1WayGAN(discriminator, real_imgs.data, fake_imgs.data)
            dLoss = computeDiscriminatorLoss(realValid, fakeValid, gradientPenalty)
            dLoss.backward(retain_graph=True)
            optimizer_d.step()
            print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (
                epoch + 1, NUM_EPOCHS_TRAIN, i + 1, len(trainLoader_cross), dLoss.item(), gLoss.item()))
            f = open("./models/log/log_Train.txt", "a+")
            f.write("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\n" % (
                epoch + 1, NUM_EPOCHS_TRAIN, i + 1, len(trainLoader_cross), dLoss.item(), gLoss.item()))
            f.close()
            # Every 50 batches: dump generated samples, checkpoint both
            # networks, and render the fixed test batch.
            if batches_done % 50 == 0:
                for k in range(0, fake_imgs.data.shape[0]):
                    save_image(fake_imgs.data[k], "./models/train_images/1Way/1Way_Train_%d_%d_%d.png" % (epoch+1, batches_done+1, k+1),
                               nrow=1,
                               normalize=True)
                torch.save(generator.state_dict(),
                           './models/train_checkpoint/1Way/gan1_train_' + str(epoch+1) + '_' + str(i+1) + '.pth')
                torch.save(discriminator.state_dict(),
                           './models/train_checkpoint/1Way/discriminator_train_' + str(epoch+1) + '_' + str(i+1) + '.pth')
                fake_test_imgs = generator(testInput)
                for k in range(0, fake_test_imgs.data.shape[0]):
                    save_image(fake_test_imgs.data[k],
                               "./models/train_test_images/1Way/1Way_Train_Test_%d_%d_%d.png" % (epoch+1,batches_done+1, k+1),
                               nrow=1, normalize=True)
            batches_done += 1
            print("Done training discriminator on iteration: %d" % i)
    # TEST NETWORK
    batches_done = 0
    with torch.no_grad():
        psnrAvg = 0.0
        for j, (gt, data) in enumerate(testLoader, 0):
            input, dummy = data
            groundTruth, dummy = gt
            trainInput = Variable(input.type(Tensor_gpu))
            real_imgs = Variable(groundTruth.type(Tensor_gpu))
            output = generator(trainInput)
            loss = criterion(output, real_imgs)
            # PSNR assuming pixel values normalized to [0, 1].
            psnr = 10 * torch.log10(1 / loss)
            psnrAvg += psnr
            if batches_done >= 95:
                for k in range(0, output.data.shape[0]):
                    save_image(output.data[k],
                               "./models/test_images/1Way/test_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1),
                               nrow=1,
                               normalize=True)
                for k in range(0, real_imgs.data.shape[0]):
                    save_image(real_imgs.data[k],
                               "./models/gt_images/1Way/gt_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1),
                               nrow=1,
                               normalize=True)
                for k in range(0, trainInput.data.shape[0]):
                    save_image(trainInput.data[k],
                               "./models/input_images/1Way/input_%d_%d_%d.png" % (batches_done + 1, j + 1, k + 1), nrow=1,
                               normalize=True)
            batches_done += 5
            print("Loss loss: %f" % loss)
            print("PSNR Avg: %f" % (psnrAvg / (j + 1)))
            # NOTE(review): these psnr_Score.txt handles are never closed;
            # they rely on interpreter exit to flush -- consider `with open`.
            f = open("./models/log/psnr_Score.txt", "a+")
            f.write("PSNR Avg: %f" % (psnrAvg / (j + 1)))
    f = open("./models/log/psnr_Score.txt", "a+")
    f.write("Final PSNR Avg: %f" % (psnrAvg / len(testLoader)))
    print("Final PSNR Avg: %f" % (psnrAvg / len(testLoader)))
    end_time = datetime.now()
    print(end_time - start_time)
| 43.157534 | 136 | 0.566101 |
ace304e661bff3dc03e3a59850cd138bc7e62a37 | 1,839 | py | Python | azure_functions_devops_build/service_endpoint/github_service_endpoint_manager.py | coolgeeck/delwar1 | 5d3b2b5dc4933974ff26b0f0a869061129259046 | [
"MIT"
] | 16 | 2019-02-17T22:01:32.000Z | 2022-03-31T22:59:46.000Z | azure_functions_devops_build/service_endpoint/github_service_endpoint_manager.py | coolgeeck/delwar1 | 5d3b2b5dc4933974ff26b0f0a869061129259046 | [
"MIT"
] | 19 | 2019-02-11T23:31:51.000Z | 2021-06-01T23:20:35.000Z | azure_functions_devops_build/service_endpoint/github_service_endpoint_manager.py | coolgeeck/delwar1 | 5d3b2b5dc4933974ff26b0f0a869061129259046 | [
"MIT"
] | 21 | 2019-01-28T21:01:12.000Z | 2022-03-07T16:18:29.000Z | import vsts.service_endpoint.v4_1.models as models
from vsts.exceptions import VstsServiceError
from ..base.base_manager import BaseManager
from .service_endpoint_utils import sanitize_github_repository_fullname
class GithubServiceEndpointManager(BaseManager):
    """Manage Azure DevOps service endpoints that point at GitHub.

    A "service endpoint" is the DevOps-side credential record that lets a
    pipeline talk to GitHub; this class can look endpoints up by repository
    and create new ones backed by a GitHub personal access token (PAT).
    """

    def __init__(self, organization_name, project_name, creds):
        # BaseManager wires up the underlying vsts service endpoint client.
        super(GithubServiceEndpointManager, self).__init__(
            creds, organization_name=organization_name, project_name=project_name
        )

    def get_github_service_endpoints(self, repository_fullname):
        """Return github-type endpoints matching the repository's derived name.

        Returns an empty list when the underlying service call fails
        (e.g. the endpoint does not exist yet).
        """
        service_endpoint_name = self._get_service_github_endpoint_name(repository_fullname)
        try:
            result = self._service_endpoint_client.get_service_endpoints_by_names(
                self._project_name,
                [service_endpoint_name],
                type="github"
            )
        except VstsServiceError:
            # Treat any service-side failure as "no endpoints found".
            return []
        return result

    def create_github_service_endpoint(self, repository_fullname, github_pat):
        """Create a github service endpoint authenticated with ``github_pat``.

        The endpoint name is derived from ``repository_fullname`` so lookups
        in :meth:`get_github_service_endpoints` find it again.
        """
        data = {}
        auth = models.endpoint_authorization.EndpointAuthorization(
            parameters={
                "accessToken": github_pat
            },
            scheme="PersonalAccessToken"
        )
        service_endpoint_name = self._get_service_github_endpoint_name(repository_fullname)
        service_endpoint = models.service_endpoint.ServiceEndpoint(
            administrators_group=None,
            authorization=auth,
            data=data,
            name=service_endpoint_name,
            type="github",
            url="http://github.com"
        )
        return self._service_endpoint_client.create_service_endpoint(service_endpoint, self._project_name)

    def _get_service_github_endpoint_name(self, repository_name):
        # Endpoint names must be sanitized ("owner/repo" is not a valid name).
        return sanitize_github_repository_fullname(repository_name)
ace30720c0f118ab597a9af6e5203a249ba96b82 | 9,107 | py | Python | nova/tests/unit/virt/xenapi/stubs.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | 1 | 2016-07-18T22:05:01.000Z | 2016-07-18T22:05:01.000Z | nova/tests/unit/virt/xenapi/stubs.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | 1 | 2021-03-31T19:29:01.000Z | 2021-03-31T19:29:01.000Z | nova/tests/unit/virt/xenapi/stubs.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:55:41.000Z | 2021-11-12T03:55:41.000Z | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
import sys
import fixtures
import mock
from os_xenapi.client import session
from os_xenapi.client import XenAPI
from oslo_serialization import jsonutils
from nova import test
from nova.virt.xenapi import fake
def stubout_session(test, cls, product_version=(5, 6, 2),
                    product_brand='XenServer', platform_version=(1, 9, 0),
                    **opt_args):
    """Replace XenAPISession internals so tests use a fake session class.

    ``cls`` is instantiated in place of a real XenAPI session (receiving the
    connection url plus ``opt_args``), and the product/platform version
    probes simply return the values supplied by the caller.
    """
    prefix = 'os_xenapi.client.session.XenAPISession.'

    def _fake_create_session(self, url):
        return cls(url, **opt_args)

    def _fake_product_version_and_brand(self):
        return (product_version, product_brand)

    def _fake_platform_version(self):
        return platform_version

    test.stub_out(prefix + '_create_session', _fake_create_session)
    test.stub_out(prefix + '_get_product_version_and_brand',
                  _fake_product_version_and_brand)
    test.stub_out(prefix + '_get_platform_version', _fake_platform_version)
def _make_fake_vdi():
    """Create a VDI in the fake backend's first SR and return its uuid."""
    sr_ref = fake.get_all('SR')[0]
    vdi_ref = fake.create_vdi('', sr_ref)
    vdi_rec = fake.get_record('VDI', vdi_ref)
    return vdi_rec['uuid']
class FakeSessionForVMTests(fake.SessionBase):
    """Stubs out a XenAPISession for VM tests."""

    # Canned ``iptables-save`` output (empty ruleset) served by the fake
    # xenhost plugin's ``iptables_config`` call below.
    _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
                                  "Sun Nov 6 22:49:02 2011\n"
                                  "*filter\n"
                                  ":INPUT ACCEPT [0:0]\n"
                                  ":FORWARD ACCEPT [0:0]\n"
                                  ":OUTPUT ACCEPT [0:0]\n"
                                  "COMMIT\n"
                                  "# Completed on Sun Nov 6 22:49:02 2011\n")

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        # NOTE: rstrip('.py') strips trailing *characters* ('.', 'p', 'y'),
        # not the literal suffix; it happens to work for these plugin names.
        plugin = plugin.rstrip('.py')
        if plugin == 'glance' and method == 'download_vhd2':
            # Simulate a successful image download by minting a fresh VDI
            # and returning its uuid the way the real plugin would.
            root_uuid = _make_fake_vdi()
            return pickle.dumps(dict(root=dict(uuid=root_uuid)))
        elif (plugin, method) == ('xenhost', 'iptables_config'):
            return fake.as_json(out=self._fake_iptables_save_output,
                                err='')
        else:
            # Fall through to the generic fake implementation.
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def VM_start(self, _1, ref, _2, _3):
        """Mark the fake VM record as running; fail if it is not Halted."""
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            # Mirror the real XenAPI error for starting a non-halted VM.
            raise XenAPI.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                  vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False
        vm['domid'] = random.randrange(1, 1 << 16)
        return vm

    def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
        """Start the VM and pin its fake record to the given host."""
        vm_rec = self.VM_start(_1, vm_ref, _2, _3)
        vm_rec['resident_on'] = host_ref

    def VDI_snapshot(self, session_ref, vm_ref, _1):
        # Snapshots are modelled as a fresh read-only VDI in a dummy SR.
        sr_ref = "fakesr"
        return fake.create_vdi('fakelabel', sr_ref, read_only=True)

    def SR_scan(self, session_ref, sr_ref):
        # No-op: the fake backend has nothing to rescan.
        pass
class FakeSessionForFirewallTests(FakeSessionForVMTests):
    """Stubs out a XenApi Session for doing IPTable Firewall tests."""

    def __init__(self, uri, test_case=None):
        super(FakeSessionForFirewallTests, self).__init__(uri)
        # Seed the fake rule sets from the test case, when it provides them.
        if hasattr(test_case, '_in_rules'):
            self._in_rules = test_case._in_rules
        if hasattr(test_case, '_in6_filter_rules'):
            self._in6_filter_rules = test_case._in6_filter_rules
        self._test_case = test_case

    def host_call_plugin(self, _1, _2, plugin, method, args):
        """Mock method for host_call_plugin to be used in unit tests
           for the dom0 iptables Firewall drivers for XenAPI
        """
        plugin = plugin.rstrip('.py')
        if plugin == 'xenhost' and method == 'iptables_config':
            # The command to execute is a json-encoded list
            cmd_args = args.get('cmd_args', None)
            cmd = jsonutils.loads(cmd_args)
            if not cmd:
                ret_str = ''
            else:
                output = ''
                process_input = args.get('process_input', None)
                # "-save" commands replay the seeded rules; "-restore"
                # commands echo the input back and record it on the test
                # case so assertions can inspect what would be applied.
                if cmd == ['ip6tables-save', '-c']:
                    output = '\n'.join(self._in6_filter_rules)
                if cmd == ['iptables-save', '-c']:
                    output = '\n'.join(self._in_rules)
                if cmd == ['iptables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        if self._test_case is not None:
                            self._test_case._out_rules = lines
                        output = '\n'.join(lines)
                if cmd == ['ip6tables-restore', '-c', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        output = '\n'.join(lines)
                ret_str = fake.as_json(out=output, err='')
            return ret_str
        else:
            # Deliberately names FakeSessionForVMTests in super() so the MRO
            # skips that class's host_call_plugin and hits the base fake.
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, args))
class ReplaceModule(fixtures.Fixture):
    """Fixture that installs a stand-in module into ``sys.modules``.

    During ``setUp`` the entry registered under ``name`` is replaced with
    ``new_value``; whatever was there before (possibly ``None``) is put
    back when the fixture is cleaned up.
    """

    def __init__(self, name, new_value):
        self.name = name
        self.new_value = new_value

    def _restore(self, old_value):
        # Cleanup hook: reinstate whatever was registered before setUp ran.
        sys.modules[self.name] = old_value

    def setUp(self):
        super(ReplaceModule, self).setUp()
        displaced = sys.modules.get(self.name)
        sys.modules[self.name] = self.new_value
        self.addCleanup(self._restore, displaced)
class FakeSessionForVolumeTests(fake.SessionBase):
    """Stubs out a XenAPISession for Volume tests."""

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        """Raise INVALID_VDI unless a VDI with this uuid already exists."""
        # Inspect every VDI record in the fake backend and keep the refs
        # whose uuid matches; an empty result means the VDI is unknown.
        matching = [ref for ref in fake.get_all('VDI')
                    if fake.get_record('VDI', ref)['uuid'] == uuid]
        if not matching:
            raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]])
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """Stubs out a XenAPISession for Volume tests: it injects failures."""

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]])

    def PBD_unplug(self, _1, ref):
        # Record the detach on the fake PBD so cleanup paths can proceed.
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False

    def SR_forget(self, _1, ref):
        # No-op: nothing to forget in the fake backend.
        pass
class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
    """Fake session whose migration calls all fail, for error-path tests."""

    def VM_assert_can_migrate(self, session, vmref, migrate_data,
                              live, vdi_map, vif_map, options):
        raise XenAPI.Failure("XenAPI VM.assert_can_migrate failed")

    def host_migrate_receive(self, session, hostref, networkref, options):
        raise XenAPI.Failure("XenAPI host.migrate_receive failed")

    def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
                        vif_map, options):
        raise XenAPI.Failure("XenAPI VM.migrate_send failed")
# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
# over to use XenAPITestBaseNoDB
class XenAPITestBase(test.TestCase):
    """Deprecated DB-backed base class for XenAPI tests."""

    def setUp(self):
        super(XenAPITestBase, self).setUp()
        # Swap the real XenAPI module for the in-memory fake for the
        # duration of the test, then wipe any state it accumulated.
        self.useFixture(ReplaceModule('XenAPI', fake))
        fake.reset()

    def stubout_get_this_vm_uuid(self):
        """Stub vm_utils.get_this_vm_uuid to return the fake dom0's uuid."""
        def f(session):
            vms = [rec['uuid'] for rec
                   in fake.get_all_records('VM').values()
                   if rec['is_control_domain']]
            return vms[0]
        self.stub_out('nova.virt.xenapi.vm_utils.get_this_vm_uuid', f)
class XenAPITestBaseNoDB(test.NoDBTestCase):
    """Preferred base class for XenAPI tests; no database required."""

    def setUp(self):
        super(XenAPITestBaseNoDB, self).setUp()
        # Swap the real XenAPI module for the in-memory fake and reset it.
        self.useFixture(ReplaceModule('XenAPI', fake))
        fake.reset()

    @staticmethod
    def get_fake_session(error=None):
        """Return a MagicMock session; if ``error`` is given, every
        call_xenapi raises a Failure-like exception carrying it.
        """
        fake_session = mock.MagicMock()
        session.apply_session_helpers(fake_session)

        if error is not None:
            class FakeException(Exception):
                details = [error, "a", "b", "c"]

            fake_session.XenAPI.Failure = FakeException
            fake_session.call_xenapi.side_effect = FakeException
        return fake_session
| 37.632231 | 78 | 0.596135 |
ace3074a5fa9f7aa7e2b668abb0f4895ec4b9b3e | 1,134 | py | Python | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/registry.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 5 | 2022-01-30T07:35:58.000Z | 2022-02-08T05:45:20.000Z | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/registry.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | null | null | null | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/registry.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 1 | 2022-03-07T10:51:21.000Z | 2022-03-07T10:51:21.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import Registry
# Global component registries. Each model module registers its classes into
# one of these (via the Registry decorator API) so that builders can look
# components up by name from configuration files.
BACKBONES = Registry('backbone')
HEADS = Registry('head')
RECOGNIZERS = Registry('recognizer')
LOCALIZERS = Registry('localizer')
PARTITIONERS = Registry('partitioner')
LOSSES = Registry('loss')
ROI_EXTRACTORS = Registry('roi_extractor')
DETECTORS = Registry('detectors')
BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')
ESTIMATORS = Registry('estimator')
MULTIMODAL = Registry('multimodal')
SEGMENT = Registry('segment')
ace3074b11fc36c90dc846010d3fc0860eab5548 | 12,499 | py | Python | airflow/utils/db.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-06T21:22:13.000Z | 2021-01-06T21:22:13.000Z | airflow/utils/db.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2018-03-23T10:16:45.000Z | 2018-09-09T11:47:07.000Z | airflow/utils/db.py | codejunction/airflow | 04614841c77154cae64df175252a3bcf64d4e6ea | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
from functools import wraps
from airflow import settings
from airflow.configuration import conf
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
@contextlib.contextmanager
def create_session():
    """
    Contextmanager that will create and teardown a session.

    Commits on clean exit, rolls back and re-raises on any exception,
    and always closes the session.
    """
    session = settings.Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function, if not this wrapper
    will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # co_varnames lists the function's parameters first, so this tells
        # us whether 'session' was supplied positionally by the caller.
        varnames = func.__code__.co_varnames
        passed_positionally = (
            'session' in varnames and varnames.index('session') < len(args))
        if passed_positionally or 'session' in kwargs:
            # Caller manages the session lifecycle themselves.
            return func(*args, **kwargs)
        # Otherwise create a throwaway session for just this call.
        with create_session() as session:
            return func(*args, session=session, **kwargs)
    return wrapper
@provide_session
def merge_conn(conn, session=None):
    """Insert ``conn`` unless a Connection with the same conn_id exists."""
    from airflow.models import Connection
    if not session.query(Connection).filter(Connection.conn_id == conn.conn_id).first():
        session.add(conn)
        session.commit()
@provide_session
def add_default_pool_if_not_exists(session=None):
    """Create the default task pool when the database does not have one."""
    from airflow.models.pool import Pool
    if not Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session):
        default_pool = Pool(
            pool=Pool.DEFAULT_POOL_NAME,
            # Slot count comes from config, falling back to the historical
            # non_pooled_task_slot_count default of 128.
            slots=conf.getint(section='core', key='non_pooled_task_slot_count',
                              fallback=128),
            description="Default pool",
        )
        session.add(default_pool)
        session.commit()
def initdb():
    """Create/upgrade the schema and seed default data.

    Runs the alembic migrations via ``upgradedb()``, inserts a default
    Connection entry for every supported service (each only if missing),
    syncs all DAGs found by the DagBag into the ORM, deactivates DAGs no
    longer present, and creates the flask-appbuilder tables.
    """
    from airflow import models
    from airflow.models import Connection
    upgradedb()

    merge_conn(
        Connection(
            conn_id='airflow_db', conn_type='mysql',
            host='mysql', login='root', password='',
            schema='airflow'))
    merge_conn(
        Connection(
            conn_id='local_mysql', conn_type='mysql',
            host='localhost', login='airflow', password='airflow',
            schema='airflow'))
    merge_conn(
        Connection(
            conn_id='presto_default', conn_type='presto',
            host='localhost',
            schema='hive', port=3400))
    merge_conn(
        Connection(
            conn_id='google_cloud_default', conn_type='google_cloud_platform',
            schema='default',))
    merge_conn(
        Connection(
            conn_id='hive_cli_default', conn_type='hive_cli', port=10000,
            host='localhost', extra='{"use_beeline": true, "auth": ""}',
            schema='default',))
    merge_conn(
        Connection(
            conn_id='pig_cli_default', conn_type='pig_cli',
            schema='default',))
    merge_conn(
        Connection(
            conn_id='hiveserver2_default', conn_type='hiveserver2',
            host='localhost',
            schema='default', port=10000))
    merge_conn(
        Connection(
            conn_id='metastore_default', conn_type='hive_metastore',
            host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
            port=9083))
    merge_conn(
        Connection(
            conn_id='mongo_default', conn_type='mongo',
            host='mongo', port=27017))
    merge_conn(
        Connection(
            conn_id='mysql_default', conn_type='mysql',
            login='root',
            schema='airflow',
            host='mysql'))
    merge_conn(
        Connection(
            conn_id='postgres_default', conn_type='postgres',
            login='postgres',
            password='airflow',
            schema='airflow',
            host='postgres'))
    merge_conn(
        Connection(
            conn_id='sqlite_default', conn_type='sqlite',
            host='/tmp/sqlite_default.db'))
    merge_conn(
        Connection(
            conn_id='http_default', conn_type='http',
            host='https://www.httpbin.org/'))
    merge_conn(
        Connection(
            conn_id='mssql_default', conn_type='mssql',
            host='localhost', port=1433))
    merge_conn(
        Connection(
            conn_id='vertica_default', conn_type='vertica',
            host='localhost', port=5433))
    merge_conn(
        Connection(
            conn_id='wasb_default', conn_type='wasb',
            extra='{"sas_token": null}'))
    merge_conn(
        Connection(
            conn_id='webhdfs_default', conn_type='hdfs',
            host='localhost', port=50070))
    merge_conn(
        Connection(
            conn_id='ssh_default', conn_type='ssh',
            host='localhost'))
    merge_conn(
        Connection(
            conn_id='sftp_default', conn_type='sftp',
            host='localhost', port=22, login='airflow',
            extra='''
                {"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}
            '''))
    merge_conn(
        Connection(
            conn_id='fs_default', conn_type='fs',
            extra='{"path": "/"}'))
    merge_conn(
        Connection(
            conn_id='aws_default', conn_type='aws'))
    merge_conn(
        Connection(
            conn_id='spark_default', conn_type='spark',
            host='yarn', extra='{"queue": "root.default"}'))
    merge_conn(
        Connection(
            conn_id='druid_broker_default', conn_type='druid',
            host='druid-broker', port=8082, extra='{"endpoint": "druid/v2/sql"}'))
    merge_conn(
        Connection(
            conn_id='druid_ingest_default', conn_type='druid',
            host='druid-overlord', port=8081, extra='{"endpoint": "druid/indexer/v1/task"}'))
    merge_conn(
        Connection(
            conn_id='redis_default', conn_type='redis',
            host='redis', port=6379,
            extra='{"db": 0}'))
    merge_conn(
        Connection(
            conn_id='sqoop_default', conn_type='sqoop',
            host='rmdbs', extra=''))
    merge_conn(
        Connection(
            conn_id='emr_default', conn_type='emr',
            extra='''
                {   "Name": "default_job_flow_name",
                    "LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
                    "ReleaseLabel": "emr-4.6.0",
                    "Instances": {
                        "Ec2KeyName": "mykey",
                        "Ec2SubnetId": "somesubnet",
                        "InstanceGroups": [
                            {
                                "Name": "Master nodes",
                                "Market": "ON_DEMAND",
                                "InstanceRole": "MASTER",
                                "InstanceType": "r3.2xlarge",
                                "InstanceCount": 1
                            },
                            {
                                "Name": "Slave nodes",
                                "Market": "ON_DEMAND",
                                "InstanceRole": "CORE",
                                "InstanceType": "r3.2xlarge",
                                "InstanceCount": 1
                            }
                        ],
                        "TerminationProtected": false,
                        "KeepJobFlowAliveWhenNoSteps": false
                    },
                    "Applications":[
                        { "Name": "Spark" }
                    ],
                    "VisibleToAllUsers": true,
                    "JobFlowRole": "EMR_EC2_DefaultRole",
                    "ServiceRole": "EMR_DefaultRole",
                    "Tags": [
                        {
                            "Key": "app",
                            "Value": "analytics"
                        },
                        {
                            "Key": "environment",
                            "Value": "development"
                        }
                    ]
                }
            '''))
    merge_conn(
        Connection(
            conn_id='databricks_default', conn_type='databricks',
            host='localhost'))
    merge_conn(
        Connection(
            conn_id='qubole_default', conn_type='qubole',
            host='localhost'))
    merge_conn(
        Connection(
            conn_id='segment_default', conn_type='segment',
            extra='{"write_key": "my-segment-write-key"}')),
    merge_conn(
        Connection(
            conn_id='azure_data_lake_default', conn_type='azure_data_lake',
            extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }'))
    merge_conn(
        Connection(
            conn_id='azure_cosmos_default', conn_type='azure_cosmos',
            extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }'))
    merge_conn(
        Connection(
            conn_id='azure_container_instances_default', conn_type='azure_container_instances',
            extra='{"tenantId": "<TENANT>", "subscriptionId": "<SUBSCRIPTION ID>" }'))
    merge_conn(
        Connection(
            conn_id='cassandra_default', conn_type='cassandra',
            host='cassandra', port=9042))
    merge_conn(
        Connection(
            conn_id='dingding_default', conn_type='http',
            host='', password=''))
    merge_conn(
        Connection(
            conn_id='opsgenie_default', conn_type='http',
            host='', password=''))
    merge_conn(
        Connection(
            conn_id='pinot_admin_default', conn_type='pinot',
            host='localhost', port=9000))

    dagbag = models.DagBag()
    # Save individual DAGs in the ORM
    for dag in dagbag.dags.values():
        dag.sync_to_db()
    # Deactivate the unknown ones
    models.DAG.deactivate_unknown_dags(dagbag.dags.keys())

    from flask_appbuilder.models.sqla import Base
    Base.metadata.create_all(settings.engine)
def upgradedb():
    """Run alembic migrations up to 'heads' and ensure the default pool."""
    # alembic adds significant import time, so we import it lazily
    from alembic import command
    from alembic.config import Config

    log.info("Creating tables")

    # Locate the migrations directory and alembic.ini relative to the
    # installed airflow package, not the current working directory.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    package_dir = os.path.normpath(os.path.join(current_dir, '..'))
    directory = os.path.join(package_dir, 'migrations')
    config = Config(os.path.join(package_dir, 'alembic.ini'))
    # '%' must be escaped as '%%' because alembic options pass through
    # ConfigParser interpolation.
    config.set_main_option('script_location', directory.replace('%', '%%'))
    config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN.replace('%', '%%'))
    command.upgrade(config, 'heads')
    add_default_pool_if_not_exists()
def resetdb():
    """
    Clear out the database

    Drops all ORM, alembic-version and flask-appbuilder tables, then
    rebuilds everything via initdb().
    """
    from airflow import models

    # We need to add this model manually to get reset working well
    # noinspection PyUnresolvedReferences
    from airflow.models.serialized_dag import SerializedDagModel  # noqa: F401

    # alembic adds significant import time, so we import it lazily
    # noinspection PyUnresolvedReferences
    from alembic.migration import MigrationContext

    log.info("Dropping tables that exist")

    connection = settings.engine.connect()
    models.base.Base.metadata.drop_all(connection)
    mc = MigrationContext.configure(connection)
    if mc._version.exists(connection):
        mc._version.drop(connection)

    from flask_appbuilder.models.sqla import Base
    Base.metadata.drop_all(connection)

    initdb()
| 34.816156 | 99 | 0.569806 |
ace307c668e237a44452396f832f2b58fe0658c4 | 3,191 | py | Python | tsai/models/XceptionTime.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 1,545 | 2020-11-10T22:23:00.000Z | 2022-03-31T19:50:24.000Z | tsai/models/XceptionTime.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 345 | 2020-11-10T20:23:48.000Z | 2022-03-31T16:36:35.000Z | tsai/models/XceptionTime.py | imilas/tsai | 0dc4833ddd9ef5404c20c8379698d1f3666a2d8f | [
"Apache-2.0"
] | 220 | 2020-11-19T21:13:55.000Z | 2022-03-31T23:08:37.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/106_models.XceptionTime.ipynb (unless otherwise specified).
__all__ = ['XceptionModule', 'XceptionBlock', 'XceptionTime']
# Cell
from ..imports import *
from .layers import *
from .utils import *
# Cell
# This is an unofficial PyTorch implementation developed by Ignacio Oguiza - oguiza@gmail.com based on:
# Rahimian, E., Zabihi, S., Atashzar, S. F., Asif, A., & Mohammadi, A. (2019).
# XceptionTime: A Novel Deep Architecture based on Depthwise Separable Convolutions for Hand Gesture Classification. arXiv preprint arXiv:1911.03803.
# and
# Fawaz, H. I., Lucas, B., Forestier, G., Pelletier, C., Schmidt, D. F., Weber, J., ... & Petitjean, F. (2019).
# InceptionTime: Finding AlexNet for Time Series Classification. arXiv preprint arXiv:1909.04939.
# Official InceptionTime tensorflow implementation: https://github.com/hfawaz/InceptionTime
class XceptionModule(Module):
    """Inception-style module built from depthwise-separable convolutions.

    Runs three separable convs (kernel sizes ks, ks//2, ks//4, forced odd)
    plus a maxpool+1x1-conv branch, and concatenates the four outputs.
    """

    def __init__(self, ni, nf, ks=40, bottleneck=True):
        ks = [ks // (2**i) for i in range(3)]
        ks = [k if k % 2 != 0 else k - 1 for k in ks]  # ensure odd ks
        # Optional 1x1 bottleneck reduces channels before the separable convs.
        self.bottleneck = Conv1d(ni, nf, 1, bias=False) if bottleneck else noop
        self.convs = nn.ModuleList([SeparableConv1d(nf if bottleneck else ni, nf, k, bias=False) for k in ks])
        self.maxconvpool = nn.Sequential(*[nn.MaxPool1d(3, stride=1, padding=1), Conv1d(ni, nf, 1, bias=False)])
        self.concat = Concat()

    def forward(self, x):
        input_tensor = x
        x = self.bottleneck(input_tensor)
        # Concatenate the conv branches with the pooled branch (channel dim).
        x = self.concat([l(x) for l in self.convs] + [self.maxconvpool(input_tensor)])
        return x
@delegates(XceptionModule.__init__)
class XceptionBlock(Module):
    """Stack of four XceptionModules with optional residual shortcuts
    after every second module (as in the InceptionTime design).
    """

    def __init__(self, ni, nf, residual=True, **kwargs):
        self.residual = residual
        self.xception, self.shortcut = nn.ModuleList(), nn.ModuleList()
        for i in range(4):
            # NOTE: on shortcut iterations (i=1,3) n_in/n_out still hold the
            # *previous* iteration's values — they are only updated below.
            if self.residual and (i-1) % 2 == 0: self.shortcut.append(BN1d(n_in) if n_in == n_out else ConvBlock(n_in, n_out * 4 * 2, 1, act=None))
            n_out = nf * 2 ** i
            n_in = ni if i == 0 else n_out * 2
            self.xception.append(XceptionModule(n_in, n_out, **kwargs))
        self.add = Add()
        self.act = nn.ReLU()

    def forward(self, x):
        res = x
        for i in range(4):
            x = self.xception[i](x)
            # Apply a residual connection after every second module.
            if self.residual and (i + 1) % 2 == 0: res = x = self.act(self.add(x, self.shortcut[i//2](res)))
        return x
@delegates(XceptionBlock.__init__)
class XceptionTime(Module):
    """XceptionTime classifier: an XceptionBlock backbone followed by a head
    of adaptive average pooling and 1x1 convolutions that taper the channel
    count down to ``c_out`` classes.
    """

    def __init__(self, c_in, c_out, nf=16, nb_filters=None, adaptive_size=50, **kwargs):
        # ``nb_filters`` kept as a deprecated alias of ``nf``.
        nf = ifnone(nf, nb_filters)
        self.block = XceptionBlock(c_in, nf, **kwargs)
        # Block output channels: concat of 4 branches at nf * 2**3 → nf * 32.
        self.head_nf = nf * 32
        self.head = nn.Sequential(nn.AdaptiveAvgPool1d(adaptive_size),
                                  ConvBlock(self.head_nf, self.head_nf//2, 1),
                                  ConvBlock(self.head_nf//2, self.head_nf//4, 1),
                                  ConvBlock(self.head_nf//4, c_out, 1),
                                  GAP1d(1))

    def forward(self, x):
        x = self.block(x)
        x = self.head(x)
        return x
ace308cafb6934a42a5da799910920643aa878c2 | 1,159 | py | Python | zippy/benchmarks/src/benchmarks/python-graph/core/setup.py | lucapele/pele-c | ff6d06794a171f8e1b08fc6246446d9777116f56 | [
"BSD-3-Clause"
] | 319 | 2016-09-22T15:54:48.000Z | 2022-03-18T02:36:58.000Z | zippy/benchmarks/src/benchmarks/python-graph/core/setup.py | lucapele/pele-c | ff6d06794a171f8e1b08fc6246446d9777116f56 | [
"BSD-3-Clause"
] | 9 | 2016-11-03T21:56:41.000Z | 2020-08-09T19:27:37.000Z | core/setup.py | svn2github/python-graph | fdc8a016b26e07db0ae093c0d7df84289d5f4526 | [
"MIT"
] | 27 | 2016-10-06T16:05:32.000Z | 2022-03-18T02:37:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup, find_packages
except ImportError as ie:
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
# Startup
appname = "python-graph-core"
appversion = "1.8.2"
setup(
name = appname,
version = appversion,
author = "Pedro Matiello",
namespace_packages = ["pygraph"],
packages = ["pygraph"] + [ os.path.join("pygraph", a) for a in find_packages("pygraph") ],
author_email = "pmatiello@gmail.com",
description = "A library for working with graphs in Python",
license = "MIT",
keywords = "python graphs hypergraphs networks library algorithms",
url = "http://code.google.com/p/python-graph/",
classifiers = ["License :: OSI Approved :: MIT License","Topic :: Software Development :: Libraries :: Python Modules"],
long_description = "python-graph is a library for working with graphs in Python. This software provides a suitable data structure for representing graphs and a whole set of important algorithms.",
)
| 37.387097 | 204 | 0.675582 |
ace30a1669eba02f578d088143fa954ecb63bd32 | 1,120 | py | Python | examples/py/theocean.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 24,910 | 2017-10-27T21:41:59.000Z | 2022-03-31T23:08:57.000Z | examples/py/theocean.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 8,201 | 2017-10-28T10:19:28.000Z | 2022-03-31T23:49:37.000Z | examples/py/theocean.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | [
"MIT"
] | 6,632 | 2017-10-28T02:53:24.000Z | 2022-03-31T23:20:14.000Z | # -*- coding: utf-8 -*-
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
# Credentials are read from the environment so they never live in the code.
walletAddress = os.environ['WALLET_ADDRESS']
privateKey = os.environ['PRIVATE_KEY']
apiKey = os.environ['API_KEY']
secret = os.environ['SECRET']

ocean = ccxt.theocean({
    'walletAddress': walletAddress,
    'privateKey': privateKey,
    'apiKey': apiKey,
    'secret': secret
})

# get balance
balance = ocean.fetch_balance_by_code('REP')
print('REP balance: ', balance)

# get order book
order_book = ocean.fetch_order_book('REP/ZRX')
print('REP/ZRX orderbook: ', order_book)

# placing order
place_result = ocean.create_order('REP/ZRX', 'limit', 'sell', '0.5', '30')
id = place_result['id']  # NOTE: shadows the builtin ``id`` (example code)
print('result of placing order: ', place_result)

# cancel order (only if it was not fully filled immediately)
if place_result['remaining'] > 0:
    cancel_result = ocean.cancel_order(id)
    print('cancel result: ', cancel_result)

# cancel all open user orders
cancel_all_orders_result = ocean.cancel_all_orders()
print('cancel all orders result: ', cancel_all_orders_result)
ace30aa45078f8fc9b8da03ab332ef353a74d679 | 37,527 | py | Python | synapse/federation/federation_server.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | 2 | 2021-07-07T10:21:41.000Z | 2021-12-28T00:13:20.000Z | synapse/federation/federation_server.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | 2 | 2021-12-17T21:45:54.000Z | 2021-12-29T20:12:09.000Z | synapse/federation/federation_server.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import (
AuthError,
Codes,
FederationError,
IncompatibleRoomVersionError,
NotFoundError,
SynapseError,
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_server_name
if TYPE_CHECKING:
from synapse.server import HomeServer
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# Prometheus metrics for inbound federation traffic.
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)

pdu_process_time = Histogram(
    "synapse_federation_server_pdu_process_time",
    "Time taken to process an event",
)

last_pdu_ts_metric = Gauge(
    "synapse_federation_last_received_pdu_time",
    "The timestamp of the last PDU which was successfully received from the given domain",
    labelnames=("server_name",),
)
class FederationServer(FederationBase):
    """Server side of the federation protocol.
    Receives transactions (PDUs/EDUs), state/backfill/query requests and
    membership operations from remote homeservers, checks them against the
    room's server ACLs, and dispatches them to the appropriate handlers.
    """
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.auth = hs.get_auth()
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()
        self.device_handler = hs.get_device_handler()
        # Ensure the following handlers are loaded since they register callbacks
        # with FederationHandlerRegistry.
        hs.get_directory_handler()
        self._server_linearizer = Linearizer("fed_server")
        # origins that we are currently processing a transaction from.
        # a dict from origin to txn id.
        self._active_transactions = {}  # type: Dict[str, str]
        # We cache results for transaction with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs.get_clock(), "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self.transaction_actions = TransactionActions(self.store)
        self.registry = hs.get_federation_registry()
        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs.get_clock(), "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs.get_clock(), "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        # Domains for which the last_pdu_ts_metric gauge is updated.
        self._federation_metrics_domains = (
            hs.config.federation.federation_metrics_domains
        )
    async def on_backfill_request(
        self, origin: str, room_id: str, versions: List[str], limit: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Handle a backfill request: after an ACL check, return up to `limit`
        PDUs preceding the events named in `versions`, as a transaction dict.
        """
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)
            pdus = await self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )
            res = self._transaction_from_pdus(pdus).get_dict()
        return 200, res
    async def on_incoming_transaction(
        self, origin: str, transaction_data: JsonDict
    ) -> Tuple[int, Dict[str, Any]]:
        """Validate and process an incoming /send/ transaction.
        Rejects oversized transactions, enforces one in-flight transaction per
        origin, and de-duplicates retries via a response cache.
        Returns the HTTP response code and body.
        """
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()
        transaction = Transaction(**transaction_data)
        transaction_id = transaction.transaction_id  # type: ignore
        if not transaction_id:
            raise Exception("Transaction missing transaction_id")
        logger.debug("[%s] Got transaction", transaction_id)
        # Reject malformed transactions early: reject if too many PDUs/EDUs
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")
            return 400, {}
        # we only process one transaction from each origin at a time. We need to do
        # this check here, rather than in _on_incoming_transaction_inner so that we
        # don't cache the rejection in _transaction_resp_cache (so that if the txn
        # arrives again later, we can process it).
        current_transaction = self._active_transactions.get(origin)
        if current_transaction and current_transaction != transaction_id:
            logger.warning(
                "Received another txn %s from %s while still processing %s",
                transaction_id,
                origin,
                current_transaction,
            )
            return 429, {
                "errcode": Codes.UNKNOWN,
                "error": "Too many concurrent transactions",
            }
        # CRITICAL SECTION: we must now not await until we populate _active_transactions
        # in _on_incoming_transaction_inner.
        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
        return await self._transaction_resp_cache.wrap(
            (origin, transaction_id),
            self._on_incoming_transaction_inner,
            origin,
            transaction,
            request_time,
        )
    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Record the origin as active, process the transaction, and always
        clear the active-transaction marker afterwards.
        """
        # CRITICAL SECTION: the first thing we must do (before awaiting) is
        # add an entry to _active_transactions.
        assert origin not in self._active_transactions
        self._active_transactions[origin] = transaction.transaction_id  # type: ignore
        try:
            result = await self._handle_incoming_transaction(
                origin, transaction, request_time
            )
            return result
        finally:
            del self._active_transactions[origin]
    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """Process an incoming transaction and return the HTTP response
        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at
        Returns:
            HTTP response code and body
        """
        # Short-circuit if we have already responded to this exact transaction.
        response = await self.transaction_actions.have_responded(origin, transaction)
        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,  # type: ignore
            )
            return response
        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore
        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )
        response = {"pdus": pdu_results}
        logger.debug("Returning: %s", str(response))
        # Persist the response so retries of the same txn get the same answer.
        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response
    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.
        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at
        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """
        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore
        origin_host, _ = parse_server_name(origin)
        pdus_by_room = {}  # type: Dict[str, List[EventBase]]
        newest_pdu_ts = 0
        for p in transaction.pdus:  # type: ignore
            # FIXME (richardv): I don't think this works:
            #  https://github.com/matrix-org/synapse/issues/8429
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                # Convert the relative "age" into an absolute origin timestamp.
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]
            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")
            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue
            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info("Ignoring PDU: %s", e)
                continue
            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)
            if event.origin_server_ts > newest_pdu_ts:
                newest_pdu_ts = event.origin_server_ts
        pdu_results = {}
        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.
        async def process_pdus_for_room(room_id: str):
            # Processes all PDUs queued for a single room, recording per-event
            # results (or an ACL error for every event) in pdu_results.
            with nested_logging_context(room_id):
                logger.debug("Processing PDUs for %s", room_id)
                try:
                    await self.check_server_matches_acl(origin_host, room_id)
                except AuthError as e:
                    logger.warning(
                        "Ignoring PDUs for room %s from banned server", room_id
                    )
                    for pdu in pdus_by_room[room_id]:
                        event_id = pdu.event_id
                        pdu_results[event_id] = e.error_dict()
                    return
                for pdu in pdus_by_room[room_id]:
                    pdu_results[pdu.event_id] = await process_pdu(pdu)
        async def process_pdu(pdu: EventBase) -> JsonDict:
            # Handles a single PDU; returns {} on success or an error dict.
            event_id = pdu.event_id
            with pdu_process_time.time():
                with nested_logging_context(event_id):
                    try:
                        await self._handle_received_pdu(origin, pdu)
                        return {}
                    except FederationError as e:
                        logger.warning("Error handling PDU %s: %s", event_id, e)
                        return {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                        )
                        return {"error": str(e)}
        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )
        if newest_pdu_ts and origin in self._federation_metrics_domains:
            last_pdu_ts_metric.labels(server_name=origin).set(newest_pdu_ts / 1000)
        return pdu_results
    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction."""
        async def _process_edu(edu_dict):
            # Wraps the raw dict in an Edu and routes it via the registry.
            received_edus_counter.inc()
            edu = Edu(
                origin=origin,
                destination=self.server_name,
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)
        await concurrently_execute(
            _process_edu,
            getattr(transaction, "edus", []),
            TRANSACTION_CONCURRENCY_LIMIT,
        )
    async def on_room_state_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Return the room state (and auth chain) at the given event, after
        checking the ACL and that the requesting server is in the room.
        """
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")
        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            resp = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
                    room_id,
                    event_id,
                )
            )
        room_version = await self.store.get_room_version_id(room_id)
        resp["room_version"] = room_version
        return 200, resp
    async def on_state_ids_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Return just the event IDs of the room state (and auth chain) at the
        given event; results are served through a response cache.
        """
        if not event_id:
            raise NotImplementedError("Specify an event")
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")
        resp = await self._state_ids_resp_cache.wrap(
            (room_id, event_id),
            self._on_state_ids_request_compute,
            room_id,
            event_id,
        )
        return 200, resp
    async def _on_state_ids_request_compute(self, room_id, event_id):
        """Compute the state/auth-chain event IDs for a state_ids request."""
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        """Compute the full state PDUs (and their auth chain) for a state
        request; uses current room state when no event_id is given.
        """
        if event_id:
            pdus = await self.handler.get_state_for_pdu(
                room_id, event_id
            )  # type: Iterable[EventBase]
        else:
            pdus = (await self.state.get_current_state(room_id)).values()
        auth_chain = await self.store.get_auth_chain(
            room_id, [pdu.event_id for pdu in pdus]
        )
        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }
    async def on_pdu_request(
        self, origin: str, event_id: str
    ) -> Tuple[int, Union[JsonDict, str]]:
        """Return a single persisted PDU as a transaction dict, or 404."""
        pdu = await self.handler.get_persisted_pdu(origin, event_id)
        if pdu:
            return 200, self._transaction_from_pdus([pdu]).get_dict()
        else:
            return 404, ""
    async def on_query_request(
        self, query_type: str, args: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        """Dispatch a federation query of the given type to the registry."""
        received_queries_counter.labels(query_type).inc()
        resp = await self.registry.on_query(query_type, args)
        return 200, resp
    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str, supported_versions: List[str]
    ) -> Dict[str, Any]:
        """Build a join event template for the remote user, rejecting the
        request if the room version is not supported by the requester.
        """
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        room_version = await self.store.get_room_version_id(room_id)
        if room_version not in supported_versions:
            logger.warning(
                "Room version %s not in %s", room_version, supported_versions
            )
            raise IncompatibleRoomVersionError(room_version=room_version)
        pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
    async def on_invite_request(
        self, origin: str, content: JsonDict, room_version_id: str
    ) -> Dict[str, Any]:
        """Validate and process an invite event sent over federation."""
        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if not room_version:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )
        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}
    async def on_send_join_request(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        """Process a signed join event and return the room state/auth chain."""
        logger.debug("on_send_join_request: content: %s", content)
        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        res_pdus = await self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        return {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }
    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> Dict[str, Any]:
        """Build a leave event template for the remote user."""
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)
        room_version = await self.store.get_room_version_id(room_id)
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
    async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
        """Process a signed leave event sent over federation."""
        logger.debug("on_send_leave_request: content: %s", content)
        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        await self.handler.on_send_leave_request(origin, pdu)
        return {}
    async def on_event_auth(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Return the auth chain for the given event, after an ACL check."""
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)
            time_now = self._clock.time_msec()
            auth_pdus = await self.handler.on_event_auth(event_id)
            res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
        return 200, res
    @log_function
    async def on_query_client_keys(
        self, origin: str, content: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        """Delegate a client_keys query to the generic query dispatcher."""
        return await self.on_query_request("client_keys", content)
    async def on_query_user_devices(
        self, origin: str, user_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        """Return the device list for a local user queried over federation."""
        keys = await self.device_handler.on_federation_query_user_devices(user_id)
        return 200, keys
    @trace
    async def on_claim_client_keys(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        """Claim one-time keys for the (user, device, algorithm) triples in
        the request and return the claimed keys, decoded from stored JSON.
        """
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))
        log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
        results = await self.store.claim_e2e_one_time_keys(query)
        json_result = {}  # type: Dict[str, Dict[str, dict]]
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_str in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json_decoder.decode(json_str)
                    }
        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(
                (
                    "%s for %s:%s" % (key_id, user_id, device_id)
                    for user_id, user_keys in json_result.items()
                    for device_id, device_keys in user_keys.items()
                    for key_id, _ in device_keys.items()
                )
            ),
        )
        return {"one_time_keys": json_result}
    async def on_get_missing_events(
        self,
        origin: str,
        room_id: str,
        earliest_events: List[str],
        latest_events: List[str],
        limit: int,
    ) -> Dict[str, list]:
        """Return events on paths between `earliest_events` and
        `latest_events`, up to `limit`, after an ACL check.
        """
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)
            logger.debug(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d",
                earliest_events,
                latest_events,
                limit,
            )
            missing_events = await self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit
            )
            if len(missing_events) < 5:
                logger.debug(
                    "Returning %d events: %r", len(missing_events), missing_events
                )
            else:
                logger.debug("Returning %d events", len(missing_events))
            time_now = self._clock.time_msec()
        return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}
    @log_function
    async def on_openid_userinfo(self, token: str) -> Optional[str]:
        """Resolve an OpenID token to a user ID, or None if invalid/expired."""
        ts_now_ms = self._clock.time_msec()
        return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)
    def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )
    async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
        """Process a PDU received in a federation /send/ transaction.
        If the event is invalid, then this method throws a FederationError.
        (The error will then be logged and sent back to the sender (which
        probably won't do anything with it), and other events in the
        transaction will be processed as normal).
        It is likely that we'll then receive other events which refer to
        this rejected_event in their prev_events, etc.  When that happens,
        we'll attempt to fetch the rejected event again, which will presumably
        fail, so those second-generation events will also get rejected.
        Eventually, we get to the point where there are more than 10 events
        between any new events and the original rejected event. Since we
        only try to backfill 10 events deep on received pdu, we then accept the
        new event, possibly introducing a discontinuity in the DAG, with new
        forward extremities, so normal service is approximately returned,
        until we try to backfill across the discontinuity.
        Args:
            origin: server which sent the pdu
            pdu: received pdu
        Raises: FederationError if the signatures / hash do not match, or
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)
        # Check signature.
        try:
            pdu = await self._check_sigs_and_hash(room_version, pdu)
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
    def __str__(self) -> str:
        return "<ReplicationLayer(%s)>" % self.server_name
    async def exchange_third_party_invite(
        self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
    ) -> None:
        """Forward a third-party invite exchange to the federation handler."""
        await self.handler.exchange_third_party_invite(
            sender_user_id, target_user_id, room_id, signed
        )
    async def on_exchange_third_party_invite_request(self, event_dict: Dict) -> None:
        """Forward an incoming third-party invite exchange request."""
        await self.handler.on_exchange_third_party_invite_request(event_dict)
    async def check_server_matches_acl(self, server_name: str, room_id: str) -> None:
        """Check if the given server is allowed by the server ACLs in the room
        Args:
            server_name: name of server, *without any port part*
            room_id: ID of the room to check
        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = await self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
        if not acl_event_id:
            # No ACL event in the room: all servers are allowed.
            return
        acl_event = await self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
            return
        raise AuthError(code=403, msg="Server is banned from room")
def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
    """Check if the given server is allowed by the ACL event
    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event
    Returns:
        True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)
    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        # (startswith is safe on an empty server_name, unlike server_name[0]
        # which would raise IndexError)
        if server_name.startswith("["):
            return False
        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False
    # next,  check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False
    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True
    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False
def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    """Return True if *server_name* matches the single ACL glob *acl_entry*."""
    if isinstance(acl_entry, str):
        # Compile the glob into a regex and test the server name against it.
        return bool(glob_to_regex(acl_entry).match(server_name))
    # Malformed (non-string) entries are logged and treated as non-matching.
    logger.warning(
        "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
    )
    return False
class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """
    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()
        # These are safe to load in monolith mode, but will explode if we try
        # and use them. However we have guards before we use them to ensure that
        # we don't route to ourselves, and in monolith mode that will always be
        # the case.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)
        # Registered callbacks, keyed by EDU type / query type.
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = (
            {}
        )  # type: Dict[str, Callable[[dict], Awaitable[JsonDict]]]
        # Map from type to instance names that we should route EDU handling to.
        # We randomly choose one instance from the list to route to for each new
        # EDU received.
        self._edu_type_to_instance = {}  # type: Dict[str, List[str]]
    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ) -> None:
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.
        Args:
            edu_type: The type of the incoming EDU to register handler for
            handler: A callable invoked on incoming EDU
                of the given type. The arguments are the origin server name and
                the EDU contents.
        Raises:
            KeyError: if a handler is already registered for this EDU type.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
        logger.info("Registering federation EDU handler for %r", edu_type)
        self.edu_handlers[edu_type] = handler
    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], Awaitable[JsonDict]]
    ) -> None:
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.
        Args:
            query_type: Category name of the query, which should match
                the string used by make_query.
            handler: Invoked to handle
                incoming queries of this type. The return will be yielded
                on and the result used as the response to the query request.
        Raises:
            KeyError: if a handler is already registered for this query type.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))
        logger.info("Registering federation query handler for %r", query_type)
        self.query_handlers[query_type] = handler
    def register_instance_for_edu(self, edu_type: str, instance_name: str) -> None:
        """Register that the EDU handler is on a different instance than master."""
        self._edu_type_to_instance[edu_type] = [instance_name]
    def register_instances_for_edu(
        self, edu_type: str, instance_names: List[str]
    ) -> None:
        """Register that the EDU handler is on multiple instances."""
        self._edu_type_to_instance[edu_type] = instance_names
    async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
        """Dispatch an incoming EDU to a local handler, or route it to
        another worker instance registered for this EDU type.
        """
        # Presence EDUs are dropped entirely when presence is disabled.
        if not self.config.use_presence and edu_type == EduTypes.Presence:
            return
        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
            return
        # Check if we can route it somewhere else that isn't us
        instances = self._edu_type_to_instance.get(edu_type, ["master"])
        if self._instance_name not in instances:
            # Pick an instance randomly so that we don't overload one.
            route_to = random.choice(instances)
            try:
                await self._send_edu(
                    instance_name=route_to,
                    edu_type=edu_type,
                    origin=origin,
                    content=content,
                )
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
            return
        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)
    async def on_query(self, query_type: str, args: dict) -> JsonDict:
        """Dispatch an incoming federation query to a local handler, or (on a
        non-master instance is not possible) forward it to master.
        Raises:
            NotFoundError: if no handler exists anywhere for this query type.
        """
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)
        # Check if we can route it somewhere else that isn't us
        if self._instance_name == "master":
            return await self._get_query_client(query_type=query_type, args=args)
        # Uh oh, no handler! Let's raise an exception so the request returns an
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))
| 38.807653 | 95 | 0.631758 |
ace30c32c6fae7e5697bee0b245a473e7aa1e9d2 | 29,498 | py | Python | json_regex_diff/jsondiff.py | adamknox-wf/json-regex-difftool | cc59ccd8f41f94955bfb516ec4a7d86b7572e675 | [
"Apache-2.0"
] | 9 | 2015-10-22T14:19:23.000Z | 2021-04-19T06:45:36.000Z | json_regex_diff/jsondiff.py | adamknox-wf/json-regex-difftool | cc59ccd8f41f94955bfb516ec4a7d86b7572e675 | [
"Apache-2.0"
] | 4 | 2015-03-06T21:04:58.000Z | 2016-11-30T19:44:20.000Z | json_regex_diff/jsondiff.py | adamknox-wf/json-regex-difftool | cc59ccd8f41f94955bfb516ec4a7d86b7572e675 | [
"Apache-2.0"
] | 7 | 2015-03-20T20:23:33.000Z | 2021-06-30T02:59:59.000Z | #!/usr/bin/env python
import argparse
import copy
import json
import logging
import os
import re
from builtins import bytes
from six import text_type
class JsonDiff(object):
    def __init__(self, new_json, model_map, logger=logging.getLogger(),
                 is_directory=False, list_depth=0):
        """Hold the JSON under test, the model(s) to compare it against, and
        the state accumulated during comparison.
        :param new_json: parsed JSON object to compare
        :param model_map: dict mapping model name -> parsed model JSON
        :param logger: logger used for diagnostics
        :param is_directory: True when model_map was built from a directory
        :param list_depth: reserved recursion-depth control (unused)
        """
        self._logger = logger
        self.new_json = new_json
        self.model = model_map
        self.is_directory = is_directory
        # Accumulated differences from comparison runs.
        self.difference = []
        # variable to control how deep to recursively search
        # currently not used
        self.list_depth = list_depth
@classmethod
def from_json(cls, new_json, old_json, logger=logging.getLogger()):
"""
Helper constructor to allow diff given json objects
:param new_json: New json file to compare
:param old_json: Old version of the json as base from comparison
** Important to note that multiple 'models' are not currently supported
when passing in raw json **
"""
model_map = {'old_json': old_json}
return cls(new_json, model_map, logger, is_directory=False)
@classmethod
def from_file(cls, json_file, json_model, logger=logging.getLogger()):
"""
Helper constructor to allow diff files
:param json_file: Path to new json file
:param json_model: Path to old json file or directory of models to
compare with
** Important to note that multiple 'models' are not currently supported
when computing a diff **
"""
try:
new_json = json.load(open(json_file))
except IOError:
logger.error("JSON File not found. Check name and try again")
new_json = None
exit(1)
# Set up model map
model_map = {}
if os.path.isfile(json_model):
is_directory = False
try:
model_map[json_model] = json.load(open(json_model))
except IOError:
logger.error("Model file not found. "
"Check name and try again")
exit(1)
elif os.path.isdir(json_model):
is_directory = True
for item in os.listdir(json_model):
try:
if not json_model.endswith('/'):
json_model += '/'
filename = json_model + item
model_map[item] = json.load(open(filename))
except IOError:
logger.error("Could not open file")
else:
is_directory = False
logger.error("File or directory not found. "
"Check name and try again.")
exit(1)
if len(model_map) < 1:
logger.error("No files could be read in specified directory")
exit(1)
return cls(new_json, model_map, logger, is_directory)
@staticmethod
def _clear_match_row(match_table, row, cur_index):
for i in range(len(match_table[0])):
match_table[row][i] = 0
match_table[row][cur_index] = 1
@staticmethod
def _clear_match_col(match_table, col, cur_index):
for i in range(len(match_table[0])):
match_table[i][col] = 0
match_table[cur_index][col] = 1
    def _one_to_one(self, strings, regexes):
        """Find a one-to-one assignment between key strings and model regexes.
        Builds a dim x dim 0/1 match table (regex rows vs string columns) and
        iteratively eliminates ambiguity: whenever a row or column has exactly
        one match, that pairing is fixed and competing entries are cleared.
        :param strings: list of JSON keys to match
        :param regexes: list of model key regexes (same length as strings)
        :return: dict mapping regex -> matched string, or {} when some field
            has no possible match. Exits the process on ambiguous matching.
        """
        dim = len(strings)
        match_chart = [[0 for i in range(dim)] for j in range(dim)]
        # set up matching table
        # will be a 2d array with:
        # 0s indicating no match
        # 1s indicating a match
        for r in range(dim):
            for s in range(dim):
                match = re.match(regexes[r], strings[s])
                if match:
                    match_chart[r][s] = 1
        # minimize match table
        # sum the rows and columns
        # rows
        sums = [sum(match_chart[k][:]) for k in range(dim)]
        # add in columns
        sums.extend(sum([match_chart[i][j] for i in range(dim)])
                    for j in range(dim))
        num_matches, index, turns_wo_match = 0, 0, 0
        max_index = 2 * dim
        # indices 0..dim-1 are rows, dim..2*dim-1 are columns
        minimized = [False for i in range(2 * dim)]
        # loop until all matched or no more minimization is possible
        while num_matches < max_index and turns_wo_match < max_index \
                and not sums == [1] * (2 * dim):
            if sums[index] == 0:
                return {}  # no match for one of the fields
            elif sums[index] == 1 and not minimized[index]:
                # find coordinate
                if index < dim:  # in a row
                    for i in range(dim):
                        if match_chart[index][i] == 1:
                            # This row's only match: remove it from all other rows.
                            self._clear_match_col(match_chart, i, index)
                            minimized[index] = True
                            continue
                else:  # in a col
                    for i in range(dim):
                        if match_chart[i][index] == 1:
                            # This column's only match: remove it from all other columns.
                            self._clear_match_row(match_chart, i, index)
                            minimized[index] = True
                            continue
                turns_wo_match = 0
                num_matches += 1
                # update sums
                sums = [sum(match_chart[k][:]) for k in range(dim)]
                # add in columns
                sums.extend(sum([match_chart[i][j] for i in range(dim)])
                            for j in range(dim))
            else:
                turns_wo_match += 1
            index = (index + 1) % max_index
        if num_matches == max_index or sums == [1] * (2 * dim):
            # Fully resolved: read the final regex -> string pairing off the table.
            final_mapping = {}
            for i in range(dim):
                # find match
                for j in range(dim):
                    if match_chart[i][j] == 1:
                        final_mapping[regexes[i]] = strings[j]
                        continue
            return final_mapping
        else:  # ambiguous
            self._logger.error("Ambiguous matching please fix your model "
                               "to use more specific regexes")
            exit(1)
def _lists_equal(self, json_list, regex_list):
# length check
if not len(json_list) == len(regex_list):
return False
# go through indices and ensure they are all equal
for index in range(len(json_list)):
if not type(json_list[index]) == type(regex_list[index]):
return False
if isinstance(json_list[index], dict):
# do json comparison
if not self.equals_model(json_list[index], regex_list[index]):
return False
elif isinstance(json_list[index], list):
# another list comparison
if not self._lists_equal(json_list[index], regex_list[index]):
return False
elif isinstance(json_list[index], text_type):
# regex match
if not re.match(regex_list[index], json_list[index]):
return False
else:
# some other type
if not json_list[index] == regex_list[index]:
return False
return True
def equals_model(self, json_input, model):
"""
Our general process will be to read both inputs as json objects
We will then conduct a DFS
At each level, check that the size of the key set is the same
Check that the key set has a 1-1 correspondence
Check for each key that the values are the same
The model will treat all keys as regexes. All values will be
dicts, lists, or regexes
"""
json_keys = []
model_keys = []
if isinstance(json_input, dict) and isinstance(model, dict):
json_keys = list(json_input)
model_keys = list(model)
elif isinstance(json_input, list) and isinstance(model, list):
return self._lists_equal(json_input, model)
elif type(json_input) is not type(model):
return False
else:
self._logger.error("Not proper JSON format. "
"Please check your input")
exit(1)
# check size
if not len(json_keys) == len(model_keys):
return False
# check 1-1 correspondence
key_matches = self._one_to_one(json_keys, model_keys)
if not len(json_keys) == len(list(key_matches)):
return False
# check values
for key in key_matches:
if not type(json_input.get((key_matches[key]))) == \
type(model[key]):
return False
if isinstance(model[key], dict):
# recursive search
if not self.equals_model(json_input.get(key_matches[key]),
model[key]):
return False
# otherwise continue
elif isinstance(model[key], list):
# lists are deterministic! yay!
if not self._lists_equal(json_input.get(key_matches[key]),
model[key]):
return False
elif isinstance(model[key], text_type):
if not re.match(model[key], json_input.get(key_matches[key])):
return False
# maybe an int or something?
else:
if not json_input.get(key_matches[key]) == model[key]:
return False
# if we make it through all of this, hooray! Match
return True
@staticmethod
def equals_json(_json1, _json2):
"""
This module assumes that we are passing to json files.
To determine equivalence we will simply load both, and compare with
direct equivalence
:return True if equal, False otherwise
"""
return _json1 == _json2
def diff_model(self, _json1, _json2, path='', depth=-1):
if not type(_json1) == type(_json2):
if isinstance(
_json2, text_type) and type(_json1) not in [list, dict]:
# Potential regex match
self._diff_json_item(_json1, _json2, path, True)
else:
self.difference.append('TypeDifference : {} - {}:'
' ({}), {}: ({})'
.format(path, type(_json1).__name__,
text_type(_json1),
type(_json2).__name__,
text_type(_json2)))
else:
# they are the same type
# Three choices: dict, list, item
if isinstance(_json1, dict):
self._diff_json_dict(_json1, _json2, path, depth, True)
elif isinstance(_json1, list):
self._diff_json_list(_json1, _json2, path, depth, True)
else:
self._diff_json_item(_json1, _json2, path, True)
def diff_json(self, _json1, _json2, path='', depth=-1):
"""
This code computes the diff between two different JSON objects.
It also computes a line by line delta to be used to determine
similarity
This scoring will be especially useful in the regex version as it will
allow for easier classification
This code follows a very similar structure to
https://github.com/monsur/jsoncompare/blob/master/jsoncompare.
Assume json1 is new and json2 is old
Depth should be -1 for full recursive search
Depth == 0 -> do straight list or dict equivalence
Depth > 0 do recursive search, but decrement depth so we do not search
forever
** Currently depth is not used. This code is added to ease enhancements
in the future should we decide **
Resulting difference is stored in the class's self.difference variable
"""
if not type(_json1) == type(_json2):
self.difference.append('TypeDifference : {} - is {}: ({}),'
' but was {}: ({})'
.format(path, type(_json1).__name__,
text_type(_json1),
type(_json2).__name__,
text_type(_json2)))
else:
# they are the same type
# Three choices: dict, list, item
if isinstance(_json1, dict):
self._diff_json_dict(_json1, _json2, path, depth, False)
elif isinstance(_json1, list):
self._diff_json_list(_json1, _json2, path, depth, False)
else:
self._diff_json_item(_json1, _json2, path, False)
def _diff_json_dict(self, _json1, _json2, path, depth, use_regex):
# Depth greater > 0 indicates we should compare keys
# Negative depth means continuously recursively search
if not depth == 0:
json1_keys = list(_json1)
json2_keys = list(_json2)
matched_keys = []
for key in json1_keys:
if len(path) == 0:
new_path = key
else:
new_path = '{}.{}'.format(path, key)
if key in json2_keys:
# match
matched_keys.append(key)
json2_keys.remove(key)
else:
# key in json1 that is not in json2
# expand that k-v pair into diff
self._expand_diff(_json1[key], new_path, True)
for key in json2_keys:
if len(path) == 0:
new_path = key
else:
new_path = '{}.{}'.format(path, key)
# all keys remaining are in 2, but not 1
# expand these k-v pairs into diff as well
self._expand_diff(_json2[key], new_path, False)
# now that we have matched keys, recursively search
for key in matched_keys:
if len(path) == 0:
new_path = key
else:
new_path = '{}.{}'.format(path, key)
if use_regex:
self.diff_model(_json1[key], _json2[key], new_path,
depth - 1)
else:
self.diff_json(_json1[key], _json2[key], new_path,
depth - 1)
    def _diff_json_list(self, _json1, _json2, path, depth, use_regex):
        """Diff two lists, pairing each new item with its best old match.

        For each item of ``_json1`` the method scans the (mutated!) ``_json2``
        for a perfect or best-effort match, scoring candidates by the number
        of recursive differences ("irrelevance").  Matched old items are
        removed from ``_json2`` in place; leftovers on either side are
        expanded into the diff as additions/removals.

        WARNING: ``_json2`` is consumed destructively; callers must not rely
        on it afterwards.
        """
        # save a snapshot of difference for comparison
        # in the different recursive branches
        current_difference = copy.deepcopy(self.difference)
        json2_original = copy.deepcopy(_json2)
        json1_matches = []
        # Try to find a match for each item in JSON1
        '''
        ' This WILL find a match for the first item in a a list of similar
        ' dictionaries even if later dicts in the list are a better match
        '
        ' TODO Fix this bug -- 2 pass diff?
        '''
        cur_index = 0
        for (index, item) in enumerate(_json1):
            prev_index = cur_index
            # map from the index in the list to irrelevance score
            # irrelevance score is higher the more unrelated
            # 0 is perfect match
            index_to_irrelevance = {}
            # map from the index in the list to the changeset associated
            # between this 'item' and _json2[index]
            index_to_changeset = {}
            while cur_index < len(_json2):
                if not use_regex and item == _json2[cur_index]:
                    # perfect match
                    index_to_irrelevance[cur_index] = 0
                    json1_matches.append(item)
                    _json2.remove(_json2[cur_index])
                    break
                elif use_regex and type(item) not in [list, dict]:
                    if isinstance(_json2[cur_index], text_type):
                        # we can use as a pattern though item could be an
                        # integer say
                        match = re.match(_json2[cur_index], text_type(item))
                        if match:
                            index_to_irrelevance[cur_index] = 0
                            json1_matches.append(item)
                            _json2.remove(_json2[cur_index])
                            break
                        else:
                            # no possible match
                            index_to_irrelevance[cur_index] = -1
                    else:
                        # Can't use regex-- test strict equality
                        if item == _json2[cur_index]:
                            # perfect match
                            # NOTE(review): this rebinds the whole dict to the
                            # int 0 -- `index_to_irrelevance[cur_index] = 0`
                            # looks intended; as written, `list(...)` below
                            # would raise TypeError.  Confirm and fix.
                            index_to_irrelevance = 0
                            json1_matches.append(item)
                            _json2.remove(_json2[cur_index])
                        else:
                            # no match possible
                            index_to_irrelevance[cur_index] = -1
                    # NOTE(review): this `continue` skips the `cur_index += 1`
                    # at the bottom of the while body, so the no-match case
                    # re-tests the same index -- possible infinite loop;
                    # confirm intended control flow.
                    continue
                elif depth == 0 or type(item) not in [list, dict] or type(
                        item) is not type(_json2[cur_index]):
                    # failed surface match
                    # there might be a match later on in the list
                    index_to_irrelevance[
                        cur_index] = -1  # to indicate no possible match
                else:
                    # failed, but do recursive search to find best match
                    new_path = "{}[{}]".format(path, index)
                    if use_regex:
                        self.diff_model(item, _json2[cur_index], new_path,
                                        depth - 1)
                    else:
                        self.diff_json(item, _json2[cur_index], new_path,
                                       depth - 1)
                    # determine the difference of the recursive branch to find
                    # best match
                    index_to_irrelevance[cur_index] = len(
                        [diff_item for diff_item in self.difference if
                         diff_item not in current_difference])
                    index_to_changeset[cur_index] = [diff_item for diff_item in
                                                     self.difference if
                                                     diff_item not in
                                                     current_difference]
                    # set difference back to before the diff
                    self.difference = copy.deepcopy(current_difference)
                    self._logger.debug("Resetting diff from recursive branch")
                cur_index += 1
            '''
            ' Matching strategy
            '
            ' 1) If there is a 0 irrelevance: perfect match, move to next item
            ' 2) If there are all -1 irrelevance: no match, pick lowest index
            ' 3) If there are any with > 0 irrelevance pick the lowest one as
            '    best match
            '    - In case of tie, lowest index wins
            '''
            indices = list(index_to_irrelevance)
            if len(indices) == 0:
                break
            indices.sort()
            best_match_score = -1
            match_index = indices[0]
            for i in indices:
                if index_to_irrelevance[i] == 0:
                    best_match_score = 0
                    break
                elif index_to_irrelevance[i] < 0:
                    continue
                else:
                    if best_match_score < 0 \
                            or index_to_irrelevance[i] < best_match_score:
                        best_match_score = index_to_irrelevance[i]
                        match_index = i
            if best_match_score > 0:
                # treat as: 'better than nothing match so we'll take it'
                self.difference.extend(index_to_changeset[match_index])
                for entry in index_to_changeset[match_index]:
                    self._logger.debug(entry)
                json1_matches.append(item)
                _json2.remove(_json2[match_index])
                cur_index = match_index  # Should be the after the match
            elif best_match_score < 0:
                cur_index = prev_index
        # At this point we have two lists with the items that weren't matched
        match_index = 0
        for index in range(len(_json1)):
            if match_index < len(json1_matches) and _json1[index] == \
                    json1_matches[match_index]:
                match_index += 1
            else:
                # Unmatched new item: record as an addition.
                new_path = "{}[{}]".format(path, index)
                self._expand_diff(_json1[index], new_path, True)
        original_index = 0
        for index in range(len(_json2)):
            # Find the item in the original
            # NOTE(review): the lookup scans `json2_original` reversed and
            # reconstructs the position as len - original_index - 1; unclear
            # why the reversal is needed -- confirm against duplicate-item
            # inputs before changing.
            while not _json2[index] == json2_original[::-1][original_index]:
                original_index = (original_index + 1) % len(json2_original)
            new_path = "{}[{}]".format(path, len(
                json2_original) - original_index - 1)
            self._expand_diff(_json2[index], new_path, False)
            original_index = (original_index + 1) % len(json2_original)
def _diff_json_item(self, _json1, _json2, path, use_regex):
if isinstance(_json1, text_type) :
_json1 = _json1.encode('ascii', 'ignore')
if isinstance(_json2, text_type):
_json2 = _json2.encode('ascii', 'ignore')
if use_regex and isinstance(_json2, bytes):
match = re.match(_json2, bytes(_json1))
if not match:
self.difference.append(
'Changed: {} to {} from {}'.format(path, _json1, _json2))
self._logger.debug('Changed: {} to {} from {}'
.format(path, _json1, _json2))
else:
if not _json1 == _json2:
self.difference.append(
'Changed: {} to {} from {}'.format(path, _json1, _json2))
self._logger.debug('Changed: {} to {} from {}'
.format(path, _json1, _json2))
def _expand_diff(self, blob, path, new_item):
"""
recursively add everything at this 'level' to the diff
:param blob: The item (can be list, dict or item) to expand into the
diff
:param path: current path of the item
:param new_item: true if we are in new json (things added),
false if old (things removed)
"""
# Three possibilities: dict, list, item
if new_item:
c = '+'
else:
c = '-'
if isinstance(blob, dict):
for key in blob:
if len(path) == 0:
new_path = key
else:
new_path = "{}.{}".format(path, key)
if type(blob[key]) not in [list, dict]:
if isinstance(blob[key], text_type):
self.difference.append(
'{}: {}={}'.format(c, new_path,
blob[key].encode('ascii',
'ignore')))
self._logger.debug('{}: {}={}'
.format(c, new_path,
blob[key]
.encode('ascii', 'ignore')))
else:
self.difference.append(
'{}: {}={}'.format(c, new_path, blob[key]))
self._logger.debug(
'{}: {}={}'.format(c, new_path, blob[key]))
else:
self._expand_diff(blob[key], new_path, new_item)
elif isinstance(blob, list):
for (index, item) in enumerate(blob):
new_path = "{}[{}]".format(path, index)
if isinstance(blob[index], (list, dict)):
self._expand_diff(item[index], new_path, new_item)
if isinstance(blob[index], text_type):
self.difference.append(
'{}: {}={}'.format(c, new_path,
blob[index].encode('ascii',
'ignore')))
self._logger.debug(
'{}: {}={}'.format(c, new_path,
blob[index].encode('ascii',
'ignore')))
else:
self.difference.append(
'{}: {}={}'.format(c, new_path, blob[index]))
self._logger.debug(
'{}: {}={}'.format(c, new_path, blob[index]))
else:
pass
else:
self.difference.append('{}: {}={}'.format(c, path, blob))
self._logger.debug('{}: {}={}'.format(c, path, blob))
def comparison(self, use_model):
for model_name in self.model:
if use_model:
if self.equals_model(self.new_json, self.model[model_name]):
return model_name if self.is_directory else True
else:
if self.equals_json(self.new_json, self.model[model_name]):
return model_name if self.is_directory else True
# no match
return False
def diff(self, use_model):
difference = []
self._logger.info(self.model)
for model_name in self.model:
if use_model:
self.diff_model(self.new_json, self.model[model_name])
else:
self.diff_json(self.new_json, self.model[model_name])
self._logger.info('Diff from {}\n'.format(model_name))
for change in self.difference:
# log instead of print,
# in case a module wants to suppress output
self._logger.info(change.encode('ascii', 'ignore'))
difference.append(self.difference)
# Reinitialize so that we can run against multiple models
self.difference = []
self.list_depth = 0
return difference if len(difference) > 1 else difference[0]
def main():
    """CLI entry point: parse arguments, configure logging, and run either
    a comparison or a diff between the two JSON inputs."""
    p = argparse.ArgumentParser(
        description='Tool to check equivalence and difference of two JSON '
                    'files with regex support',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog='NOTE: If there are no regexes in your JSON do not use the '
               '--use_model flag\n'
               '\n'
               'Usage examples: \n'
               '\n'
               'To do JSON to JSON comparison (default behavior):\n'
               ' ./json_diff.py path/to/file1.json path/to/file2.json \n'
               '\n'
               'To compare a single json file against a directory of models:\n'
               ' ./json_diff.py --use_model path/to/file.json '
               'path/to/models\n'
               '\n'
               'To compute the diff between to JSON documents: \n'
               ' ./json_diff.py -d path/to/new.json path/to/old.json'
    )
    p.add_argument('--use_model', action="store_true",
                   help="Determine whether to treat second input as regular "
                        "json or a model file with regex support")
    p.add_argument('-d', '--diff', action="store_true",
                   help="Set tool to do diff instead of comparison. "
                        "(comparison if not flagged).")
    p.add_argument('--logging_level', default='INFO', help="e.g. WARNING, "
                                                           "INFO, DEBUG, 10,"
                                                           "50, etc...")
    p.add_argument('json', help='The path of the json file')
    p.add_argument('json_model', metavar='json/json_model',
                   help="The path of the .json file or directory of .json "
                        "models with regex support"
                        "**Note diffs between a file and a directory are not "
                        "supported.")
    options = p.parse_args()
    # Route all tool output through the 'jsondiff' logger so importing
    # modules can control verbosity.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger('jsondiff')
    logger.addHandler(console_handler)
    logger.setLevel(options.logging_level)
    diff_engine = JsonDiff.from_file(options.json, options.json_model, logger)
    if options.diff:
        # Diff mode only works file-vs-file, not file-vs-directory.
        if os.path.isdir(options.json_model):
            raise Exception(
                "Unsupported operation: We do not allow diff against a "
                "directory. Must provide a filename")
        else:
            diff_engine.diff(options.use_model)
    else:
        logger.info(diff_engine.comparison(options.use_model))
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    main()
| 42.019943 | 79 | 0.502271 |
ace30c843f063103e606979facc1a784a9a0b25e | 876 | py | Python | tests/__init__.py | enricorusso/incubator-ariatosca | 3748b1962697712bde29c9de781d867c6c5ffad1 | [
"Apache-2.0"
] | 1 | 2018-10-13T06:32:10.000Z | 2018-10-13T06:32:10.000Z | tests/__init__.py | enricorusso/incubator-ariatosca | 3748b1962697712bde29c9de781d867c6c5ffad1 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | enricorusso/incubator-ariatosca | 3748b1962697712bde29c9de781d867c6c5ffad1 | [
"Apache-2.0"
] | 1 | 2020-06-16T15:13:06.000Z | 2020-06-16T15:13:06.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from . import storage, mock
# Absolute path two directory levels above this file (the project root).
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
| 41.714286 | 74 | 0.773973 |
ace30dbe1a5a25a3de4a4d5eb9f935b6b5e031dd | 2,459 | py | Python | q2_feature_table/_merge.py | stevendbrown/q2-feature-table | 14ead90c8bf1a29cbbd8d35bf8c06329677e7642 | [
"BSD-3-Clause"
] | null | null | null | q2_feature_table/_merge.py | stevendbrown/q2-feature-table | 14ead90c8bf1a29cbbd8d35bf8c06329677e7642 | [
"BSD-3-Clause"
] | null | null | null | q2_feature_table/_merge.py | stevendbrown/q2-feature-table | 14ead90c8bf1a29cbbd8d35bf8c06329677e7642 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import biom
import pandas as pd
import collections
def overlap_methods():
    """Return the supported strategies for handling overlapping IDs when
    merging feature tables."""
    return (
        'error_on_overlapping_sample',
        'error_on_overlapping_feature',
        'sum',
    )
def _get_overlapping(tables, axis):
ids = collections.Counter()
for table in tables:
ids.update(table.ids(axis=axis))
return {e for e, c in ids.items() if c > 1}
def merge(tables: biom.Table,
          overlap_method: str='error_on_overlapping_sample') -> biom.Table:
    """Merge a list of feature tables into a single table.

    ``overlap_method`` selects the behavior when the same sample/feature ID
    appears in more than one table: raise a ValueError naming the
    overlapping IDs, or sum the overlapping counts.
    """
    if len(tables) == 1:
        return tables[0]
    if overlap_method == 'sum':
        # Pairwise merge sums counts for shared IDs.
        merged, *remaining = tables
        for table in remaining:
            merged = merged.merge(table)
        return merged
    if overlap_method == 'error_on_overlapping_sample':
        try:
            return tables[0].concat(tables[1:], 'sample')
        except biom.exception.DisjointIDError:
            overlapping = _get_overlapping(tables, 'sample')
            raise ValueError('Same samples are present in some of the '
                             'provided tables: %s' % ', '.join(overlapping))
    if overlap_method == 'error_on_overlapping_feature':
        try:
            return tables[0].concat(tables[1:], 'observation')
        except biom.exception.DisjointIDError:
            overlapping = _get_overlapping(tables, 'observation')
            raise ValueError('Same features are present in some of the '
                             'provided tables: %s' % ', '.join(overlapping))
    raise ValueError('Invalid overlap method: %s. Please provide one of '
                     'the following methods: %s.' %
                     (overlap_method, ', '.join(overlap_methods())))
def _merge_feature_data(data: pd.Series) -> pd.Series:
data = iter(data)
result = next(data) # There is always at least 1
for d in data:
result = result.combine_first(d)
return result
def merge_seqs(data: pd.Series) -> pd.Series:
    """Merge feature sequences from multiple inputs; on ID collisions the
    earlier input's sequence wins (delegates to _merge_feature_data)."""
    return _merge_feature_data(data)
def merge_taxa(data: pd.Series) -> pd.Series:
    """Merge feature taxonomies from multiple inputs; on ID collisions the
    earlier input's taxonomy wins (delegates to _merge_feature_data)."""
    return _merge_feature_data(data)
| 34.633803 | 78 | 0.591297 |
ace30ee4e650ee5d52dd435370d8442f2f43bdee | 947 | py | Python | problems/76_minimum_window_substring.py | lucasheriques/leetcode-solutions | cd36b5df46a75a0cb17569faf9cf56186864f68a | [
"MIT"
] | null | null | null | problems/76_minimum_window_substring.py | lucasheriques/leetcode-solutions | cd36b5df46a75a0cb17569faf9cf56186864f68a | [
"MIT"
] | null | null | null | problems/76_minimum_window_substring.py | lucasheriques/leetcode-solutions | cd36b5df46a75a0cb17569faf9cf56186864f68a | [
"MIT"
] | null | null | null | from collections import Counter
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Return the minimum window of ``s`` containing all characters of
        ``t`` (with multiplicity), or '' when no such window exists.

        Sliding-window approach: grow ``end`` until the window covers
        ``t``, then shrink ``begin`` while it still covers, recording the
        smallest covering window seen.  O(len(s) + len(t)) time.
        """
        if not s or not t or len(s) < len(t):
            return ""
        table = Counter(t)
        counter = len(table)  # distinct chars of t still missing
        begin = end = 0
        best = None  # (start, stop) of the smallest window found so far
        while end < len(s):
            endchar = s[end]
            if endchar in table:
                table[endchar] -= 1
                if table[endchar] == 0:
                    counter -= 1
            end += 1
            # Window covers t: shrink from the left while it still covers.
            while counter == 0:
                if best is None or best[1] - best[0] > end - begin:
                    best = (begin, end)
                beginchar = s[begin]
                if beginchar in table:
                    table[beginchar] += 1
                    if table[beginchar] > 0:
                        counter += 1
                begin += 1
        # Fix over the original: a no-match input previously returned all
        # of ``s`` (response stayed (0, len(s))); now it returns ''.
        return "" if best is None else s[best[0]:best[1]]
# NOTE(review): stray smoke-test call executed at import time; result is
# discarded.  Harmless, but consider removing or moving under a
# `if __name__ == "__main__":` guard.
Solution().minWindow("ab", "b")
| 22.023256 | 59 | 0.428722 |
ace30f4d1a4e1430d215089f1344748e3b886f28 | 2,497 | py | Python | Aula 37/View/web.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | Aula 37/View/web.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | Aula 37/View/web.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect
import sys
sys.path.append(r'C:\Users\900132\Desktop\GitHub\TrabalhosPython2\Aula 37')
from Controller.squads_controller import SquadsController
from Controller.framework_frontend_controller import FrameworkFrontendController
from Controller.linguagem_backend_controller import LinguagemBackendController
from Controller.sqbds_controller import SgbdsController
from Model.squads import Squads
from Model.framework_frontend import FrameworkFrontend
from Model.linguagem_backend import LinguagemBackend
from Model.sgbds import Sgbds
from Model.sgbds import Sgbds
# Flask application; templates are read from ./template (singular, matching
# the project layout).
app = Flask(__name__, template_folder="template")
# Controller singletons shared by every request handler below.
squads_controller = SquadsController()
front_controller = FrameworkFrontendController()
back_controller = LinguagemBackendController()
sgbds_controller = SgbdsController()
# Application title passed to every template as titulo_app.
nome = 'Cadastros'
@app.route('/')
def inicio():
    """Render the landing page."""
    return render_template('index.html', titulo_app=nome)
@app.route('/listar')
def listar():
    """List every registered squad."""
    squads = squads_controller.listar_todos()
    return render_template('listar.html', titulo_app=nome, lista=squads)
@app.route('/cadastrar')
def cadastrar():
    """Show the registration form.

    Starts from an empty Squads object; when an ``id`` query parameter is
    present the existing record is loaded instead (edit mode).
    """
    squads = Squads()
    squads.linguagemBackend = LinguagemBackend()
    squads.frameworkFrontend = FrameworkFrontend()
    squads.sgbds = Sgbds()
    if 'id' in request.args:
        squads = squads_controller.buscar_por_id(request.args['id'])
    return render_template('cadastrar.html', titulo_app=nome, squads=squads)
@app.route('/excluir')
def excluir():
    """Delete the squad identified by the ``id`` query parameter, then
    return to the listing page."""
    squads_controller.deletar(int(request.args['id']))
    return redirect('/listar')
@app.route('/salvar')
def salvar():
    """Persist a squad from the submitted query parameters: insert when the
    id is 0, otherwise update the existing record."""
    squads = Squads()
    squads.id = request.args['id']
    squads.nome = request.args['nome']
    squads.descricao = request.args['descricao']
    squads.numeroPessoas = request.args['numeroPessoas']
    back = LinguagemBackend()
    back.id = request.args['id_backend']
    back.nome = request.args['linguagemBackEnd']
    front = FrameworkFrontend()
    front.id = request.args['id_front']
    front.nome = request.args['frameworkFrontEnd']
    sgbds = Sgbds()
    sgbds.id = request.args['id_sgbds']
    # NOTE(review): this reads the same 'nome' parameter used for the squad
    # name above -- likely a distinct form field (e.g. the SGBD name) was
    # intended; verify against the cadastrar.html form.
    sgbds.nome = request.args['nome']
    squads.linguagemBackend = back
    squads.frameworkFrontend = front
    squads.sgbds = sgbds
    # NOTE(review): request.args values are strings, so `squads.id == 0`
    # ('0' == 0) is always False and this always calls alterar(); presumably
    # `int(squads.id) == 0` was intended -- confirm.
    if squads.id == 0:
        squads_controller.salvar(squads)
    else:
        squads_controller.alterar(squads)
    return redirect('/listar')
# NOTE(review): debug=True enables the interactive debugger and auto-reload;
# do not use in production.
app.run(debug=True)
| 30.45122 | 81 | 0.737285 |
ace30f62ebca37bbc100f09c6e1ca3a7ec0b229a | 1,922 | py | Python | ML/50-mlps/17-keras-cnn-both-dropout/main.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/50-mlps/17-keras-cnn-both-dropout/main.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | null | null | null | ML/50-mlps/17-keras-cnn-both-dropout/main.py | PepSalehi/algorithms | 1c20f57185e6324aa840ccff98e69764b4213131 | [
"MIT"
] | 1 | 2019-12-09T21:40:46.000Z | 2019-12-09T21:40:46.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party modules
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
from keras.models import Sequential
import numpy as np
# internal modules
import hasy_tools
# Load the data
data = hasy_tools.load_data()
x_train = data['x_train']
y_train = data['y_train']
# NOTE(review): the validation split reuses the training arrays
# ('x_train'/'y_train'); if the dataset provides a separate validation
# split, 'x_validate'/'y_validate' keys were probably intended -- confirm.
x_validate = data['x_train']
y_validate = data['y_train']
x_test = data['x_test']
y_test = data['y_test']

# One-Hot encoding: row i of the identity matrix is the one-hot vector
# for class i.
y_train = np.eye(hasy_tools.n_classes)[y_train.squeeze()]
y_validate = np.eye(hasy_tools.n_classes)[y_validate.squeeze()]
y_test = np.eye(hasy_tools.n_classes)[y_test.squeeze()]

# Preprocessing (as defined by hasy_tools)
x_train = hasy_tools.preprocess(x_train)
x_validate = hasy_tools.preprocess(x_validate)
x_test = hasy_tools.preprocess(x_test)

# Define the model: two small conv layers, then a dense classifier with
# dropout after each hidden dense layer.
model = Sequential()
model.add(Conv2D(16, (3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (3, 3)))
model.add(Flatten())
model.add(Dense(128, activation='tanh'))
model.add(Dropout(0.50))
model.add(Dense(256))
model.add(Dropout(0.50))
model.add(Dense(hasy_tools.n_classes, activation='softmax'))

# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Fit the model; training progress goes to log.csv and the best model
# (by validation loss) is checkpointed every 10 epochs.
csv_logger = CSVLogger('log.csv', append=True, separator=';')
checkpointer = ModelCheckpoint(filepath='checkpoint.h5',
                               verbose=1,
                               period=10,
                               save_best_only=True)
model.fit(x_train, y_train,
          validation_data=(x_validate, y_validate),
          epochs=700,
          batch_size=128,
          callbacks=[csv_logger, checkpointer])

# Serialize model
model.save('model.h5')

# evaluate the model on the held-out test set
scores = model.evaluate(x_test, y_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
| 28.264706 | 70 | 0.687825 |
ace30fb478fa5e5e18056f09ba79153e26ce1022 | 11,362 | py | Python | test/functional/rpc_psbt.py | pocketnetapp/pocketnet.core | 2bdcd58bd8a4210d1117c06d52295c0b62c4061d | [
"Apache-2.0"
] | 1 | 2022-02-07T14:19:54.000Z | 2022-02-07T14:19:54.000Z | test/functional/rpc_psbt.py | pocketnetapp/pocketnet.core | 2bdcd58bd8a4210d1117c06d52295c0b62c4061d | [
"Apache-2.0"
] | null | null | null | test/functional/rpc_psbt.py | pocketnetapp/pocketnet.core | 2bdcd58bd8a4210d1117c06d52295c0b62c4061d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from test_framework.test_framework import PocketcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, find_output
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# Create one-input, one-output, no-fee transaction:
class PSBTTest(PocketcoinTestFramework):
    def set_test_params(self):
        """Configure the test: reuse the cached chain and spin up 3 nodes."""
        self.setup_clean_chain = False
        self.num_nodes = 3
    def skip_test_if_missing_module(self):
        """Skip the whole test when the build has no wallet support."""
        self.skip_if_no_wallet()
def run_test(self):
# Create and fund a raw tx for sending 10 PKOIN
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a psbt with signatures cannot be converted
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'])
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 =self.nodes[0].sendtoaddress(node2_addr, 13)
self.nodes[0].generate(6)
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13)
vout2 = find_output(self.nodes[2], txid2, 13)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    PSBTTest().main()
| 49.4 | 209 | 0.645485 |
ace310a575760e9edaf3df76d49dbc925c0ddc80 | 6,663 | py | Python | openff/interchange/drivers/report.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 10 | 2021-06-17T20:10:53.000Z | 2022-02-24T15:43:25.000Z | openff/interchange/drivers/report.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 198 | 2021-06-11T19:49:08.000Z | 2022-03-31T13:33:12.000Z | openff/interchange/drivers/report.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 7 | 2021-06-18T18:17:32.000Z | 2022-01-25T18:40:52.000Z | """Storing and processing results of energy evaluations."""
import warnings
from typing import Dict, Optional
import pandas as pd
from openff.units import unit
from pydantic import validator
from openff.interchange.exceptions import EnergyError, MissingEnergyError
from openff.interchange.models import DefaultModel
from openff.interchange.types import FloatQuantity
# Convenience unit: kilojoule per mole, used for default tolerances and reporting.
kj_mol = unit.kilojoule / unit.mol
class EnergyReport(DefaultModel):
    """A lightweight class containing single-point energies as computed by energy tests.

    Each term ("Bond", "Angle", "Torsion", "vdW", "Electrostatics") maps to a
    unit-tagged quantity, or ``None`` if that term has not been computed yet.
    """

    # TODO: Use FloatQuantity, not float
    energies: Dict[str, Optional[FloatQuantity]] = {
        "Bond": None,
        "Angle": None,
        "Torsion": None,
        "vdW": None,
        "Electrostatics": None,
    }

    @validator("energies")
    def validate_energies(cls, v: Dict) -> Dict:
        """Coerce plain numbers in the mapping into unit-tagged quantities."""
        for key, val in v.items():
            if not isinstance(val, unit.Quantity):
                v[key] = FloatQuantity.validate_type(val)
        return v

    def __getitem__(self, item: str) -> Optional[FloatQuantity]:
        """Return the energy for *item*, the sum of all terms for "total", or None.

        Raises:
            LookupError: If *item* is not a string.
        """
        if not isinstance(item, str):
            raise LookupError(
                "Only str arguments can be currently be used for lookups.\n"
                f"Found item {item} of type {type(item)}"
            )
        if item in self.energies.keys():
            return self.energies[item]
        if item.lower() == "total":
            # NOTE: raises if any individual term is still None
            return sum(self.energies.values())  # type: ignore[return-value]
        return None

    def update_energies(self, new_energies: Dict) -> None:
        """Update the energies in this report with new value(s)."""
        self.energies.update(self.validate_energies(new_energies))

    # TODO: Better way of exposing tolerances
    def compare(
        self,
        other: "EnergyReport",
        custom_tolerances: Optional[Dict[str, FloatQuantity]] = None,
    ) -> None:
        """
        Compare this `EnergyReport` to another `EnergyReport`.

        Energies are grouped into four categories (bond, angle, torsion, and nonbonded) with
        default tolerances for each set to 1e-3 kJ/mol.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        other: EnergyReport
            The other `EnergyReport` to compare energies against
        custom_tolerances: dict of str: `FloatQuantity`, optional
            Custom energy tolerances to use to use in comparisons.

        Raises
        ------
        MissingEnergyError
            If a term is present in only one of the two reports.
        EnergyError
            If any energy difference exceeds its tolerance.
        """
        tolerances: Dict[str, FloatQuantity] = {
            "Bond": 1e-3 * kj_mol,
            "Angle": 1e-3 * kj_mol,
            "Torsion": 1e-3 * kj_mol,
            "vdW": 1e-3 * kj_mol,
            "Electrostatics": 1e-3 * kj_mol,
        }
        if custom_tolerances is not None:
            tolerances.update(custom_tolerances)
        tolerances = self.validate_energies(tolerances)

        # Collect one row per failing term and build the DataFrame once at the
        # end; DataFrame.append was deprecated and removed in pandas >= 2.0.
        error_rows = []

        for key in self.energies:
            # Keep the original short-circuit: only look up other.energies[key]
            # lazily, since keys like "vdW" may be absent from the other report.
            if self.energies[key] is None:
                if other.energies[key] is None:
                    continue
                # Bug fix: the original repeated the both-None test here, which
                # made this unreachable; a term present in only one report is a
                # missing-energy error.
                raise MissingEnergyError
            if key in other.energies and other.energies[key] is None:
                raise MissingEnergyError

            # TODO: Remove this when OpenMM's NonbondedForce is split out
            if key == "Nonbonded":
                this_nonbonded = self.energies["Nonbonded"]
                if "Nonbonded" in other.energies:
                    other_nonbonded = other.energies["Nonbonded"]
                else:
                    other_nonbonded = (
                        other.energies["vdW"] + other.energies["Electrostatics"]
                    )
            elif key in ["vdW", "Electrostatics"] and key not in other.energies:
                this_nonbonded = self.energies["vdW"] + self.energies["Electrostatics"]
                other_nonbonded = other.energies["Nonbonded"]
            else:
                # Ordinary term-by-term comparison.
                diff = self.energies[key] - other.energies[key]
                tolerance = tolerances[key]
                if abs(diff) > tolerance:
                    error_rows.append(
                        {
                            "key": key,
                            "diff": diff,
                            "tol": tolerance,
                            "ener1": self.energies[key],
                            "ener2": other.energies[key],
                        }
                    )
                continue

            # Combined-nonbonded comparison: one report splits vdW/Electrostatics
            # while the other reports a single "Nonbonded" term.
            diff = this_nonbonded - other_nonbonded
            try:
                tolerance = tolerances[key]
            except KeyError as e:
                if "Nonbonded" in str(e):
                    tolerance = tolerances["vdW"] + tolerances["Electrostatics"]
                else:
                    raise
            if abs(diff) > tolerance:
                error_rows.append(
                    {
                        "key": "Nonbonded",
                        "diff": diff,
                        "tol": tolerance,
                        "ener1": this_nonbonded,
                        "ener2": other_nonbonded,
                    }
                )

        if error_rows:
            errors = pd.DataFrame(
                error_rows, columns=["key", "diff", "tol", "ener1", "ener2"]
            )
            # Strip units so the table reads in plain kJ/mol.
            for col_name in ["diff", "tol", "ener1", "ener2"]:
                errors[col_name] = [x.m_as(kj_mol) for x in errors[col_name]]
            raise EnergyError(
                "\nSome energy difference(s) exceed tolerances! "
                "\nAll values are reported in kJ/mol:"
                "\n" + str(errors.to_string(index=False))
            )

    def __sub__(self, other: "EnergyReport") -> Dict[str, FloatQuantity]:
        """Return per-term differences (self - other) for keys both reports share.

        Keys missing from *other* are skipped with a warning.
        """
        diff: Dict[str, FloatQuantity] = {}
        for key in self.energies:
            if key not in other.energies:
                warnings.warn(f"Did not find key {key} in second report")
                continue
            diff[key] = self.energies[key] - other.energies[key]  # type: ignore
        return diff

    def __str__(self) -> str:
        return (
            "Energies:\n\n"
            f"Bond: \t\t{self['Bond']}\n"
            f"Angle: \t\t{self['Angle']}\n"
            f"Torsion: \t\t{self['Torsion']}\n"
            f"Nonbonded: \t\t{self['Nonbonded']}\n"
            f"vdW: \t\t{self['vdW']}\n"
            f"Electrostatics:\t\t{self['Electrostatics']}\n"
        )
| 37.644068 | 110 | 0.547051 |
ace3110afa4b9c2a38dbe3e9eb1d8230c0bf2229 | 7,274 | py | Python | cifar_imagenet/models/cifar/adagradnet_fixed.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | cifar_imagenet/models/cifar/adagradnet_fixed.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | cifar_imagenet/models/cifar/adagradnet_fixed.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
momentum net
"""
import torch
import torch.nn as nn
import math
from torch.nn.parameter import Parameter
__all__ = ['adagradnet_fixed']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (spatial size kept at stride 1)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    """Pre-activation basic residual block carrying a two-tensor state [x, y].

    The forward pass computes a residual branch f(x) = conv(relu(bn(conv(
    relu(bn(x)))))) and updates:
        outy = shortcut(x) - step_size * f(x)
        outx = (1 + momentum) * outy - momentum * shortcut(y)
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 downsamplegrad=None, step_size=2.0, momentum=0.5):
        """Bug fix: accept ``downsamplegrad`` in the same position as Bottleneck.

        ``MomentumNet._make_layer`` passes ``downsamplegrad`` as the 5th
        positional argument to every block; without this parameter it would
        bind to ``step_size`` (a Sequential or None), breaking the forward
        pass. BasicBlock has no gradient accumulator, so the argument is
        stored but unused.
        """
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        # Accepted for call-signature compatibility with Bottleneck; unused here.
        self.downsamplegrad = downsamplegrad
        self.stride = stride
        # for momentum net
        self.step_size = step_size
        self.momentum = momentum

    def forward(self, invec):
        """Take [x, y], return [outx, outy] per the momentum update above."""
        x, y = invec[0], invec[1]
        residualx = x
        residualy = y
        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            # Project both state tensors to the new shape/channel count.
            residualx = self.downsample(x)
            residualy = self.downsample(y)
        outy = residualx - self.step_size * out
        outx = (1.0 + self.momentum) * outy - self.momentum * residualy
        return [outx, outy]
class Bottleneck(nn.Module):
    """Pre-activation bottleneck block with an AdaGrad-style output scaling.

    State is a pair ``[x, sumgrad]``: ``x`` flows through a 1x1 -> 3x3 -> 1x1
    pre-activation stack, ``sumgrad`` accumulates the batch-mean of the
    squared branch output and rescales the branch by its inverse square root
    before the residual addition.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 downsamplegrad=None, step_size=2.0, momentum=0.5):
        super(Bottleneck, self).__init__()
        # Submodules are created in this exact order (it fixes weight init).
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.downsamplegrad = downsamplegrad
        self.stride = stride
        # for momentum net
        self.step_size = step_size
        self.momentum = momentum

    def forward(self, invec):
        """Take [x, sumgrad], return [normalized branch + shortcut, new sumgrad]."""
        x, sumgrad = invec
        shortcut = x
        # Pre-activation stack: (BN -> ReLU -> conv) three times.
        h = self.conv1(self.relu(self.bn1(x)))
        h = self.conv2(self.relu(self.bn2(h)))
        h = self.conv3(self.relu(self.bn3(h)))
        if self.downsample is not None:
            shortcut = self.downsample(x)
        if self.downsamplegrad is not None:
            # Project the accumulator through 1/sqrt space, then square back.
            projected = self.downsamplegrad(torch.rsqrt(sumgrad))
            sumgrad = projected * projected
        sumgrad = sumgrad + torch.mean(h * h, dim=0, keepdim=True)
        h = h * torch.rsqrt(sumgrad + 1e-8)
        return [h + shortcut, sumgrad]
class MomentumNet(nn.Module):
    """CIFAR-style residual network whose stages carry a two-tensor state.

    The stem conv produces ``x``; stages are fed ``[x, zeros_like(x)]`` and
    each block returns an updated pair. ``feature_vec`` selects which element
    of the pair ('x' -> out[0], anything else -> out[1]) feeds the BN/ReLU/
    avgpool/FC head. Depth must be 6n+2 for basicblock or 9n+2 for bottleneck.
    """
    def __init__(self, depth, step_size=2.0, momentum=0.5, num_classes=1000, block_name='BasicBlock', feature_vec='x'):
        super(MomentumNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = 16
        # for momentum net
        self.step_size = step_size
        self.momentum = momentum
        self.feature_vec = feature_vec
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 16, n, step_size=self.step_size, momentum=self.momentum)
        self.layer2 = self._make_layer(block, 32, n, stride=2, step_size=self.step_size, momentum=self.momentum)
        self.layer3 = self._make_layer(block, 64, n, stride=2, step_size=self.step_size, momentum=self.momentum)
        self.bn = nn.BatchNorm2d(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He-style init for convolutions; identity-like init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, step_size=2.0, momentum=0.5):
        # Build one stage: a (possibly downsampling) first block, then
        # `blocks - 1` identity-shaped blocks.
        downsample = None
        downsamplegrad = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
            downsamplegrad = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        layers = []
        # NOTE(review): downsamplegrad is passed as the 5th positional argument.
        # Bottleneck accepts it there; BasicBlock (as originally written) does
        # not, so for basicblock configs it would bind to step_size — confirm
        # the BasicBlock signature matches Bottleneck's.
        layers.append(block(self.inplanes, planes, stride, downsample, downsamplegrad, step_size=step_size, momentum=momentum))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, step_size=step_size, momentum=momentum))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem conv, then thread the [x, zeros] pair through the three stages.
        x = self.conv1(x)
        out = [x, torch.zeros_like(x)]
        out = self.layer1(out) # 32x32
        out = self.layer2(out) # 16x16
        out = self.layer3(out) # 8x8
        if self.feature_vec=='x':
            x = out[0]
        else:
            x = out[1]
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def adagradnet_fixed(**kwargs):
    """Factory for the fixed AdagradNet: forwards all keyword arguments to MomentumNet."""
    network = MomentumNet(**kwargs)
    return network
# def momentum_net20(**kwargs):
# return MomentumNet(num_classes=10, depth=20, block_name="basicblock")
# def momentum_net56(**kwargs):
# return MomentumNet(num_classes=10, depth=56, block_name="bottleneck")
# def momentum_net110(**kwargs):
# return MomentumNet(num_classes=10, depth=110, block_name="bottleneck")
# def momentum_net164(**kwargs):
# return MomentumNet(num_classes=10, depth=164, block_name="bottleneck")
# def momentum_net290(**kwargs):
# return MomentumNet(num_classes=10, depth=290, block_name="bottleneck")
| 34.311321 | 127 | 0.589772 |
ace31121532fa003e59ac5628756baab8bd01de8 | 33 | py | Python | tests/__init__.py | matz-e/sphinx-bluebrain-theme | fb2a48fcc95705c04f50bfb5306fdb476765f956 | [
"MIT"
] | 2 | 2020-08-08T18:47:51.000Z | 2021-07-23T13:56:51.000Z | tests/__init__.py | matz-e/sphinx-bluebrain-theme | fb2a48fcc95705c04f50bfb5306fdb476765f956 | [
"MIT"
] | 15 | 2020-04-09T13:24:12.000Z | 2022-03-29T08:24:45.000Z | tests/__init__.py | matz-e/sphinx-bluebrain-theme | fb2a48fcc95705c04f50bfb5306fdb476765f956 | [
"MIT"
] | 2 | 2021-04-22T08:15:13.000Z | 2021-12-22T08:23:37.000Z | """Theme and converter tests."""
| 16.5 | 32 | 0.666667 |
ace31161d2dd015ab9938edbbabf3b83e71ca10d | 9,372 | py | Python | src/cloudformation_cli_python_lib/resource.py | Brianwithay21/cloudformation-cli-python-plugin | 8d2b8b3e663900251cf7642640d52da64f70f432 | [
"Apache-2.0"
] | 14 | 2019-11-19T22:49:42.000Z | 2019-11-24T21:15:05.000Z | src/cloudformation_cli_python_lib/resource.py | Brianwithay21/cloudformation-cli-python-plugin | 8d2b8b3e663900251cf7642640d52da64f70f432 | [
"Apache-2.0"
] | 19 | 2019-11-20T01:52:12.000Z | 2019-11-25T23:10:09.000Z | src/cloudformation_cli_python_lib/resource.py | Brianwithay21/cloudformation-cli-python-plugin | 8d2b8b3e663900251cf7642640d52da64f70f432 | [
"Apache-2.0"
] | 7 | 2019-11-19T23:44:14.000Z | 2019-11-20T19:01:37.000Z | import json
import logging
import traceback
from datetime import datetime
from functools import wraps
from typing import Any, Callable, MutableMapping, Optional, Tuple, Type, Union
from .boto3_proxy import SessionProxy, _get_boto_session
from .exceptions import InternalFailure, InvalidRequest, _HandlerError
from .interface import (
Action,
BaseResourceHandlerRequest,
HandlerErrorCode,
OperationStatus,
ProgressEvent,
)
from .log_delivery import ProviderLogHandler
from .metrics import MetricsPublisherProxy
from .utils import (
BaseModel,
Credentials,
HandlerRequest,
KitchenSinkEncoder,
LambdaContext,
TestEvent,
UnmodelledRequest,
)
LOG = logging.getLogger(__name__)
# Actions whose handlers may legitimately return IN_PROGRESS (async mutations);
# READ and LIST must complete synchronously (enforced in Resource._invoke_handler).
MUTATING_ACTIONS = (Action.CREATE, Action.UPDATE, Action.DELETE)
# Common signature of all handler callables:
# (session, request, callback_context) -> ProgressEvent
HandlerSignature = Callable[
    [Optional[SessionProxy], Any, MutableMapping[str, Any]], ProgressEvent
]
def _ensure_serialize(
    entrypoint: Callable[
        [Any, MutableMapping[str, Any], Any],
        Union[ProgressEvent, MutableMapping[str, Any]],
    ]
) -> Callable[[Any, MutableMapping[str, Any], Any], Any]:
    """Wrap *entrypoint* so its result is always a JSON-safe mapping.

    The wrapped call's return value is round-tripped through JSON (using
    KitchenSinkEncoder); any exception — in the entrypoint or during
    serialization — is converted into a serialized InternalFailure event.
    """

    @wraps(entrypoint)
    def _serialized_call(self: Any, event: MutableMapping[str, Any], context: Any) -> Any:
        try:
            raw_response = entrypoint(self, event, context)
            as_json = json.dumps(raw_response, cls=KitchenSinkEncoder)
        except Exception:  # pylint: disable=broad-except
            failure = ProgressEvent.failed(HandlerErrorCode.InternalFailure)
            return failure._serialize()  # pylint: disable=protected-access
        return json.loads(as_json)

    return _serialized_call
class Resource:
    """Dispatches CloudFormation resource-provider events to registered handlers.

    Handlers are registered per Action via the ``handler`` decorator; the
    instance is callable as the Lambda entry point (``__call__``) and also
    exposes ``test_entrypoint`` for contract-test invocations.
    """
    def __init__(
        self,
        type_name: str,
        resouce_model_cls: Type[BaseModel],
        type_configuration_model_cls: Optional[Type[BaseModel]] = None,
    ) -> None:
        # NOTE(review): "resouce_model_cls" is misspelled ("resource"); renaming
        # would break keyword-argument callers, so fix in a coordinated change.
        self.type_name = type_name
        self._model_cls: Type[BaseModel] = resouce_model_cls
        self._type_configuration_model_cls: Optional[
            Type[BaseModel]
        ] = type_configuration_model_cls
        self._handlers: MutableMapping[Action, HandlerSignature] = {}
    def handler(self, action: Action) -> Callable[[HandlerSignature], HandlerSignature]:
        """Decorator factory: register the decorated function as the handler for *action*."""
        def _add_handler(f: HandlerSignature) -> HandlerSignature:
            self._handlers[action] = f
            return f
        return _add_handler
    def _invoke_handler(
        self,
        session: Optional[SessionProxy],
        request: BaseResourceHandlerRequest,
        action: Action,
        callback_context: MutableMapping[str, Any],
    ) -> ProgressEvent:
        """Run the registered handler for *action*, enforcing sync READ/LIST."""
        try:
            handler = self._handlers[action]
        except KeyError:
            return ProgressEvent.failed(
                HandlerErrorCode.InternalFailure, f"No handler for {action}"
            )
        progress = handler(session, request, callback_context)
        is_in_progress = progress.status == OperationStatus.IN_PROGRESS
        is_mutable = action in MUTATING_ACTIONS
        # Only mutating actions may defer work with IN_PROGRESS.
        if is_in_progress and not is_mutable:
            raise InternalFailure("READ and LIST handlers must return synchronously.")
        return progress
    def _parse_test_request(
        self, event_data: MutableMapping[str, Any]
    ) -> Tuple[
        Optional[SessionProxy],
        BaseResourceHandlerRequest,
        Action,
        MutableMapping[str, Any],
    ]:
        """Parse a contract-test event into (session, request, action, callback)."""
        try:
            event = TestEvent(**event_data)
            creds = Credentials(**event.credentials)
            request: BaseResourceHandlerRequest = UnmodelledRequest(
                **event.request
            ).to_modelled(self._model_cls, self._type_configuration_model_cls)
            session = _get_boto_session(creds, event.region)
            action = Action[event.action]
        except Exception as e: # pylint: disable=broad-except
            LOG.exception("Invalid request")
            raise InternalFailure(f"{e} ({type(e).__name__})") from e
        return session, request, action, event.callbackContext or {}
    @_ensure_serialize
    def test_entrypoint(
        self, event: MutableMapping[str, Any], _context: Any
    ) -> ProgressEvent:
        """Contract-test entry point: parse, invoke, and map errors to progress events."""
        msg = "Uninitialized"
        try:
            session, request, action, callback_context = self._parse_test_request(event)
            return self._invoke_handler(session, request, action, callback_context)
        except _HandlerError as e:
            LOG.exception("Handler error")
            return e.to_progress_event()
        except Exception: # pylint: disable=broad-except
            LOG.exception("Exception caught")
        except BaseException: # pylint: disable=broad-except
            LOG.critical("Base exception caught (this is usually bad)", exc_info=True)
        # Reached only when one of the two fall-through excepts fired above.
        return ProgressEvent.failed(HandlerErrorCode.InternalFailure, msg)
    @staticmethod
    def _parse_request(
        event_data: MutableMapping[str, Any]
    ) -> Tuple[
        Tuple[Optional[SessionProxy], Optional[SessionProxy]],
        Action,
        MutableMapping[str, Any],
        HandlerRequest,
    ]:
        """Parse a live Lambda event into ((caller, provider) sessions, action, callback, event)."""
        try:
            event = HandlerRequest.deserialize(event_data)
            caller_sess = _get_boto_session(event.requestData.callerCredentials)
            provider_sess = _get_boto_session(event.requestData.providerCredentials)
            # credentials are used when rescheduling, so can't zero them out (for now)
            action = Action[event.action]
            callback_context = event.callbackContext or {}
        except Exception as e: # pylint: disable=broad-except
            LOG.exception("Invalid request")
            raise InvalidRequest(f"{e} ({type(e).__name__})") from e
        return ((caller_sess, provider_sess), action, callback_context, event)
    def _cast_resource_request(
        self, request: HandlerRequest
    ) -> BaseResourceHandlerRequest:
        """Map the wire-format HandlerRequest onto the generated resource model."""
        try:
            return UnmodelledRequest(
                clientRequestToken=request.bearerToken,
                desiredResourceState=request.requestData.resourceProperties,
                previousResourceState=request.requestData.previousResourceProperties,
                desiredResourceTags=request.requestData.stackTags,
                previousResourceTags=request.requestData.previousStackTags,
                systemTags=request.requestData.systemTags,
                previousSystemTags=request.requestData.previousSystemTags,
                awsAccountId=request.awsAccountId,
                logicalResourceIdentifier=request.requestData.logicalResourceId,
                stackId=request.stackId,
                region=request.region,
                typeConfiguration=request.requestData.typeConfiguration,
            ).to_modelled(self._model_cls, self._type_configuration_model_cls)
        except Exception as e: # pylint: disable=broad-except
            LOG.exception("Invalid request")
            raise InvalidRequest(f"{e} ({type(e).__name__})") from e
    # TODO: refactor to reduce branching and locals
    @_ensure_serialize # noqa: C901
    def __call__( # pylint: disable=too-many-locals # noqa: C901
        self, event_data: MutableMapping[str, Any], context: LambdaContext
    ) -> MutableMapping[str, Any]:
        """Lambda entry point: parse the event, publish metrics, invoke the handler.

        Returns the serialized ProgressEvent; all failures are converted into
        a failed event rather than propagating out of the Lambda.
        """
        logs_setup = False
        def print_or_log(message: str) -> None:
            # Before log delivery is wired up, fall back to stdout so the
            # message still reaches CloudWatch via the Lambda runtime.
            if logs_setup:
                LOG.exception(message, exc_info=True)
            else:
                print(message)
                traceback.print_exc()
        try:
            sessions, action, callback, event = self._parse_request(event_data)
            caller_sess, provider_sess = sessions
            request = self._cast_resource_request(event)
            metrics = MetricsPublisherProxy()
            if event.requestData.providerLogGroupName and provider_sess:
                ProviderLogHandler.setup(event, provider_sess)
                logs_setup = True
            metrics.add_metrics_publisher(provider_sess, event.resourceType)
            metrics.publish_invocation_metric(datetime.utcnow(), action)
            start_time = datetime.utcnow()
            error = None
            # Catch handler errors locally so the duration metric is always
            # published before the exception is re-raised.
            try:
                progress = self._invoke_handler(caller_sess, request, action, callback)
            except Exception as e: # pylint: disable=broad-except
                error = e
            m_secs = (datetime.utcnow() - start_time).total_seconds() * 1000.0
            metrics.publish_duration_metric(datetime.utcnow(), action, m_secs)
            if error:
                metrics.publish_exception_metric(datetime.utcnow(), action, error)
                raise error
        except _HandlerError as e:
            print_or_log("Handler error")
            progress = e.to_progress_event()
        except Exception as e: # pylint: disable=broad-except
            print_or_log("Exception caught {0}".format(e))
            progress = ProgressEvent.failed(HandlerErrorCode.InternalFailure)
        except BaseException as e: # pylint: disable=broad-except
            print_or_log("Base exception caught (this is usually bad) {0}".format(e))
            progress = ProgressEvent.failed(HandlerErrorCode.InternalFailure)
        if progress.result:
            progress.result = None
        # use the raw event_data as a last-ditch attempt to call back if the
        # request is invalid
        return progress._serialize() # pylint: disable=protected-access
| 39.711864 | 88 | 0.653969 |
ace311cbba66a61d5081dbac18fd511573bdeffd | 498 | py | Python | recipes/Python/577222_Create_module_dependency_graph_/recipe-577222.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577222_Create_module_dependency_graph_/recipe-577222.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577222_Create_module_dependency_graph_/recipe-577222.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | import sys
import types
visited = set()
def tree(parent, mod, indent = 0):
print '"%s" -> "%s" ; '%(parent, mod.__name__)
if mod in visited:
return
visited.add(mod)
for i in dir(mod):
obj = getattr(mod, i)
if isinstance(obj, types.ModuleType):
tree(mod.__name__, obj, indent + 1)
if __name__ == "__main__":
class Foo: pass
Foo.__name__ = "Top"
mod = __import__(sys.argv[1])
print "Digraph F {"
tree(Foo, mod)
print "}"
| 21.652174 | 50 | 0.570281 |
ace3126a9db165c19af4ca2993f2ed4f597e3b34 | 1,829 | py | Python | python/phonenumbers/data/region_JM.py | trueroll/python-phonenumbers | 96b06f1b0ae593218b2240af3b62975fb97537dd | [
"Apache-2.0"
] | 1 | 2021-03-21T14:24:08.000Z | 2021-03-21T14:24:08.000Z | python/phonenumbers/data/region_JM.py | trueroll/python-phonenumbers | 96b06f1b0ae593218b2240af3b62975fb97537dd | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/data/region_JM.py | trueroll/python-phonenumbers | 96b06f1b0ae593218b2240af3b62975fb97537dd | [
"Apache-2.0"
] | null | null | null | """Auto-generated file, do not edit by hand. JM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Machine-generated phone metadata for Jamaica (NANP, country code +1; see
# the module docstring).  Do not edit the patterns by hand -- regenerate
# from the upstream libphonenumber metadata instead.
PHONE_METADATA_JM = PhoneMetadata(id='JM', country_code=1, international_prefix='011',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:[58]\\d\\d|658|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='8766060\\d{3}|(?:658(?:2(?:[0-8]\\d|9[0-46-9])|[3-9]\\d\\d)|876(?:5(?:02|1[0-468]|2[35]|63)|6(?:0[1-3579]|1[0237-9]|[23]\\d|40|5[06]|6[2-589]|7[05]|8[04]|9[4-9])|7(?:0[2-689]|[1-6]\\d|8[056]|9[45])|9(?:0[1-8]|1[02378]|[2-8]\\d|9[2-468])))\\d{4}', example_number='8765230123', possible_length=(10,), possible_length_local_only=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='(?:658295|876(?:2(?:[14-9]\\d|2[013-9]|3[7-9])|[348]\\d\\d|5(?:0[13-9]|1[579]|[2-57-9]\\d|6[0-24-9])|6(?:4[89]|6[67])|7(?:0[07]|7\\d|8[1-47-9]|9[0-36-9])|9(?:[01]9|9[0579])))\\d{4}', example_number='8762101234', possible_length=(10,), possible_length_local_only=(7,)),
    toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
    personal_number=PhoneNumberDesc(national_number_pattern='52(?:3(?:[2-46-9][02-9]\\d|5(?:[02-46-9]\\d|5[0-46-9]))|4(?:[2-478][02-9]\\d|5(?:[034]\\d|2[024-9]|5[0-46-9])|6(?:0[1-9]|[2-9]\\d)|9(?:[05-9]\\d|2[0-5]|49)))\\d{4}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
    national_prefix='1',
    national_prefix_for_parsing='1',
    leading_digits='658|876',
    mobile_number_portable_region=True)
| 121.933333 | 388 | 0.653362 |
ace3132cc482d20dd443cf51ba107ab4c532ce31 | 1,682 | py | Python | testing/cli.py | Siggert75/testing | cac18a95754b4554faa19d1bfaa98c6e67ea74d7 | [
"Unlicense"
] | null | null | null | testing/cli.py | Siggert75/testing | cac18a95754b4554faa19d1bfaa98c6e67ea74d7 | [
"Unlicense"
] | null | null | null | testing/cli.py | Siggert75/testing | cac18a95754b4554faa19d1bfaa98c6e67ea74d7 | [
"Unlicense"
] | null | null | null | import typer
import uvicorn
from sqlmodel import Session, select
from .app import app
from .config import settings
from .db import create_db_and_tables, engine
from .models.content import Content
from .security import User
# Root Typer application; the @cli.command() functions below register on it.
cli = typer.Typer(name="testing API")
@cli.command()
def run(
    port: int = settings.server.port,
    host: str = settings.server.host,
    log_level: str = settings.server.log_level,
    reload: bool = settings.server.reload,
):  # pragma: no cover
    """Run the API server."""
    # Collect the uvicorn options first, then launch the ASGI app by its
    # import string so --reload can re-import it in worker processes.
    server_options = {
        "host": host,
        "port": port,
        "log_level": log_level,
        "reload": reload,
    }
    uvicorn.run("testing.app:app", **server_options)
@cli.command()
def create_user(username: str, password: str, superuser: bool = False):
    """Create user"""
    # Make sure the schema exists before touching the users table.
    create_db_and_tables(engine)
    new_user = User(username=username, password=password, superuser=superuser)
    with Session(engine) as session:
        session.add(new_user)
        session.commit()
        # Re-load server-generated fields (e.g. the primary key).
        session.refresh(new_user)
    typer.echo(f"created {username} user")
    return new_user
@cli.command()
def shell():  # pragma: no cover
    """Opens an interactive shell with objects auto imported"""
    # Keyword order here fixes the order of the echoed listing below.
    auto_imports = dict(
        app=app,
        settings=settings,
        User=User,
        engine=engine,
        cli=cli,
        create_user=create_user,
        select=select,
        session=Session(engine),
        Content=Content,
    )
    typer.echo(f"Auto imports: {list(auto_imports.keys())}")
    try:
        # Prefer IPython when it is installed.
        from IPython import start_ipython

        start_ipython(argv=[], user_ns=auto_imports)
    except ImportError:
        # Fall back to the stdlib REPL otherwise.
        import code

        code.InteractiveConsole(auto_imports).interact()
ace31409e6197a934a50171877e9cb93cd550769 | 880 | py | Python | programs/pyeos/tests/python/backyard/solidity.py | learnforpractice/pyeos | 4f04eb982c86c1fdb413084af77c713a6fda3070 | [
"MIT"
] | 144 | 2017-10-18T16:38:51.000Z | 2022-01-09T12:43:57.000Z | programs/pyeos/tests/python/backyard/solidity.py | openchatproject/safeos | 2c8dbf57d186696ef6cfcbb671da9705b8f3d9f7 | [
"MIT"
] | 60 | 2017-10-11T13:07:43.000Z | 2019-03-26T04:33:27.000Z | programs/pyeos/tests/python/backyard/solidity.py | learnforpractice/pyeos | 4f04eb982c86c1fdb413084af77c713a6fda3070 | [
"MIT"
] | 38 | 2017-12-05T01:13:56.000Z | 2022-01-07T07:06:53.000Z | from eoslib import eosio_assert
def event(func):
    """Decorator marking a method as a Solidity-style event.

    Currently only a stub: it prints a TODO marker and then delegates to the
    wrapped method unchanged.
    """
    def wrapper(self, *args):
        print('TODO: event')
        return func(self, *args)
    return wrapper
#FIXME it works, yet not safe since payable.count can be changed deliberately
#It'd better to implement payable decorator in C
class payable(object):
    """Decorator guarding against reentrant calls of payable functions.

    A class-level counter acts as a (process-wide) reentrancy flag: it must
    be 0 when a payable function is entered and is set back to 0 when the
    call finishes.  BUG FIX: the flag is now reset in a ``finally`` block,
    so an exception raised by the wrapped callable no longer leaves the
    flag stuck at 1 (which used to make every later payable call fail the
    reentrancy assert).
    """
    # 0 = no payable call in progress, 1 = inside a payable call.
    count = 0

    def __init__(self, func):
        self.func = func
        payable.count = 0

    def __call__(self, *args):
        assert payable.count == 0, 'reentrant in payable function is forbidden!'
        payable.count = 1
        try:
            return self.func(*args)
        finally:
            # Always clear the flag, even when the wrapped callable raises.
            payable.count = 0
class address(int):
    """Marker subclass standing in for Solidity's `address` type; it behaves
    exactly like int."""
    pass
def require(cond, msg=''):
    """Solidity-style require(): abort the action via eosio_assert when
    *cond* is falsy.

    eosio_assert expects an integer flag, so any truthy/falsy Python value
    is normalized to 1/0 before the call.
    """
    eosio_assert(1 if cond else 0, msg)
class Msg(object):
    """Mimics Solidity's global `msg` object: carries the caller (`sender`)
    and the attached value (`value`) of the current call."""
    def __init__(self):
        # Defaults: zero address and no attached value; populated elsewhere.
        self.sender = address(0)
        self.value = None
| 23.157895 | 80 | 0.617045 |
ace3150a3c4b4ae4c46dabd7687e1f677e245b89 | 9,609 | py | Python | resources/antlr-introduction/solution/listsParser.py | arminnh/c-to-p-compilers | 2c649e1d3643471bac681c2656c1c7d6249be4d7 | [
"MIT"
] | 1 | 2021-04-29T06:40:54.000Z | 2021-04-29T06:40:54.000Z | resources/antlr-introduction/solution/listsParser.py | arminnh/ba3-c-to-p-compiler | 2c649e1d3643471bac681c2656c1c7d6249be4d7 | [
"MIT"
] | null | null | null | resources/antlr-introduction/solution/listsParser.py | arminnh/ba3-c-to-p-compiler | 2c649e1d3643471bac681c2656c1c7d6249be4d7 | [
"MIT"
] | 1 | 2017-01-30T19:19:31.000Z | 2017-01-30T19:19:31.000Z | # Generated from lists.g4 by ANTLR 4.5.2
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
    # Returns the serialized ATN (augmented transition network) that ANTLR
    # emitted for lists.g4; it is deserialized once below to drive the parser.
    # NOTE: machine-generated payload -- do not edit by hand, regenerate with
    # the ANTLR tool instead.
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
        buf.write(u"\7 \4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\3\2\3\2\3\2\3\2\3")
        buf.write(u"\2\3\3\3\3\3\3\7\3\23\n\3\f\3\16\3\26\13\3\5\3\30\n\3")
        buf.write(u"\3\4\3\4\5\4\34\n\4\3\5\3\5\3\5\2\2\6\2\4\6\b\2\2\36")
        buf.write(u"\2\n\3\2\2\2\4\27\3\2\2\2\6\33\3\2\2\2\b\35\3\2\2\2\n")
        buf.write(u"\13\7\5\2\2\13\f\5\4\3\2\f\r\7\6\2\2\r\16\b\2\1\2\16")
        buf.write(u"\3\3\2\2\2\17\24\5\6\4\2\20\21\7\4\2\2\21\23\5\4\3\2")
        buf.write(u"\22\20\3\2\2\2\23\26\3\2\2\2\24\22\3\2\2\2\24\25\3\2")
        buf.write(u"\2\2\25\30\3\2\2\2\26\24\3\2\2\2\27\17\3\2\2\2\27\30")
        buf.write(u"\3\2\2\2\30\5\3\2\2\2\31\34\5\b\5\2\32\34\5\2\2\2\33")
        buf.write(u"\31\3\2\2\2\33\32\3\2\2\2\34\7\3\2\2\2\35\36\7\3\2\2")
        buf.write(u"\36\t\3\2\2\2\5\24\27\33")
        return buf.getvalue()
class listsParser ( Parser ):
    """Parser for the `lists` grammar (machine-generated by ANTLR 4.5.2).

    Do not edit by hand -- regenerate from lists.g4 with the ANTLR tool.
    Grammar shape: lst : '(' seq ')' ; seq : (item (',' seq)*)? ;
    item : NUMBER | lst ; number : NUMBER ;
    """

    grammarFileName = "lists.g4"

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    literalNames = [ u"<INVALID>", u"<INVALID>", u"','", u"'('", u"')'" ]

    symbolicNames = [ u"<INVALID>", u"NUMBER", u"COMMA", u"LBRACKET", u"RBRACKET",
                      u"WS" ]

    RULE_lst = 0
    RULE_seq = 1
    RULE_item = 2
    RULE_number = 3

    ruleNames = [ u"lst", u"seq", u"item", u"number" ]

    EOF = Token.EOF
    NUMBER=1
    COMMA=2
    LBRACKET=3
    RBRACKET=4
    WS=5

    def __init__(self, input):
        super(listsParser, self).__init__(input)
        self.checkVersion("4.5.2")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None

    class LstContext(ParserRuleContext):

        def __init__(self, parser, parent=None, invokingState=-1):
            super(listsParser.LstContext, self).__init__(parent, invokingState)
            self.parser = parser
            self._LBRACKET = None # Token

        def LBRACKET(self):
            return self.getToken(listsParser.LBRACKET, 0)

        def seq(self):
            return self.getTypedRuleContext(listsParser.SeqContext,0)

        def RBRACKET(self):
            return self.getToken(listsParser.RBRACKET, 0)

        def getRuleIndex(self):
            return listsParser.RULE_lst

        def enterRule(self, listener):
            if hasattr(listener, "enterLst"):
                listener.enterLst(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitLst"):
                listener.exitLst(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitLst"):
                return visitor.visitLst(self)
            else:
                return visitor.visitChildren(self)

    # Rule: lst : LBRACKET seq RBRACKET ;  (embedded grammar action prints
    # the opening bracket's line:column)
    def lst(self):

        localctx = listsParser.LstContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_lst)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 8
            localctx._LBRACKET = self.match(listsParser.LBRACKET)
            self.state = 9
            self.seq()
            self.state = 10
            self.match(listsParser.RBRACKET)
            print("another list %d:%d" % ((0 if localctx._LBRACKET is None else localctx._LBRACKET.line), (0 if localctx._LBRACKET is None else localctx._LBRACKET.column)))
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class SeqContext(ParserRuleContext):

        def __init__(self, parser, parent=None, invokingState=-1):
            super(listsParser.SeqContext, self).__init__(parent, invokingState)
            self.parser = parser

        def item(self):
            return self.getTypedRuleContext(listsParser.ItemContext,0)

        def COMMA(self, i=None):
            if i is None:
                return self.getTokens(listsParser.COMMA)
            else:
                return self.getToken(listsParser.COMMA, i)

        def seq(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(listsParser.SeqContext)
            else:
                return self.getTypedRuleContext(listsParser.SeqContext,i)

        def getRuleIndex(self):
            return listsParser.RULE_seq

        def enterRule(self, listener):
            if hasattr(listener, "enterSeq"):
                listener.enterSeq(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitSeq"):
                listener.exitSeq(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitSeq"):
                return visitor.visitSeq(self)
            else:
                return visitor.visitChildren(self)

    # Rule: seq : ( item (COMMA seq)* )? ;  (the whole body is optional)
    def seq(self):

        localctx = listsParser.SeqContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_seq)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 21
            _la = self._input.LA(1)
            if _la==listsParser.NUMBER or _la==listsParser.LBRACKET:
                self.state = 13
                self.item()
                self.state = 18
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,0,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 14
                        self.match(listsParser.COMMA)
                        self.state = 15
                        self.seq()
                    self.state = 20
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,0,self._ctx)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class ItemContext(ParserRuleContext):

        def __init__(self, parser, parent=None, invokingState=-1):
            super(listsParser.ItemContext, self).__init__(parent, invokingState)
            self.parser = parser

        def number(self):
            return self.getTypedRuleContext(listsParser.NumberContext,0)

        def lst(self):
            return self.getTypedRuleContext(listsParser.LstContext,0)

        def getRuleIndex(self):
            return listsParser.RULE_item

        def enterRule(self, listener):
            if hasattr(listener, "enterItem"):
                listener.enterItem(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitItem"):
                listener.exitItem(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitItem"):
                return visitor.visitItem(self)
            else:
                return visitor.visitChildren(self)

    # Rule: item : number | lst ;  (alternative chosen on the next token)
    def item(self):

        localctx = listsParser.ItemContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_item)
        try:
            self.state = 25
            token = self._input.LA(1)
            if token in [listsParser.NUMBER]:
                self.enterOuterAlt(localctx, 1)
                self.state = 23
                self.number()

            elif token in [listsParser.LBRACKET]:
                self.enterOuterAlt(localctx, 2)
                self.state = 24
                self.lst()

            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class NumberContext(ParserRuleContext):

        def __init__(self, parser, parent=None, invokingState=-1):
            super(listsParser.NumberContext, self).__init__(parent, invokingState)
            self.parser = parser

        def NUMBER(self):
            return self.getToken(listsParser.NUMBER, 0)

        def getRuleIndex(self):
            return listsParser.RULE_number

        def enterRule(self, listener):
            if hasattr(listener, "enterNumber"):
                listener.enterNumber(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitNumber"):
                listener.exitNumber(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitNumber"):
                return visitor.visitNumber(self)
            else:
                return visitor.visitChildren(self)

    # Rule: number : NUMBER ;
    def number(self):

        localctx = listsParser.NumberContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_number)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 27
            self.match(listsParser.NUMBER)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
| 31.198052 | 172 | 0.570819 |
ace315cdba188cf3ae84ed67c638ca7d52e3ce75 | 443 | py | Python | core/migrations/0008_board_private.py | nickali/pinry | 5e0c9ee79a70c4997810c6e6b51a460db4fe4fa0 | [
"BSD-2-Clause"
] | 1,193 | 2015-01-04T15:48:41.000Z | 2022-03-31T21:17:59.000Z | core/migrations/0008_board_private.py | nickali/pinry | 5e0c9ee79a70c4997810c6e6b51a460db4fe4fa0 | [
"BSD-2-Clause"
] | 201 | 2015-01-17T09:12:01.000Z | 2022-03-29T03:04:15.000Z | core/migrations/0008_board_private.py | nickali/pinry | 5e0c9ee79a70c4997810c6e6b51a460db4fe4fa0 | [
"BSD-2-Clause"
] | 234 | 2015-01-05T18:57:32.000Z | 2022-03-08T11:17:52.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-02-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_pin_private'),
]
operations = [
migrations.AddField(
model_name='board',
name='private',
field=models.BooleanField(default=False),
),
]
| 21.095238 | 53 | 0.609481 |
ace31687338f918ef260b3134b0bd429795542d0 | 9,133 | py | Python | python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py | ZHUI/Paddle | 32ae8e81322ed380a89157fcb632c229e2c64979 | [
"Apache-2.0"
] | 1 | 2021-12-27T02:41:23.000Z | 2021-12-27T02:41:23.000Z | python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | null | null | null | python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py | zlsh80826/Paddle | c560a7d57aad990f374ebadd330351f18e2ca65f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import paddle
from paddle.fluid.framework import core
from paddle.fluid import compiler
from .meta_optimizer_base import MetaOptimizerBase
from ..base.private_helper_function import wait_server_ready
import logging
class GraphExecutionOptimizer(MetaOptimizerBase):
    """Meta optimizer that compiles the program into a CompiledProgram
    (ParallelExecutor graph) configured from the user's DistributedStrategy.

    It does not rewrite the loss/backward pass itself; it only builds the
    execution graph (NCCL setup, build/execution strategies) and attaches
    it to the program.
    """

    def __init__(self, optimizer):
        super(GraphExecutionOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        # we do not allow meta optimizer to be inner optimizer currently
        self.meta_optimizers_white_list = []
        self.meta_optimizers_black_list = []

    def _is_graph_out(self):
        # This optimizer produces a compiled graph rather than a new program.
        return True

    def _can_apply(self):
        """
        Basically, this is PE, and almost all programs can be executed here
        """
        if not self.role_maker._is_collective:
            # update me. currently, if parameter server is used
            # graph execution optimizer can not be applied
            return False
        return True

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        # Backward is handled by the inner optimizer; nothing to do here.
        pass

    # should fix the variable
    def _setup_nccl_op(self, startup_program, main_program, build_strategy):
        """Create the NCCLID variables and append a gen_nccl_id op to the
        startup program so trainers can bootstrap NCCL communicators.

        Args:
            startup_program: program the gen_nccl_id op is appended to.
            main_program: unused here; kept for interface compatibility.
            build_strategy: provides nccl_comm_num / hierarchical-allreduce
                settings.
        """
        trainer_endpoints = self.role_maker.get_trainer_endpoints()
        trainer_id = self.role_maker.worker_index()
        # NOTE: several unused locals from the original implementation
        # (current endpoint, joined endpoint string, trainer count) were
        # removed; they were never read.
        nccl_id_var = startup_program.global_block().create_var(
            name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
        # One extra NCCLID variable per additional communicator.
        for i in range(1, build_strategy.nccl_comm_num):
            startup_program.global_block().create_var(
                name="NCCLID_{}".format(i),
                persistable=True,
                type=core.VarDesc.VarType.RAW)

        if build_strategy.use_hierarchical_allreduce:
            # Hierarchical allreduce needs separate inter/exter ids per comm.
            for i in range(0, build_strategy.nccl_comm_num):
                startup_program.global_block().create_var(
                    name="Hierarchical_inter_NCCLID_{}".format(i),
                    persistable=True,
                    type=core.VarDesc.VarType.RAW)
                startup_program.global_block().create_var(
                    name="Hierarchical_exter_NCCLID_{}".format(i),
                    persistable=True,
                    type=core.VarDesc.VarType.RAW)

        startup_program.global_block().append_op(
            type="gen_nccl_id",
            inputs={},
            outputs={"NCCLID": nccl_id_var},
            attrs={
                "trainers": trainer_endpoints,
                "trainer_id": trainer_id,
                "nccl_comm_num": build_strategy.nccl_comm_num,
                "use_hierarchical_allreduce":
                build_strategy.use_hierarchical_allreduce,
                "hierarchical_allreduce_inter_ranks":
                build_strategy.hierarchical_allreduce_inter_nranks
            })

    def _try_to_compile(self, startup_program, main_program, loss):
        """Build a CompiledProgram for *main_program* from the user-defined
        distributed strategy and cache it on the instance.

        Returns:
            The data-parallel CompiledProgram.
        """
        dist_strategy = self.user_defined_strategy
        local_build_strategy = paddle.fluid.BuildStrategy()

        # Copy the relevant knobs from the distributed strategy into a
        # local BuildStrategy so later adjustments don't mutate user state.
        local_build_strategy.enable_sequential_execution = \
            dist_strategy.build_strategy.enable_sequential_execution
        local_build_strategy.fuse_elewise_add_act_ops = \
            dist_strategy.build_strategy.fuse_elewise_add_act_ops
        local_build_strategy.fuse_bn_act_ops = \
            dist_strategy.build_strategy.fuse_bn_act_ops
        local_build_strategy.enable_auto_fusion = \
            dist_strategy.build_strategy.enable_auto_fusion
        local_build_strategy.fuse_relu_depthwise_conv = \
            dist_strategy.build_strategy.fuse_relu_depthwise_conv
        local_build_strategy.fuse_broadcast_ops = \
            dist_strategy.build_strategy.fuse_broadcast_ops
        local_build_strategy.fuse_all_optimizer_ops = \
            dist_strategy.build_strategy.fuse_all_optimizer_ops
        local_build_strategy.enable_inplace = \
            dist_strategy.build_strategy.enable_inplace
        local_build_strategy.use_hierarchical_allreduce = \
            dist_strategy.use_hierarchical_allreduce
        local_build_strategy.hierarchical_allreduce_inter_nranks = \
            dist_strategy.hierarchical_allreduce_inter_nranks
        local_build_strategy.sync_batch_norm = \
            dist_strategy.sync_batch_norm
        local_build_strategy.fuse_all_reduce_ops = \
            dist_strategy.fuse_all_reduce_ops
        local_build_strategy.nccl_comm_num = \
            dist_strategy.nccl_comm_num

        if self.user_defined_strategy.recompute == True:
            logging.warning(
                "set enable_sequential_execution=True since you have enable the recompute strategy"
            )
            local_build_strategy.enable_sequential_execution = True

        exe_strategy = self.user_defined_strategy.execution_strategy
        worker_num = self.role_maker.worker_num()
        node_num = self.role_maker.node_num()

        if self.role_maker._is_collective:
            # BUG FIX: the original message used a "{}" placeholder with the
            # "%" operator, which raised TypeError whenever the assert fired.
            assert worker_num >= 1, \
                "nccl2 worker_num must >= 1, now:{}".format(worker_num)

        if worker_num <= 1:
            # local mode
            if local_build_strategy.nccl_comm_num > 1:
                logging.warning("set nccl_comm_num=1 since you only have 1 node.")
                local_build_strategy.nccl_comm_num = 1

        if node_num <= 1:
            if local_build_strategy.use_hierarchical_allreduce:
                logging.warning(
                    "set hierachical_allreduce=False since you only have 1 node."
                )
                local_build_strategy.use_hierarchical_allreduce = False

        sync_allreduce = dist_strategy.sync_nccl_allreduce
        if sync_allreduce:
            paddle.fluid.framework.set_flags({
                "FLAGS_sync_nccl_allreduce": True
            })
            # One executor thread per communicator plus one for scheduling.
            exe_strategy.num_threads = local_build_strategy.nccl_comm_num + 1
            if local_build_strategy.use_hierarchical_allreduce:
                exe_strategy.num_threads = 2 * local_build_strategy.nccl_comm_num + 1
            if exe_strategy.num_threads > 4:
                logging.warning(
                    "if you use hierachical_allreduce or "
                    "with multi nccl comm, please set distributed_strategy.sync_nccl_allreduce=False"
                )

        sync_batch_norm = local_build_strategy.sync_batch_norm
        if sync_batch_norm:
            # sync_batch_norm is incompatible with multiple comms/threads.
            local_build_strategy.nccl_comm_num = 1
            local_build_strategy.use_hierarchical_allreduce = False
            exe_strategy.num_threads = 1
            logging.warning(
                "use sync_batch_norm will hang when set num_threads > 1, so "
                "set num_threads=1, nccl_comm_num=1, hierachical_allreduce=False."
            )

        # TODO(guru4elephant): should be an independent optimizer
        self._setup_nccl_op(startup_program, main_program, local_build_strategy)

        local_build_strategy.num_trainers = self.role_maker.worker_num()
        local_build_strategy.trainer_id = self.role_maker.worker_index()
        local_build_strategy.trainers_endpoints = \
            self.role_maker.get_trainer_endpoints()
        local_build_strategy.enable_backward_optimizer_op_deps = True

        self._compiled_program = compiler.CompiledProgram(main_program)

        self._compiled_program.with_data_parallel(
            loss_name=loss.name,
            build_strategy=local_build_strategy,
            exec_strategy=exe_strategy,
            share_vars_from=None)

        return self._compiled_program

    def _disable_strategy(self, dist_strategy):
        # TODO(guru4elephant): should close all PE related flags here
        pass

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """Compile the loss's program into a graph and attach it.

        Returns:
            (None, None): this optimizer produces no ops/param-grad pairs.
        """
        if startup_program is None:
            startup_program = paddle.static.default_startup_program()
        compiled_program = self._try_to_compile(startup_program,
                                                loss.block.program, loss)
        loss.block.program._graph = compiled_program

        # just return self.optimizer_ops and self.param_grads
        return None, None
| 43.698565 | 101 | 0.655535 |
ace317b1b96f173a4a3d959982be80db4c865da7 | 631 | py | Python | experiments/2013-08-16-IBM.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 151 | 2015-01-09T19:25:05.000Z | 2022-01-05T02:05:52.000Z | experiments/2013-08-16-IBM.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 1 | 2016-08-04T13:12:51.000Z | 2016-08-04T13:12:51.000Z | experiments/2013-08-16-IBM.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 59 | 2015-02-04T19:13:58.000Z | 2021-07-28T23:36:09.000Z | Experiment(description='Trying out the integrated Brownian motion',
data_dir='../data/tsdl/',
max_depth=8,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=4,
max_jobs=600,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2013-08-16-IBM/',
iters=500,
base_kernels='SE,Lin,IBMLin,Const,Per',
zero_mean=True,
random_seed=0,
period_heuristic=5)
| 27.434783 | 67 | 0.499208 |
ace319e370eeb43b574a64aa63b5bac9b3770d8f | 761 | py | Python | djcat/settings.py | avigmati/djcat | 3776f88108104d74700ebd834e7bbe2feeaa21e2 | [
"BSD-3-Clause"
] | 1 | 2017-02-03T14:36:18.000Z | 2017-02-03T14:36:18.000Z | djcat/settings.py | avigmati/djcat | 3776f88108104d74700ebd834e7bbe2feeaa21e2 | [
"BSD-3-Clause"
] | null | null | null | djcat/settings.py | avigmati/djcat | 3776f88108104d74700ebd834e7bbe2feeaa21e2 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.conf import settings
DJCAT_DEBUG_OUT = getattr(settings, 'DJCAT_DEBUG_OUT', "file")
DJCAT_DEBUG_FILE = getattr(settings, 'DJCAT_DEBUG_FILE', os.path.join(settings.BASE_DIR, 'djcat_debug.txt'))
DJCAT_ATTR_TYPES = getattr(settings, 'DJCAT_ATTR_TYPES', ['numeric', 'choice'])
DJCAT_ITEM_SLUG_DELIMITER = getattr(settings, 'DJCAT_ITEM_SLUG_DELIMITER', '_')
DJCAT_SLUG_UNIQNUMBER_DELIMITER = getattr(settings, 'DJCAT_SLUG_UNIQNUMBER_DELIMITER', '-')
DJCAT_SLUG_RESERVED = ['', DJCAT_ITEM_SLUG_DELIMITER, DJCAT_SLUG_UNIQNUMBER_DELIMITER]
DJCAT_ITEM_UID_LENGTH = getattr(settings, 'DJCAT_ITEM_UID_LENGTH', 8)
DJCAT_CATEGORY_MODEL = getattr(settings, 'DJCAT_CATEGORY_MODEL')
DJCAT_CATALOG_ROOT_URL = getattr(settings, 'DJCAT_CATALOG_ROOT_URL')
| 50.733333 | 108 | 0.817346 |
ace31a4a62e108630595febf241d0ec2e90c51b9 | 3,994 | py | Python | train.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | null | null | null | train.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | null | null | null | train.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | 1 | 2022-01-02T10:51:37.000Z | 2022-01-02T10:51:37.000Z | from torch.utils.data import Dataset, DataLoader
# define transforms
# NOTE(review): `transforms` (torchvision), `data`, `encoded_targets` and
# `CaptchaDataset` are not imported/defined in this excerpt -- presumably
# provided earlier in the full script; verify before running standalone.
# Normalization constants look like the dataset's grayscale mean/std --
# TODO confirm.
transform = transforms.Compose([transforms.Resize((75, 300)),
                                transforms.ToTensor(),
                                transforms.Normalize((0.73199,), (0.28809,)),
                                ])
# build partion -- train test split
# 90/10 random split of the full dataset into train and validation subsets.
n_data = len(data)
train_size = int(0.9 * n_data)
test_size = n_data - train_size
full_dataset = CaptchaDataset(data, encoded_targets, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(full_dataset,
                                                           [train_size, test_size])
batch_size = 16
# Shuffle only the training loader; validation order is kept fixed.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
# One output class per distinct captcha character.
# NOTE(review): `unique_letters`, `CRNN`, `nn` and `optim` are not defined
# in this excerpt -- presumably imported/defined earlier in the script.
n_outputs = len(unique_letters)
total_len = 5 # input sequence length
HIDDEN_SIZE = 128
model = CRNN(75, 300, n_outputs, HIDDEN_SIZE) #.to(device)
# Apply DCGAN-style init to every Conv/BatchNorm submodule.
model.apply(weights_init)
# CTC loss for unaligned sequence targets; zero_infinity avoids NaN grads.
criterion = nn.CTCLoss(zero_infinity=True)
optimizer = optim.Adam(model.parameters(), lr=0.003)
# How is our network doing on new data?
def validation(model, val_loader):
    """Run one evaluation pass over *val_loader*.

    Uses the module-level ``criterion``, ``device`` and ``total_len``.
    The caller is expected to have put the model in eval mode and wrapped
    the call in ``torch.no_grad()``.

    BUG FIXES vs. the original: the per-batch loss used to overwrite the
    accumulator instead of adding to it, the accuracy term referenced an
    undefined name ``output`` (NameError), and ``optimizer.zero_grad()``
    was called during evaluation.

    Returns:
        (val_loss, accuracy): summed CTC loss and summed per-batch accuracy
        over all validation batches (divide by ``len(val_loader)`` to get
        means, as the training loop does).
    """
    val_loss = 0.0
    accuracy = 0.0
    for images, labels in val_loader:
        images = images.to(device)
        labels = labels.to(device)

        bs, _, _, _ = images.shape

        val_preds = model(images)
        val_preds_lengths = torch.full(size=(bs,), fill_value=total_len, dtype=torch.long)
        # NOTE(review): random target lengths mirror the training loop's
        # (questionable) approach; kept for behavioral parity -- verify
        # against the actual label encoding.
        val_target_lengths = torch.randint(low=1, high=total_len, size=(bs,), dtype=torch.long)

        batch_loss = criterion(val_preds, labels, val_preds_lengths, val_target_lengths)
        val_loss += batch_loss.item()

        # accuracy
        ps = torch.exp(val_preds)
        equality = (labels.data == ps.max(dim=1)[1]) # to see the index of the highest prob
        accuracy += equality.type(torch.FloatTensor).mean()
    return val_loss, accuracy
def train_batch():
    """Train the module-level CRNN with CTC loss, logging train/validation
    metrics every ``print_every`` optimizer steps.

    Relies on the module-level ``model``, ``train_loader``, ``val_loader``,
    ``optimizer``, ``criterion``, ``device`` and ``total_len``.
    """
    epochs = 10
    steps = 0
    print_every = 40
    running_loss = 0
    # Mean training loss per logging window (kept for later inspection).
    all_losses = []
    for e in range(epochs):
        model.train()
        for images, labels in train_loader:
            steps += 1
            images = images.clone().detach().to(device)
            labels = labels.to(device)
            bs, _, _, _ = images.shape
            optimizer.zero_grad()
            preds = model(images)
            # CTC needs per-sample input/target lengths.
            # NOTE(review): random target lengths look wrong for real
            # captcha labels -- presumably a placeholder; verify against
            # the label encoding.
            preds_lengths = torch.full(size=(bs,), fill_value=total_len, dtype=torch.long)
            target_lengths = torch.randint(low=1, high=total_len, size=(bs,), dtype=torch.long)
            loss = criterion(preds, labels, preds_lengths, target_lengths)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # break
            if steps % print_every == 0:
                all_losses.append(running_loss / print_every)
                model.eval() # incase theres dropout, turn dropout off
                # Evaluate without tracking gradients.
                with torch.no_grad():
                    val_loss, accuracy = validation(model, val_loader)
                print("Epoch: {}/{}... ".format(e+1, epochs),
                      "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                      "Test Loss: {:.3f}... ".format(val_loss/len(val_loader)),
                      "Test Accuracy: {:.3f}".format(accuracy/len(val_loader)))
                # Start a fresh logging window.
                running_loss = 0
                model.train() # turn dropout back on
# BUG FIX: the original guard `if main == 'main':` referenced the undefined
# name `main` (NameError on import) and called `train()`, which does not
# exist -- the training entry point defined above is `train_batch()`.
if __name__ == '__main__':
    train_batch()
| 28.528571 | 95 | 0.582374 |
ace31a6f0b6be65f87fe4bc0ef906031e2a9e78f | 2,529 | py | Python | google-cloud-sdk/lib/surface/genomics/variantsets/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/lib/surface/genomics/variantsets/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/genomics/variantsets/delete.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud genomics variantsets delete.
"""
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.api_lib.genomics.exceptions import GenomicsError
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
  """Deletes a variant set.
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    parser.add_argument('variant_set_id',
                        help='The ID of the variant set to be deleted.')

  def Run(self, args):
    """Delete the variant set named on the command line, after confirmation.

    Args:
      args: an argparse namespace, All the arguments that were provided to this
        command invocation.

    Raises:
      HttpException: An http error response was received while executing api
          request.
      GenomicsError: if canceled by the user.

    Returns:
      None
    """
    client = genomics_util.GetGenomicsClient()
    messages = genomics_util.GetGenomicsMessages()

    # Fetch the variant set first so the confirmation prompt can show its
    # human-readable name.
    variant_set = client.variantsets.Get(
        messages.GenomicsVariantsetsGetRequest(
            variantSetId=str(args.variant_set_id)))

    prompt_message = (
        'Deleting variant set {0}: "{1}" will also delete all its contents '
        '(variants, callsets, and calls).').format(args.variant_set_id,
                                                   variant_set.name)
    if not console_io.PromptContinue(message=prompt_message):
      raise GenomicsError('Deletion aborted by user.')

    result = client.variantsets.Delete(
        messages.GenomicsVariantsetsDeleteRequest(
            variantSetId=args.variant_set_id,))
    log.DeletedResource('{0}: "{1}"'.format(args.variant_set_id,
                                            variant_set.name))
    return result
| 34.643836 | 79 | 0.706603 |
ace31aecedadd1c93fd77130b80db69ca4cd8530 | 12,257 | py | Python | examples/steady_horseshoe_vortex_lattice_method_solver_example.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | examples/steady_horseshoe_vortex_lattice_method_solver_example.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | examples/steady_horseshoe_vortex_lattice_method_solver_example.py | KamiGazi/PteraSoftware | 3b6f6bfb8db776970674234cb524c338ecc82df1 | [
"MIT"
] | null | null | null | """This is script is an example of how to run Ptera Software's steady horseshoe
vortex lattice method solver on a custom airplane."""
# First, import the software's main package. Note that if you wished to import this
# software into another package, you would first install the software by running "pip
# install pterasoftware" in your terminal. Here, I am importing the source directory.
# However, if you were working on an external project, you should change this to
# "import pterasoftware as ps".
import pterasoftware as ps
# Create an airplane object. Note, I am going to declare every attribute for each
# class, even most of them have usable default values. This is simply for educational
# purposes, even though it makes the code much longer than what it needs to be.
example_airplane = ps.geometry.Airplane(
# Give the airplane object a name. This value defaults to "Untitled".
name="Example Airplane",
# Specify the location of the airplane's center of gravity. This is the point
# around about which the solver will calculate the moments on the airplane. These
# three values default to 0.0 meters. Note that every input and output of this
# program is in SI units.
x_ref=0.0,
y_ref=0.0,
z_ref=0.0,
# Give the reference dimensions of this aircraft. "s_ref" is the reference area
# in meters squared, "b_ref" is the reference span in meters, and "c_ref" is the
# reference chord in meters. I set these values to None, which is their default,
# so that they will be populated by the first wing object's calculated
# characteristics. Note that the reference area used in this program is the
# wetted area of the wing's mean-camberline surface.
s_ref=None,
b_ref=None,
c_ref=None,
wings=[
ps.geometry.Wing(
name="Main Wing",
# Define the location of the leading edge of the wing relative to the
# airplane's reference position. These values all default to 0.0 meters.
x_le=0.0,
y_le=0.0,
z_le=0.0,
# Declare that this wing is symmetric. This means that the geometry will
# be reflected across the y-z plane. Note that the geometry coordinates
# are defined as such: If you were riding in the airplane, the positive x
# direction would point behind you, the positive y direction would point
# out of your right wing, and the positive z direction would point
# upwards, out of your chair. These directions form a right-handed
# coordinate system. The default value of "symmetric" is false.
symmetric=True,
# Define the number of chordwise panels on the wing, and the spacing
# between them. The number of chordwise panels defaults to 8 panels. The
# spacing defaults to "cosine", which makes the panels relatively finer,
# in the chordwise direction, near the leading and trailing edges. The
# other option is "uniform".
num_chordwise_panels=8,
chordwise_spacing="cosine",
# Every wing has a list of wing cross sections. In order for the geometry
# output to be sensible, each wing must have at least two wing cross
# sections.
wing_cross_sections=[
ps.geometry.WingCrossSection(
# Define the location of the leading edge of the wing cross
# section relative to the wing's leading edge. These values all
# default to 0.0 meters.
x_le=0.0,
y_le=0.0,
z_le=0.0,
# Define the twist of the wing cross section in degrees. This is
# equivalent to incidence angle of cross section. The twist is
# about the leading edge. Note that the twist is only stable up
# to 45.0 degrees. Values above that produce unexpected results.
# This will be fixed in a future release. The default value is
# 0.0 degrees. Positive twist corresponds to positive rotation
# about the y axis, as defined by the right-hand rule.
twist=0.0,
# Define the type of control surface. The options are "symmetric"
# and "asymmetric". This is only applicable if your wing is also
# symmetric. If so, symmetric control surfaces will deflect in
# the same direction, like flaps, while asymmetric control
# surfaces will deflect in opposite directions, like ailerons.
# The default value is "symmetric".
control_surface_type="symmetric",
# Define the point on the airfoil where the control surface
# hinges. This is expressed as a faction of the chord length,
# back from the leading edge. The default value is 0.75.
control_surface_hinge_point=0.75,
# Define the deflection of the control surface in degrees. The
# default is 0.0 degrees.
control_surface_deflection=0.0,
# Define the number of spanwise panels on the wing cross section,
# and the spacing between them. The number of spanwise panels
# defaults to 8 panels. The spacing defaults to "cosine",
# which makes the panels relatively finer, in the spanwise
# direction, near the cross section ends. The other option is
# "uniform".
num_spanwise_panels=8,
spanwise_spacing="cosine",
# Set the chord of this cross section to be 1.75 meters. This
# value defaults to 1.0 meter.
chord=1.75,
airfoil=ps.geometry.Airfoil(
# Give the airfoil a name. This defaults to "Untitled
# Airfoil". This name should correspond to a name in the
# airfoil directory or a NACA four series airfoil, unless you
# are passing in your own coordinates.
name="naca2412",
# If you wish to pass in coordinates, set this to a N x 2
# array of the airfoil's coordinates, where N is the number
# of coordinates. Treat this as an immutable, don't edit
# directly after initialization. If you wish to load
# coordinates from the airfoil directory, leave this as None.
# The default is None. Make sure that any airfoil coordinates
# used range in x from 0 to 1.
coordinates=None,
# This is the variable that determines whether or not you
# would like to repanel the airfoil coordinates. This applies
# to coordinates passed in by the user or to the directory
# coordinates. It is highly recommended to set this to True.
# The default is True.
repanel=True,
# This is number of points to use if repaneling the airfoil.
# It is ignored if the repanel is False. The default is 400.
n_points_per_side=400,
),
),
# Define the next wing cross section. From here on out,
# the declarations will not be as commented as the previous. See the
# above comments if you have questions.
ps.geometry.WingCrossSection(
x_le=0.75,
y_le=6.0,
z_le=1.0,
chord=1.5,
twist=5.0,
airfoil=ps.geometry.Airfoil(
name="naca2412",
),
),
],
),
# Define the next wing.
ps.geometry.Wing(
name="V-Tail",
x_le=6.75,
z_le=0.25,
symmetric=True,
# Define this wing's root wing cross section.
wing_cross_sections=[
ps.geometry.WingCrossSection(
chord=1.5,
# Give the root wing cross section an airfoil.
airfoil=ps.geometry.Airfoil(
name="naca0012",
),
twist=-5.0,
),
# Define the wing's tip wing cross section.
ps.geometry.WingCrossSection(
x_le=0.5,
y_le=2.0,
z_le=1.0,
chord=1.0,
twist=-5.0,
airfoil=ps.geometry.Airfoil(
name="naca0012",
),
),
],
),
],
)
# Define a new operating point object. This defines the state at which the airplane
# object is operating.
example_operating_point = ps.operating_point.OperatingPoint(
# Define the density of the fluid the airplane is flying in. This defaults to
# 1.225 kilograms per meters cubed.
density=1.225,
# Define the angle of sideslip the airplane is experiencing. This defaults to 0.0
# degrees.
beta=0.0,
# Define the freestream velocity at which the airplane is flying. This defaults
# to 10.0 meters per second.
velocity=10.0,
# Define the angle of attack the airplane is experiencing. This defaults to 5.0
# degrees.
alpha=1.0,
)
# Define a new steady problem. A steady problem contains an airplane object and an
# operating point object.
example_problem = ps.problems.SteadyProblem(
# Set this steady problem's airplane object to be the one we just created.
airplanes=[example_airplane],
# Set this steady problem's operating point object ot be the one we just created.
operating_point=example_operating_point,
)
# Now, the airplane and operating point object exist within the steady problem
# object. I like to delete the external pointers to these objects to ease debugging.
del example_airplane
del example_operating_point
# Define a new solver. The available solver objects are the steady horseshoe vortex
# lattice method solver, the steady ring vortex lattice method solver, and the
# unsteady ring vortex lattice method solver.
example_solver = ps.steady_horseshoe_vortex_lattice_method.SteadyHorseshoeVortexLatticeMethodSolver(
# Solvers just take in one attribute: the problem they are going to solve.
steady_problem=example_problem
)
# Delete the extraneous pointer to the problem as it is now contained within the
# solver. Again, this is unnecessary, I just like to do this to ease debugging.
del example_problem
# Run the example solver.
example_solver.run(
# This parameter determines the detail of information that the solver's logger
# will output while running. The options are, in order of detail and severity,
# "Debug", "Info", "Warning", "Error", "Critical". The default value is "Warning".
logging_level="Warning",
)
# Call this function from the output module to print the results.
ps.output.print_steady_results(steady_solver=example_solver)
# Call the software's draw function on the solver.
ps.output.draw(
solver=example_solver,
# Tell the draw function to show the pressure's on the aircraft's panels. This
# value defaults to false.
show_delta_pressures=True,
# Tell the draw function to show the calculated streamlines. This value defaults
# to false.
show_streamlines=True,
# Tell the draw function to not show any wake vortices. As this is a steady
# solver, no vortices have been shed into the wake. This value defaults to false.
show_wake_vortices=False,
)
# Compare the output you see with the expected outputs saved in the "docs/examples
# expected output" directory.
| 50.858921 | 100 | 0.607734 |
ace31bc75f6c304a3efff0e6911ae27ee2b4ecee | 1,064 | py | Python | DataProcessor/dev_set_partition.py | cherry979988/feedforward-RE | 546a608a8cb5b35c475e577995df70a89affa15e | [
"MIT"
] | 1 | 2019-08-25T00:44:27.000Z | 2019-08-25T00:44:27.000Z | DataProcessor/dev_set_partition.py | cherry979988/feedforward-RE | 546a608a8cb5b35c475e577995df70a89affa15e | [
"MIT"
] | null | null | null | DataProcessor/dev_set_partition.py | cherry979988/feedforward-RE | 546a608a8cb5b35c475e577995df70a89affa15e | [
"MIT"
] | null | null | null | __author__ = 'QinyuanYe'
import sys
import random
from shutil import copyfile
# split the original train set into
# 90% train-set (train_split.json) and 10% dev-set (dev.json)
if __name__ == "__main__":
    # Fixed seed so the train/dev split is reproducible across runs.
    random.seed(1234)
    if len(sys.argv) != 3:
        # Parenthesized print works under both Python 2 and 3 (the original
        # print statement was Python-2-only); the message also named the
        # wrong script before.
        print('Usage:dev_set_partition.py -DATA -ratio')
        sys.exit(1)
    dataset = sys.argv[1]
    ratio = float(sys.argv[2])
    # Renamed from `dir` to avoid shadowing the builtin.
    data_dir = 'data/source/%s' % sys.argv[1]
    original_train_json = data_dir + '/train.json'
    train_json = data_dir + '/train_split.json'
    dev_json = data_dir + '/dev.json'
    if 'TACRED' in dataset or 'Sub' in dataset:
        # These datasets ship with their own dev set: copy the training file
        # through unchanged and skip the random split.
        print('%s has a provided dev set, skip splitting' % dataset)
        copyfile(original_train_json, train_json)
        sys.exit(0)
    # Context managers guarantee the file handles are closed; the original
    # leaked all three of them.
    with open(original_train_json, 'r') as fin:
        lines = fin.readlines()
    dev_size = int(ratio * len(lines))
    random.shuffle(lines)
    dev = lines[:dev_size]
    train_split = lines[dev_size:]
    with open(dev_json, 'w') as fout1:
        fout1.writelines(dev)
    with open(train_json, 'w') as fout2:
        fout2.writelines(train_split)
| 24.744186 | 67 | 0.640038 |
ace31cc0315e764b8f0ea6645b0f47f8899f189a | 4,198 | py | Python | tensorflow_probability/python/experimental/inference_gym/targets/probit_regression_test.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/inference_gym/targets/probit_regression_test.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/inference_gym/targets/probit_regression_test.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.probit_regression."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.inference_gym.internal import test_util
from tensorflow_probability.python.experimental.inference_gym.targets import probit_regression
from tensorflow_probability.python.internal import test_util as tfp_test_util
def _test_dataset(num_features, num_test_points=None):
  """Builds an all-zeros synthetic dataset for the probit regression model.

  Args:
    num_features: Number of feature columns.
    num_test_points: Optional number of test rows. When falsy, the test
      features/labels entries are set to None.

  Returns:
    A dict with `train_features`, `test_features`, `train_labels`, and
    `test_labels` entries.
  """
  if num_test_points:
    test_features = tf.zeros([num_test_points, num_features])
    test_labels = tf.zeros([num_test_points], dtype=tf.int32)
  else:
    test_features = None
    test_labels = None
  return {
      'train_features': tf.zeros([10, num_features]),
      'test_features': test_features,
      'train_labels': tf.zeros([10], dtype=tf.int32),
      'test_labels': test_labels,
  }
class ProbitRegressionTest(test_util.InferenceGymTestCase,
                           parameterized.TestCase):
  # Each @parameterized.named_parameters decorator below expands the decorated
  # method into one test per (name suffix, num_test_points) pair.
  @parameterized.named_parameters(
      ('NoTest', None),
      ('WithTest', 5),
  )
  def testBasic(self, num_test_points):
    """Checks that you get finite values given unconstrained samples.
    We check `unnormalized_log_prob` as well as the values of the sample
    transformations.
    Args:
      num_test_points: Number of test points.
    """
    num_features = 5
    model = probit_regression.ProbitRegression(
        **_test_dataset(num_features, num_test_points))
    # identity has num_features + 1 entries (presumably weights plus a bias --
    # TODO confirm against the model definition); the test-NLL shapes only
    # apply when a test set is supplied.
    self.validate_log_prob_and_transforms(
        model,
        sample_transformation_shapes=dict(
            identity=[num_features + 1],
            test_nll=[],
            per_example_test_nll=[num_test_points],
        ))
  @parameterized.named_parameters(
      ('NoTest', None),
      ('WithTest', 5),
  )
  def testDeferred(self, num_test_points):
    """Checks that the dataset is not prematurely materialized."""
    num_features = 5
    kwargs = _test_dataset(num_features, num_test_points)
    self.validate_deferred_materialization(
        probit_regression.ProbitRegression, **kwargs)
  def testPartiallySpecifiedTestSet(self):
    """Check that partially specified test set raises an error."""
    num_features = 5
    num_test_points = 5
    dataset = _test_dataset(num_features, num_test_points)
    # Drop only the features so the test set is half-specified; the model is
    # expected to reject this.
    del dataset['test_features']
    with self.assertRaisesRegex(ValueError, 'both specified'):
      probit_regression.ProbitRegression(**dataset)
  # Marked because the German Credit model pulls its dataset through TFDS.
  @test_util.uses_tfds
  def testGermanCredit(self):
    """Checks that you get finite values given unconstrained samples.
    We check `unnormalized_log_prob` as well as the values of the sample
    transformations.
    """
    model = probit_regression.GermanCreditNumericProbitRegression()
    self.validate_log_prob_and_transforms(
        model,
        sample_transformation_shapes=dict(identity=[25],),
        check_ground_truth_mean_standard_error=True,
        check_ground_truth_mean=True,
        check_ground_truth_standard_deviation=True,
    )
  @test_util.uses_tfds
  @tfp_test_util.numpy_disable_gradient_test
  def testGermanCreditHMC(self):
    """Checks approximate samples from the model against the ground truth."""
    model = probit_regression.GermanCreditNumericProbitRegression()
    # HMC settings (chains/steps/leapfrog/step size) are fixed here; changing
    # them changes test runtime and statistical power.
    self.validate_ground_truth_using_hmc(
        model,
        num_chains=4,
        num_steps=4000,
        num_leapfrog_steps=15,
        step_size=0.03,
    )
if __name__ == '__main__':
  # Hand off to the TensorFlow test runner when executed as a script.
  tf.test.main()
| 34.694215 | 94 | 0.715579 |
ace31d2ca0bc575398be1541081754e6629ec084 | 2,862 | py | Python | databuilder/example/scripts/sample_bq_usage_loader.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 2,072 | 2020-08-11T20:16:48.000Z | 2022-03-31T07:04:05.000Z | databuilder/example/scripts/sample_bq_usage_loader.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 795 | 2020-08-11T15:24:39.000Z | 2022-03-31T18:56:13.000Z | databuilder/example/scripts/sample_bq_usage_loader.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 671 | 2020-08-11T20:39:56.000Z | 2022-03-31T08:39:07.000Z | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
"""
This is an example script for extracting BigQuery usage results
"""
import logging
import os
import sqlite3
from pyhocon import ConfigFactory
from databuilder.extractor.bigquery_usage_extractor import BigQueryTableUsageExtractor
from databuilder.job.job import DefaultJob
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.publisher import neo4j_csv_publisher
from databuilder.publisher.neo4j_csv_publisher import Neo4jCsvPublisher
from databuilder.task.task import DefaultTask
from databuilder.transformer.bigquery_usage_transformer import BigqueryUsageTransformer
# Root logger at INFO so databuilder's progress messages are visible.
logging.basicConfig(level=logging.INFO)
# set env NEO4J_HOST to override localhost
NEO4J_ENDPOINT = f'bolt://{os.getenv("NEO4J_HOST", "localhost")}:7687'
# Lowercase alias of the constant above, plus demo credentials.
# NOTE(review): 'neo4j'/'test' are example values -- replace before pointing
# this script at a real Neo4j instance.
neo4j_endpoint = NEO4J_ENDPOINT
neo4j_user = 'neo4j'
neo4j_password = 'test'
def create_connection(db_file):
    """Open a SQLite connection to ``db_file``.

    Args:
        db_file: Path of the SQLite database file (``':memory:'`` also works).

    Returns:
        An open ``sqlite3.Connection``, or None if the connection failed.
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error:
        # Narrower than the original bare ``except Exception``: connection
        # failures are reported via the sqlite3.Error hierarchy. The
        # log-and-return-None contract is unchanged.
        logging.exception('exception')
    return None
# todo: Add a second model
def create_bq_job(metadata_type, gcloud_project):
    """Build a databuilder job that extracts BigQuery usage into Neo4j.

    Args:
        metadata_type: Name of the scratch directory under /var/tmp/amundsen.
        gcloud_project: GCP project id handed to the BigQuery usage extractor.

    Returns:
        A configured ``DefaultJob`` ready to be launched.
    """
    scratch_dir = f'/var/tmp/amundsen/{metadata_type}'
    node_dir = f'{scratch_dir}/nodes'
    relation_dir = f'{scratch_dir}/relationships'
    # Pipeline: extract from BigQuery, stage node/relationship CSVs on disk,
    # transform usage records for Neo4j.
    task = DefaultTask(
        extractor=BigQueryTableUsageExtractor(),
        loader=FsNeo4jCSVLoader(),
        transformer=BigqueryUsageTransformer(),
    )
    conf = {
        f'extractor.bigquery_table_usage.{BigQueryTableUsageExtractor.PROJECT_ID_KEY}': gcloud_project,
        f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.NODE_DIR_PATH}': node_dir,
        f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.RELATION_DIR_PATH}': relation_dir,
        f'loader.filesystem_csv_neo4j.{FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR}': True,
        f'publisher.neo4j.{neo4j_csv_publisher.NODE_FILES_DIR}': node_dir,
        f'publisher.neo4j.{neo4j_csv_publisher.RELATION_FILES_DIR}': relation_dir,
        f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_END_POINT_KEY}': neo4j_endpoint,
        f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_USER}': neo4j_user,
        f'publisher.neo4j.{neo4j_csv_publisher.NEO4J_PASSWORD}': neo4j_password,
        f'publisher.neo4j.{neo4j_csv_publisher.JOB_PUBLISH_TAG}': 'unique_tag',  # should use unique tag here like {ds}
    }
    return DefaultJob(
        conf=ConfigFactory.from_dict(conf),
        task=task,
        publisher=Neo4jCsvPublisher(),
    )
if __name__ == "__main__":
    # start table job
    # 'your-project-here' is a placeholder GCP project id that the user is
    # expected to replace.
    job1 = create_bq_job('bigquery_usage', 'your-project-here')
    job1.launch()
| 37.657895 | 119 | 0.758211 |
ace31e11958c33022ba96c48b4746b9b8f58b55b | 234 | py | Python | Encapsulation/Exercises/01.wild_cat_zoo/project/keeper.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
] | 1 | 2021-06-30T11:53:44.000Z | 2021-06-30T11:53:44.000Z | Encapsulation/Exercises/01.wild_cat_zoo/project/keeper.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
] | null | null | null | Encapsulation/Exercises/01.wild_cat_zoo/project/keeper.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
class Keeper:
    """A zoo keeper described by a name, an age, and a salary."""
    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.salary = salary
    def __repr__(self):
        # Produces the same text as the original f-string.
        return "Name: {}, Age: {}, Salary: {}".format(self.name, self.age, self.salary)
ace31f0e33e8411e1fb6f405055f858d2acd0620 | 160 | py | Python | python_patient_exercise_program_V3/patient_exercise_program.py | iek21/python_patient_exercise_program | d81993a281a8aec0c31d90af16b92bf5076617f6 | [
"MIT"
] | null | null | null | python_patient_exercise_program_V3/patient_exercise_program.py | iek21/python_patient_exercise_program | d81993a281a8aec0c31d90af16b92bf5076617f6 | [
"MIT"
] | null | null | null | python_patient_exercise_program_V3/patient_exercise_program.py | iek21/python_patient_exercise_program | d81993a281a8aec0c31d90af16b92bf5076617f6 | [
"MIT"
] | null | null | null | """
Created on Monday August 23 2021
@author: İzzet Emre KARSAVURAN
"""
import UI as ui
if __name__ == '__main__':
    # Launch the UI module's entry point when run as a script.
    ui.patient_exercise_program()
| 16 | 34 | 0.675 |
ace31f274bdbad646a9d2229c72f244323768a19 | 2,906 | py | Python | paclair/plugins/docker_plugin.py | jpthiery/paclair | bd6d56044a273e1658bc0167b47702775e705fdc | [
"Apache-2.0"
] | null | null | null | paclair/plugins/docker_plugin.py | jpthiery/paclair | bd6d56044a273e1658bc0167b47702775e705fdc | [
"Apache-2.0"
] | null | null | null | paclair/plugins/docker_plugin.py | jpthiery/paclair | bd6d56044a273e1658bc0167b47702775e705fdc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
from paclair import DOCKER_HUB_DOMAIN
from paclair import REGEX
from paclair.ancestries.docker import DockerAncestry
from paclair.docker.docker_image import DockerImage
from paclair.docker.docker_registry import DockerRegistry
from paclair.exceptions import ResourceNotFoundException
from paclair.plugins.abstract_plugin import AbstractPlugin
class DockerPlugin(AbstractPlugin):
    """
    Docker plugin
    """
    def __init__(self, clair, registries=None):
        """
        Constructor
        :param clair: ClairRequests object
        :param registries: registries' configuration {'registry1_domain': {'conf': ...},
                                                     'registry2_domain': {'conf': ...}}
        """
        super().__init__(clair, "Docker")
        registries = registries or {}
        # One DockerRegistry per configured domain, keyed by that domain.
        self.__registries = {domain: DockerRegistry(domain, **conf) for domain, conf in registries.items()}
        # Fallback registry for images without a configured/explicit domain.
        self.__docker_hub = DockerRegistry(DOCKER_HUB_DOMAIN)
        # Full image-reference pattern assembled from the shared fragments;
        # assumed to expose the named groups "domain", "name" and "tag" --
        # NOTE(review): confirm against paclair.REGEX.
        self._pattern = re.compile(REGEX['domain'] + REGEX['name'] + REGEX['tag'])
        # Splits a "<repository>.<rest-of-domain>" style host name.
        self._domain_pattern = re.compile(r'(?P<repository>[a-zA-Z0-9-]*)\.(?P<domain>[a-zA-Z0-9-.]*)$')
    def create_docker_image(self, name):
        """
        Create docker image
        :param name: image's name
        :return: paclair.docker.DockerImage
        :raises ResourceNotFoundException: if the name does not match the
            image reference pattern
        """
        matcher = self._pattern.match(name)
        if not matcher:
            raise ResourceNotFoundException("Incorrect image name: {}".format(name))
        # Base docker image
        # No domain at all: an official Docker Hub image ("library/<name>").
        if matcher.group("domain") is None:
            return DockerImage("library/" + matcher.group("name"), self.__docker_hub,
                               tag=matcher.group("tag") or 'latest')
        domain_regex_matcher = self._domain_pattern.search(matcher.group("domain") or "")
        # Repo docker
        # "domain" with no dot is really a Docker Hub user/organization prefix.
        if domain_regex_matcher is None:
            return DockerImage("{}/{}".format(matcher.group("domain"), matcher.group("name")), self.__docker_hub,
                               tag=matcher.group("tag") or 'latest')
        # Find the registry
        # Prefer a configured registry matched on the dot-split domain, then on
        # the whole domain; otherwise build an unconfigured registry on the fly.
        repo = ""
        if domain_regex_matcher.group("domain") in self.__registries:
            registry = self.__registries[domain_regex_matcher.group("domain")]
            repo = domain_regex_matcher.group("repository") or ""
        elif matcher.group("domain") in self.__registries:
            registry = self.__registries[matcher.group("domain")]
        else:
            registry = DockerRegistry(matcher.group("domain"))
        return DockerImage(matcher.group("name"), registry, repo, tag=matcher.group("tag") or 'latest')
    def create_ancestry(self, name):
        """
        Build the ancestry for the named image.
        :param name: image's name
        :return: paclair.ancestries.docker.DockerAncestry
        """
        return DockerAncestry(self.create_docker_image(name))
    def analyse(self, name, output=None):
        """
        Analyse the named image via the parent class, using its ancestry name.
        :param name: image's name
        :param output: optional output format, forwarded to the parent
        """
        ancestry = self.create_ancestry(name)
        return super().analyse(ancestry.name, output)
| 39.808219 | 113 | 0.635237 |
ace320629aff09e1ba8bb68cbc73ed215ef05cb2 | 620 | py | Python | vsaProject-master/proj04/proj04.py | gPongdee/vsaProject | ad8663d33e3a3642fb046243073a57c0f66f732f | [
"MIT"
] | null | null | null | vsaProject-master/proj04/proj04.py | gPongdee/vsaProject | ad8663d33e3a3642fb046243073a57c0f66f732f | [
"MIT"
] | null | null | null | vsaProject-master/proj04/proj04.py | gPongdee/vsaProject | ad8663d33e3a3642fb046243073a57c0f66f732f | [
"MIT"
] | null | null | null | # Name:
# Date:
def main():
    """Read a phrase and report whether it is a palindrome.

    Note: this file targets Python 2 (``raw_input``); the prints below use
    the parenthesized single-argument form so they behave identically on
    Python 2 and 3 (the original bare print statements were Python-2-only).
    """
    # Normalize once: lowercase and strip spaces.
    user_string = raw_input("Type a word or phrase: ").lower().replace(" ", "")
    # Already normalized, so the reversal needs no second lower()/replace().
    opp_string = user_string[::-1]
    if user_string == opp_string:
        print("Palindrome yay!")
    else:
        # The original redundant `elif user_string != opp_string` covered
        # exactly this case.
        print("That is not a palindrome.")
    replay()
def replay():
    """Ask whether to check another phrase; restart on yes, quit on no.

    As before, an unrecognized answer falls through silently and the
    program simply ends.
    """
    again = raw_input("Would you like to try another phrase(yes or no)?").lower()
    # Tuple membership replaces the chained == comparisons; parenthesized
    # prints work on both Python 2 and 3.
    if again in ("yes", "yeah", "yep"):
        print("Ok")
        main()
    elif again in ("no", "nope", "nah"):
        print("Bye")
        quit()
main()
| 28.181818 | 81 | 0.564516 |
ace320fbdcc52a9645f34b90de384f1cfed58419 | 265 | py | Python | paikkala/tests/test_runs.py | tracon/paikkala | dc859d924e4acfba95f3446a169bf5f88eecc6a2 | [
"MIT"
] | null | null | null | paikkala/tests/test_runs.py | tracon/paikkala | dc859d924e4acfba95f3446a169bf5f88eecc6a2 | [
"MIT"
] | 17 | 2018-05-13T12:52:02.000Z | 2020-02-16T16:51:05.000Z | paikkala/tests/test_runs.py | tracon/paikkala | dc859d924e4acfba95f3446a169bf5f88eecc6a2 | [
"MIT"
] | 1 | 2019-08-19T18:10:16.000Z | 2019-08-19T18:10:16.000Z | from paikkala.utils.runs import find_runs, following_integer
def test_runs():
    """find_runs should split the data into maximal runs of consecutive ints."""
    sample = [1, 2, 6, 7, 8, 9, 11, 12, 14, 15, 16]
    expected = [
        [1, 2],
        [6, 7, 8, 9],
        [11, 12],
        [14, 15, 16],
    ]
    assert find_runs(sample, following_integer) == expected
| 22.083333 | 60 | 0.516981 |
ace321ab7bf9911a1732df8473b5099396139d57 | 2,918 | py | Python | setup.py | xbis/electrum-sw | 56b877420de850d7752fe9223f2f0eaa28f11790 | [
"MIT"
] | null | null | null | setup.py | xbis/electrum-sw | 56b877420de850d7752fe9223f2f0eaa28f11790 | [
"MIT"
] | null | null | null | setup.py | xbis/electrum-sw | 56b877420de850d7752fe9223f2f0eaa28f11790 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
# Read the runtime and hardware-wallet dependency lists that feed the package
# metadata below.
with open('contrib/requirements/requirements.txt') as f:
    requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
    requirements_hw = f.read().splitlines()
# Load lib/version.py as a standalone module to get ELECTRUM_VERSION without
# importing the whole package.
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (3, 4, 0):
    sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    # Concatenating root_path with usr_share mimics a DESTDIR-style staged
    # install -- NOTE(review): looks intentional, confirm before changing.
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        # System-wide share directory is not writable: fall back to the
        # per-user data directory.
        if 'XDG_DATA_HOME' in os.environ:  # direct membership; .keys() was redundant
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
    ]
# Package metadata. Note the on-disk layout differs from the import names:
# package_dir maps 'electrum' -> lib/, 'electrum_gui' -> gui/, and
# 'electrum_plugins' -> plugins/.
setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    install_requires=requirements,
    packages=[
        'electrum',
        'electrum_gui',
        'electrum_gui.qt',
        'electrum_plugins',
        'electrum_plugins.audio_modem',
        'electrum_plugins.cosigner_pool',
        'electrum_plugins.email_requests',
        'electrum_plugins.greenaddress_instant',
        'electrum_plugins.hw_wallet',
        'electrum_plugins.keepkey',
        'electrum_plugins.labels',
        'electrum_plugins.ledger',
        'electrum_plugins.sw',
        'electrum_plugins.trezor',
        'electrum_plugins.digitalbitbox',
        'electrum_plugins.trustedcoin',
        'electrum_plugins.virtualkeyboard',
    ],
    package_dir={
        'electrum': 'lib',
        'electrum_gui': 'gui',
        'electrum_plugins': 'plugins',
    },
    # Non-Python data shipped inside the 'electrum' package (server lists,
    # checkpoints, wordlists, translations).
    package_data={
        'electrum': [
            'servers.json',
            'servers_testnet.json',
            'currencies.json',
            'checkpoints.json',
            'checkpoints_testnet.json',
            'www/index.html',
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ]
    },
    scripts=['electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",
    author_email="thomasv@electrum.org",
    license="MIT Licence",
    url="https://electrum.org",
    long_description="""Lightweight Bitcoin Wallet"""
)
# Optional modules (not required to run Electrum)
import pip
opt_modules = requirements_hw + ['pycryptodomex']
# Explicit loop instead of a list comprehension executed purely for its side
# effects.
# NOTE(review): installing packages as a side effect of running setup.py is
# surprising; extras_require would be the conventional mechanism.
for m in opt_modules:
    pip.main(['install', m])
| 30.715789 | 80 | 0.634339 |
ace322e96edc134cfe829cfd827f86799d2db528 | 275 | py | Python | venv/Lib/site-packages/pytz_deprecation_shim/_common.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 13 | 2021-11-02T03:35:39.000Z | 2022-03-30T03:34:55.000Z | venv/Lib/site-packages/pytz_deprecation_shim/_common.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 10 | 2020-06-11T21:37:09.000Z | 2021-11-15T17:47:36.000Z | venv/Lib/site-packages/pytz_deprecation_shim/_common.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 2 | 2021-12-06T00:45:20.000Z | 2022-02-19T15:50:56.000Z | import sys
# Sticky flag: once pytz has been observed in sys.modules, stay True forever.
_PYTZ_IMPORTED = False

def pytz_imported():
    """Detects whether or not pytz has been imported without importing pytz.

    The result latches: after the first time pytz is seen in ``sys.modules``,
    this keeps returning True even if pytz is later removed.
    """
    global _PYTZ_IMPORTED
    if not _PYTZ_IMPORTED:
        _PYTZ_IMPORTED = "pytz" in sys.modules
    return _PYTZ_IMPORTED
| 19.642857 | 79 | 0.72 |
ace322f083367b4037c377c96ba682918d3d26ac | 3,573 | py | Python | app_quis/models.py | naelallves/proj_sinal_transito | bc8b82c1809b8ff97996227bbefc07f42ea5f736 | [
"MIT"
] | null | null | null | app_quis/models.py | naelallves/proj_sinal_transito | bc8b82c1809b8ff97996227bbefc07f42ea5f736 | [
"MIT"
] | null | null | null | app_quis/models.py | naelallves/proj_sinal_transito | bc8b82c1809b8ff97996227bbefc07f42ea5f736 | [
"MIT"
] | null | null | null | from itertools import chain
from django.db import models
class Categoria(models.Model):
    """Quiz category."""
    nome = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
    class Meta:
        verbose_name_plural = 'Categorias'
    def to_dict(instance):
        # Plain fields first, then many-to-many id lists, same order as before.
        meta = instance._meta
        data = {
            field.name: field.value_from_object(instance)
            for field in chain(meta.concrete_fields, meta.private_fields)
        }
        for field in meta.many_to_many:
            data[field.name] = [related.id for related in field.value_from_object(instance)]
        return data
    def __repr__(self):
        return str(self.to_dict())
    def __str__(self):
        return repr(self)
class Pergunta(models.Model):
    """A quiz question, optionally tied to a category."""
    id_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, blank=True, null=True)
    código = models.CharField(max_length=50, blank=True, null=True)
    enunciado = models.TextField(blank=True, null=False)
    created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
    def getAlternativas(self):
        """Return the Alternativa objects linked to this question.

        The previous implementation called ``self.objects`` (model managers
        are not accessible from instances, so it always raised) and then read
        a non-existent ``alternativas`` attribute; walk the link rows instead.
        NOTE(review): relies on the default ``relperguntaalternativa_set``
        reverse accessor -- confirm no ``related_name`` override exists.
        """
        return [rel.id_alternativa for rel in self.relperguntaalternativa_set.all()]
    def to_dict(instance):
        """Serialize concrete/private fields plus many-to-many id lists."""
        opts = instance._meta
        data = {}
        for f in chain(opts.concrete_fields, opts.private_fields):
            data[f.name] = f.value_from_object(instance)
        for f in opts.many_to_many:
            data[f.name] = [i.id for i in f.value_from_object(instance)]
        return data
    def __repr__(self):
        return str(self.to_dict())
    def __str__(self):
        return self.__repr__()
    class Meta:
        verbose_name_plural = 'Perguntas'
class Alternativa(models.Model):
    """An answer option; correctness is tracked on the linking model."""
    conteudo = models.TextField(blank=True, null=False)
    created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
    class Meta:
        verbose_name_plural = 'Alternativas'
    def to_dict(instance):
        # Same field walk as the sibling models: concrete + private fields,
        # then many-to-many id lists.
        meta = instance._meta
        data = {}
        for field in chain(meta.concrete_fields, meta.private_fields):
            data[field.name] = field.value_from_object(instance)
        for field in meta.many_to_many:
            related_ids = [obj.id for obj in field.value_from_object(instance)]
            data[field.name] = related_ids
        return data
    def __repr__(self):
        return str(self.to_dict())
    def __str__(self):
        return repr(self)
class RelPerguntaAlternativa(models.Model):
    # Link table between a question and one of its answer options; ``certa``
    # flags whether that option is the correct one.
    id_pergunta = models.ForeignKey(Pergunta, on_delete=models.CASCADE)
    id_alternativa = models.ForeignKey(Alternativa, on_delete=models.CASCADE)
    certa = models.BooleanField(default=False)
    def to_dict(instance):
        # Duplicated verbatim across all four models in this module; a shared
        # mixin would remove the repetition.
        opts = instance._meta
        data = {}
        for f in chain(opts.concrete_fields, opts.private_fields):
            data[f.name] = f.value_from_object(instance)
        for f in opts.many_to_many:
            data[f.name] = [i.id for i in f.value_from_object(instance)]
        return data
    def __repr__(self):
        return str(self.to_dict())
    def __str__(self):
        return self.__repr__()
# class RelAlternativaCategoria(models.Model):
# id_alternativa = models.ForeignKey(Alternativa, on_delete=models.CASCADE)
# id_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
# certa = models.BooleanField(default=False) | 40.146067 | 96 | 0.679261 |
ace324b4a8cd90e85d8d52ab3f064fe1bc6b3404 | 1,644 | py | Python | src/main/python/org/o3project/odenos/core/component/network/flow/ofpflow/ofp_flow_action_push_pbb.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 26 | 2015-02-18T10:22:50.000Z | 2020-06-18T05:07:54.000Z | src/main/python/org/o3project/odenos/core/component/network/flow/ofpflow/ofp_flow_action_push_pbb.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 45 | 2015-02-20T00:40:45.000Z | 2021-12-14T21:07:57.000Z | src/main/python/org/o3project/odenos/core/component/network/flow/ofpflow/ofp_flow_action_push_pbb.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 30 | 2015-02-19T02:00:35.000Z | 2017-02-18T15:28:09.000Z | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.basic.flow_action import (
FlowAction
)
class OFPFlowActionPushPbb(FlowAction):
    """OpenFlow "push PBB header" flow action.

    Stores the EtherType of the pushed tag in the action body dict.
    Improvement: ``DEFAULT_ETHER_TYPE`` was declared but never used;
    it is now the default for ``eth_type`` (backward compatible —
    existing callers passing both arguments are unaffected).
    """

    # 0x88e7 is the standard EtherType for a PBB (802.1ah) tag.
    DEFAULT_ETHER_TYPE = 0x88e7

    # property key
    ETH_TYPE = "eth_type"

    def __init__(self, type_, eth_type=DEFAULT_ETHER_TYPE):
        """Create the action.

        :param type_: action type identifier, forwarded to FlowAction.
        :param eth_type: EtherType of the pushed tag (default 0x88e7).
        """
        super(OFPFlowActionPushPbb, self).__init__(type_)
        self._body[self.ETH_TYPE] = eth_type

    @property
    def eth_type(self):
        """EtherType stored in the action body."""
        return self._body[self.ETH_TYPE]

    @classmethod
    def create_from_packed(cls, packed):
        """Rebuild an instance from its packed (dict) representation."""
        return cls(packed[cls.TYPE], packed[cls.ETH_TYPE])

    def packed_object(self):
        """Return the body dict used for serialization."""
        return self._body
| 38.232558 | 80 | 0.535888 |
ace324d0eded1f2c2aa63675963c7aaf4b746ad0 | 446 | py | Python | venv/src/pages/migrations/0006_ruta_nombre_destino.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | 1 | 2019-10-28T03:44:38.000Z | 2019-10-28T03:44:38.000Z | venv/src/pages/migrations/0006_ruta_nombre_destino.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | null | null | null | venv/src/pages/migrations/0006_ruta_nombre_destino.py | ddelgadoJS/ProyectoWeb | f899c910bf16a79d5c3498bc6e8aa6b741fb56e1 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-14 16:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; adds `nombre_destino` to the `ruta` model.
    dependencies = [
        ('pages', '0005_auto_20191114_1018'),
    ]
    operations = [
        migrations.AddField(
            model_name='ruta',
            name='nombre_destino',
            # preserve_default=False: 'default' is used only to backfill
            # existing rows during this migration, not kept on the field.
            field=models.CharField(default='default', max_length=10000),
            preserve_default=False,
        ),
    ]
| 22.3 | 72 | 0.607623 |
ace324ddcfaa481c6839045c9eee0e251bb3b9a8 | 1,466 | py | Python | rick_db/sql/common.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
] | null | null | null | rick_db/sql/common.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
] | null | null | null | rick_db/sql/common.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
class SqlError(Exception):
    """Generic SQL-building error."""
class Literal:
    """
    Representation class for literal (raw, unescaped) SQL expressions.

    Fix: ``__str__`` previously returned the stored value unchanged, so a
    non-string literal (e.g. ``Literal(42)``) raised ``TypeError:
    __str__ returned non-string``; the value is now coerced with str().
    """

    def __init__(self, literal):
        # literal: the raw expression to emit verbatim
        self._literal = literal

    def __str__(self):
        return str(self._literal)
class Sql:
    """Namespace of SQL keyword and internal clause-name constants."""

    # Internal clause identifiers (lower-case, used as dict keys by builders).
    DISTINCT = 'distinct'
    COLUMNS = 'columns'
    FROM = 'from'
    UNION = 'union'
    WHERE = 'where'
    GROUP = 'group'
    HAVING = 'having'
    ORDER = 'order'
    LIMIT_OFFSET = 'limitoffset'
    FOR_UPDATE = 'forupdate'
    ANONYMOUS = '_'
    # JOIN keywords.
    INNER_JOIN = 'INNER JOIN'
    LEFT_JOIN = 'LEFT JOIN'
    RIGHT_JOIN = 'RIGHT JOIN'
    FULL_JOIN = 'FULL JOIN'
    CROSS_JOIN = 'CROSS JOIN'
    NATURAL_JOIN = 'NATURAL JOIN'
    # SQL keywords / tokens emitted into statements.
    SQL_WILDCARD = '*'
    SQL_SELECT = 'SELECT'
    SQL_UNION = 'UNION'
    SQL_UNION_ALL = 'UNION ALL'
    SQL_FROM = 'FROM'
    SQL_WHERE = 'WHERE'
    SQL_DISTINCT = 'DISTINCT'
    SQL_GROUP_BY = 'GROUP BY'
    SQL_ORDER_BY = 'ORDER BY'
    SQL_HAVING = 'HAVING'
    SQL_FOR_UPDATE = 'FOR UPDATE'
    SQL_AND = 'AND'
    SQL_AS = 'AS'
    SQL_OR = 'OR'
    SQL_ON = 'ON'
    SQL_ASC = 'ASC'
    SQL_DESC = 'DESC'
    SQL_OFFSET = 'OFFSET'
    SQL_LIMIT = 'LIMIT'
    SQL_INSERT = 'INSERT INTO'
    SQL_VALUES = 'VALUES'
    SQL_RETURNING = "RETURNING"
    SQL_LIST_DELIMITER_LEFT = '('
    SQL_LIST_DELIMITER_RIGHT = ')'
    SQL_ALL = 'ALL'
    SQL_DELETE = 'DELETE FROM'
    SQL_CASCADE = 'CASCADE'
    SQL_UPDATE = 'UPDATE'
    SQL_SET = 'SET'
class SqlStatement:
    """Base class for SQL statement objects."""
| 20.647887 | 48 | 0.605048 |
ace3250e9fd3390ecc62f56f742c707788eecf99 | 1,201 | py | Python | altro/curva_logistica.py | mattyonweb/loquacious-lamp | 1aae40ed537fec37f851fde97ae2d8577fabc42c | [
"Unlicense"
] | null | null | null | altro/curva_logistica.py | mattyonweb/loquacious-lamp | 1aae40ed537fec37f851fde97ae2d8577fabc42c | [
"Unlicense"
] | null | null | null | altro/curva_logistica.py | mattyonweb/loquacious-lamp | 1aae40ed537fec37f851fde97ae2d8577fabc42c | [
"Unlicense"
] | null | null | null | ''' logistic map. Per information: https://it.wikipedia.org/wiki/Mappa_logistica'''
# -*- coding: UTF-8 -*-
from PIL import Image, ImageDraw
import time
# Canvas size, read interactively (Python 2 raw_input).
WIDTH = int(raw_input("width: "))
HEIGHT = int(raw_input("height: "))
def normalize(value, min0, max0, minF, maxF):
    """Linearly remap *value* from [min0, max0] to [minF, maxF].

    Equivalent of Processing's map().  The multiplication is kept before
    the division so Python 2 integer inputs behave exactly as before.
    """
    scaled = (value - min0) * (maxF - minF)
    return scaled / (max0 - min0) + minF
y = 0.8 # initial value y0
r = r_in = 1 # value of the parameter r (acts like the x of an f(x))
r_max = 4 # max value of r (intrinsic limit of the formula)
im = Image.new("RGB", (WIDTH, HEIGHT), "black")
draw = ImageDraw.Draw(im)
# NOTE(review): the 'time' import above is unused; only values newly
# added to minilist get drawn for each r (duplicates are skipped).
while r < 4: # while-loop because range() does not allow non-integer steps
    for _ in range(400):
        y = r * y * (1-y) # the recurrence y(n+1) = r*y(n)*(1-y(n)) — burn-in
    minilist = [] # speed-up: points already drawn for this r are
                  # remembered here and not redrawn.
    for _ in range(200):
        y = r * y * (1-y)
        if y not in minilist:
            minilist.append(y) # see above
            screen_x = normalize(r,r_in,r_max+0.01,0,WIDTH) # map r to an x pixel
            screen_y = normalize(y,0,1,0,HEIGHT)
            draw.point((screen_x, screen_y), fill = "white")
    r += 0.001
im.save("cazzo.jpg")
| 30.025 | 83 | 0.646128 |
ace325747ee5fbac510258992ba287381b492420 | 1,391 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit_arp_table.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit_arp_table.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit_arp_table.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: generated by AutoRest — manual edits will be overwritten when the
# SDK is regenerated.
class ExpressRouteCircuitArpTable(Model):
    """The ARP table associated with the ExpressRouteCircuit.
    :param age: Age
    :type age: int
    :param interface: Interface
    :type interface: str
    :param ip_address: The IP address.
    :type ip_address: str
    :param mac_address: The MAC address.
    :type mac_address: str
    """
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'age': {'key': 'age', 'type': 'int'},
        'interface': {'key': 'interface', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
        'mac_address': {'key': 'macAddress', 'type': 'str'},
    }
    def __init__(self, age=None, interface=None, ip_address=None, mac_address=None):
        super(ExpressRouteCircuitArpTable, self).__init__()
        self.age = age
        self.interface = interface
        self.ip_address = ip_address
        self.mac_address = mac_address
| 33.926829 | 84 | 0.591661 |
ace32617113901d99274efff03dff71a94446f41 | 1,258 | py | Python | cfstream/source.py | IRLToolkit/cloudflare-stream-python | 60771d81c55a16210782093a2bb63b5871f76c31 | [
"MIT"
] | null | null | null | cfstream/source.py | IRLToolkit/cloudflare-stream-python | 60771d81c55a16210782093a2bb63b5871f76c31 | [
"MIT"
] | null | null | null | cfstream/source.py | IRLToolkit/cloudflare-stream-python | 60771d81c55a16210782093a2bb63b5871f76c31 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2022-present IRLToolkit Inc.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import websockets
from .http import *
from .client import *
from .input import *
from .exceptions import *
# Package-level logger.
_log = logging.getLogger(__name__)
| 37 | 75 | 0.793323 |
ace3262a317959170eaff73c38c42d5e8f59e3f1 | 63,380 | py | Python | tests/pagegenerators_tests.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | [
"MIT"
] | null | null | null | tests/pagegenerators_tests.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | [
"MIT"
] | null | null | null | tests/pagegenerators_tests.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | [
"MIT"
] | 1 | 2020-04-14T14:52:24.000Z | 2020-04-14T14:52:24.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test pagegenerators module."""
#
# (C) Pywikibot team, 2009-2019
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, division, unicode_literals
import calendar
import datetime
import logging
import sys
import pywikibot
from pywikibot import pagegenerators, date
from pywikibot.exceptions import ServerError, UnknownExtension
from pywikibot.pagegenerators import (
PagesFromTitlesGenerator,
PreloadingGenerator,
CategorizedPageGenerator
)
from pywikibot.tools import has_module, PY2, suppress_warnings
from tests import join_data_path, mock
from tests.aspects import (
unittest,
TestCase,
DeprecationTestCase,
WikidataTestCase,
DefaultSiteTestCase,
RecentChangesTestCase,
)
from tests.thread_tests import GeneratorIntersectTestCase
if PY2:
from future_builtins import zip
en_wp_page_titles = (
    # just a bunch of randomly selected titles for English Wikipedia tests
    'Eastern Sayan',
    'The Addams Family (pinball)',
    'Talk:Nowy Sącz',
    'Talk:Battle of Węgierska Górka',
    'Template:!',
    'Template:Template',
)
en_wp_nopage_titles = (
    # titles which (per the name) presumably do not exist as pages on
    # English Wikipedia — used with en_wp_page_titles in filter tests
    'Cities in Burkina Faso',
    'Talk:Hispanic (U.S. Census)',
    'Talk:Stołpce',
    'Template:!/Doc',
    'Template:!/Meta',
    'Template:Template/Doc',
    'Template:Template/Meta',
)
class TestDryPageGenerators(TestCase):
    """Test pagegenerators methods."""
    family = 'wikipedia'
    code = 'en'
    # dry: presumably tells tests.aspects to avoid live API calls — see framework.
    dry = True
    # Fixture: 3 mainspace, 4 Talk and 6 Template titles (13 in total).
    titles = en_wp_page_titles + en_wp_nopage_titles
    def setUp(self):
        """Setup test."""
        super(TestDryPageGenerators, self).setUp()
        self.site = self.get_site()
    def assertFunction(self, obj):
        """Assert function test."""
        self.assertTrue(hasattr(pagegenerators, obj))
        self.assertTrue(hasattr(getattr(pagegenerators, obj), '__call__'))
    def test_module_import(self):
        """Test module import."""
        self.assertIn('pywikibot.pagegenerators', sys.modules)
    def test_PagesFromTitlesGenerator(self):
        """Test PagesFromTitlesGenerator."""
        self.assertFunction('PagesFromTitlesGenerator')
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        self.assertPageTitlesEqual(gen, self.titles)
    def test_NamespaceFilterPageGenerator(self):
        """Test NamespaceFilterPageGenerator."""
        self.assertFunction('NamespaceFilterPageGenerator')
        site = self.site
        # ns 0 (main): 3 fixture titles
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, 0, site)
        self.assertLength(tuple(gen), 3)
        # ns 1 (Talk): 4 fixture titles
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, 1, site)
        self.assertLength(tuple(gen), 4)
        # ns 10 (Template): 6 fixture titles
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, 10, site)
        self.assertLength(tuple(gen), 6)
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.NamespaceFilterPageGenerator(gen, (1, 10), site)
        self.assertLength(tuple(gen), 10)
        # namespaces may also be given by canonical name
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.NamespaceFilterPageGenerator(
            gen, ('Talk', 'Template'), site)
        self.assertLength(tuple(gen), 10)
    def test_RegexFilterPageGenerator(self):
        """Test RegexFilterPageGenerator."""
        self.assertFunction('RegexFilterPageGenerator')
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(gen, '/doc')
        self.assertPageTitlesEqual(gen,
                                   ('Template:!/Doc', 'Template:Template/Doc'))
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(gen, '/doc',
                                                      quantifier='none')
        self.assertLength(tuple(gen), 11)
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(gen, ['/doc', '/meta'])
        self.assertPageTitlesEqual(gen,
                                   ('Template:!/Doc',
                                    'Template:!/Meta',
                                    'Template:Template/Doc',
                                    'Template:Template/Meta'))
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(gen, ['/doc', '/meta'],
                                                      quantifier='none')
        self.assertLength(tuple(gen), 9)
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(gen, ['/doc', '/meta'],
                                                      quantifier='all')
        self.assertPageTitlesEqual(gen, [])
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(
            gen, ['Template', '/meta'], quantifier='all')
        self.assertPageTitlesEqual(gen, ('Template:Template/Meta', ))
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.RegexFilterPageGenerator(
            gen, ['template', '/meta'], quantifier='any')
        self.assertPageTitlesEqual(gen,
                                   ('Template:Template',
                                    'Template:!/Meta',
                                    'Template:Template/Doc',
                                    'Template:Template/Meta'))
        # ignore_namespace=False: the namespace prefix is matched as well
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles,
                                                      site=self.site)
        gen = pagegenerators.RegexFilterPageGenerator(
            gen, ['template', '/meta'], quantifier='any',
            ignore_namespace=False)
        self.assertLength(tuple(gen), 6)
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles,
                                                      site=self.site)
        gen = pagegenerators.RegexFilterPageGenerator(
            gen, ['template', '/meta'], quantifier='all',
            ignore_namespace=False)
        self.assertPageTitlesEqual(gen,
                                   ('Template:!/Meta',
                                    'Template:Template/Meta'))
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles,
                                                      site=self.site)
        gen = pagegenerators.RegexFilterPageGenerator(
            gen, ['template', '/meta'], quantifier='none',
            ignore_namespace=False)
        self.assertLength(tuple(gen), 7)
    def test_RegexBodyFilterPageGenerator(self):
        """Test RegexBodyFilterPageGenerator."""
        self.assertFunction('RegexBodyFilterPageGenerator')
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles,
                                                      site=self.site)
        pages = []
        for p in gen:
            # give every page synthetic text so body filtering is dry
            p.text = 'This is the content of {} as a sample'.format(p.title())
            pages.append(p)
        gen = pagegenerators.RegexBodyFilterPageGenerator(iter(pages), '/doc')
        self.assertPageTitlesEqual(gen,
                                   ('Template:!/Doc', 'Template:Template/Doc'))
        gen = pagegenerators.RegexBodyFilterPageGenerator(iter(pages), 'This')
        self.assertPageTitlesEqual(gen, self.titles)
        gen = pagegenerators.RegexBodyFilterPageGenerator(iter(pages), 'talk',
                                                          quantifier='none')
        self.assertLength(tuple(gen), 9)
class TestPagesFromPageidGenerator(TestCase):
    """Round-trip titles -> pageids -> pages via PagesFromPageidGenerator."""
    family = 'wikisource'
    code = 'en'
    base_title = ('Page:06-24-1920 -The Story of the Jones County '
                  'Calf Case.pdf/%s')
    def setUp(self):
        """Build the list of ten proofread-page titles."""
        super(TestPagesFromPageidGenerator, self).setUp()
        self.site = self.get_site()
        self.titles = [self.base_title % i for i in range(1, 11)]
    def test_PagesFromPageidGenerator(self):
        """Generator fed with page ids must yield the original titles."""
        title_gen = pagegenerators.PagesFromTitlesGenerator(self.titles,
                                                            self.site)
        pageids = [page.pageid for page in title_gen]
        pageid_gen = pagegenerators.PagesFromPageidGenerator(pageids,
                                                             self.site)
        self.assertPageTitlesEqual(pageid_gen, self.titles)
class TestCategoryFilterPageGenerator(TestCase):
    """Test CategoryFilterPageGenerator method."""
    family = 'wikisource'
    code = 'en'
    base_title = ('Page:06-24-1920 -The Story of the Jones County '
                  'Calf Case.pdf/%s')
    category_list = ['Category:Validated']
    def setUp(self):
        """Setup tests."""
        super(TestCategoryFilterPageGenerator, self).setUp()
        self.site = self.get_site()
        self.titles = [self.base_title % i for i in range(1, 11)]
        self.catfilter_list = [pywikibot.Category(
            self.site, cat) for cat in self.category_list]
    def test_CategoryFilterPageGenerator(self):
        """Test CategoryFilterPageGenerator."""
        site = self.site
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.CategoryFilterPageGenerator(
            gen, self.catfilter_list, site)
        # all ten proofread pages are expected in Category:Validated
        self.assertLength(tuple(gen), 10)
class TestQualityFilterPageGenerator(TestCase):
    """Test QualityFilterPageGenerator methods."""
    family = 'wikisource'
    code = 'en'
    cached = True
    base_title = 'Page:Popular Science Monthly Volume 1.djvu/%s'
    def setUp(self):
        """Setup tests."""
        super(TestQualityFilterPageGenerator, self).setUp()
        self.site = self.get_site()
        self.titles = [self.base_title % i for i in range(1, 11)]
    def test_QualityFilterPageGenerator(self):
        """Test QualityFilterPageGenerator."""
        site = self.site
        # ProofreadPage quality level 0 ("without text")
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.QualityFilterPageGenerator(gen, [0])
        self.assertLength(tuple(gen), 7)
        # quality level 4 ("validated")
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        gen = pagegenerators.QualityFilterPageGenerator(gen, [4])
        self.assertLength(tuple(gen), 3)
        # unfiltered: all ten pages pass through
        gen = pagegenerators.PagesFromTitlesGenerator(self.titles, site)
        self.assertLength(tuple(gen), 10)
class EdittimeFilterPageGeneratorTestCase(TestCase):
    """Test EdittimeFilterPageGenerator."""
    family = 'wikipedia'
    code = 'en'
    titles = en_wp_page_titles
    def test_first_edit(self):
        """Test first edit."""
        expect = (
            'The Addams Family (pinball)',
            'Talk:Nowy Sącz',
            'Template:Template',
        )
        # pages whose first edit happened before 2006
        gen = PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, first_edit_end=datetime.datetime(2006, 1, 1))
        self.assertPageTitlesEqual(gen, titles=expect, site=self.site)
        # complement: first edit from 2006 on
        gen = PagesFromTitlesGenerator(self.titles, self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, first_edit_start=datetime.datetime(2006, 1, 1))
        opposite_pages = list(gen)
        self.assertTrue(all(isinstance(p, pywikibot.Page)
                            for p in opposite_pages))
        self.assertTrue(all(p.title not in expect for p in opposite_pages))
    def test_last_edit(self):
        """Test last edit."""
        two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)
        nine_days_ago = datetime.datetime.now() - datetime.timedelta(days=9)
        # the sandbox is edited constantly, so it passes a recent-edit filter
        gen = PagesFromTitlesGenerator(['Wikipedia:Sandbox'], self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, last_edit_start=two_days_ago)
        self.assertLength(list(gen), 1)
        gen = PagesFromTitlesGenerator(['Wikipedia:Sandbox'], self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, last_edit_end=two_days_ago)
        self.assertIsEmpty(list(gen))
        gen = PagesFromTitlesGenerator(['Template:Side box'], self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, last_edit_end=nine_days_ago)
        self.assertLength(list(gen), 1)
        gen = PagesFromTitlesGenerator(['Template:Side box'], self.site)
        gen = pagegenerators.EdittimeFilterPageGenerator(
            gen, last_edit_start=nine_days_ago)
        self.assertIsEmpty(list(gen))
class SubpageFilterGeneratorTestCase(TestCase):
    """Test SubpageFilterGenerator."""
    family = 'wikipedia'
    code = 'test'
    def test_subpage_filter(self):
        """Test SubpageFilterGenerator."""
        site = self.get_site()
        test_cat = pywikibot.Category(site, 'Subpage testing')
        # filter by maximum subpage depth: first 0, then 3
        gen = CategorizedPageGenerator(test_cat)
        gen = pagegenerators.SubpageFilterGenerator(gen, 0)
        expect_0 = ('/home/test',)
        self.assertPageTitlesEqual(gen, titles=expect_0, site=site)
        gen = CategorizedPageGenerator(test_cat)
        gen = pagegenerators.SubpageFilterGenerator(gen, 3)
        expect_3 = (
            '/home/test',
            'User:Sn1per/ProtectTest1/test',
            'User:Sn1per/ProtectTest1/test/test',
        )
        self.assertPageTitlesEqual(gen, titles=expect_3, site=site)
class PetScanPageGeneratorTestCase(TestCase):
    """Test PetScanPageGenerator."""
    family = 'wikipedia'
    code = 'test'
    def test_petscan(self):
        """Test PetScanPageGenerator."""
        site = self.get_site()
        # second argument toggles the category combination mode
        gen = pagegenerators.PetScanPageGenerator(['Pywikibot Protect Test'],
                                                  True, None, site)
        try:
            self.assertPageTitlesEqual(gen, titles=(
                'User:Sn1per/ProtectTest1', 'User:Sn1per/ProtectTest2'),
                site=site)
        except ServerError as e:
            # PetScan is an external service and may be unavailable
            self.skipTest(e)
        gen = pagegenerators.PetScanPageGenerator(['Pywikibot Protect Test'],
                                                  False, None, site)
        self.assertPageTitlesEqual(gen, titles=('User:Sn1per/ProtectTest1',
                                                'User:Sn1per/ProtectTest2'),
                                   site=site)
        # category names with characters that need URL encoding
        gen = pagegenerators.PetScanPageGenerator(
            ['Pywikibot PetScan Test',
             'Pywikibot Category That Needs&ToBe!Encoded',
             'Test'], True, None, site)
        self.assertPageTitlesEqual(gen, titles=('User:Sn1per/PetScanTest1', ),
                                   site=site)
class TestRepeatingGenerator(RecentChangesTestCase):
    """Test RepeatingGenerator."""
    def test_RepeatingGenerator(self):
        """Test RepeatingGenerator."""
        # RepeatingGenerator is deprecated; silence its warning
        with suppress_warnings(category=DeprecationWarning):
            gen = pagegenerators.RepeatingGenerator(
                self.site.recentchanges,
                key_func=lambda x: x['revid'],
                sleep_duration=10,
                reverse=True,
                namespaces=[0],
                total=self.length)
            items = list(gen)
        self.assertLength(items, self.length)
        # items must come back in chronological order
        timestamps = [pywikibot.Timestamp.fromISOformat(item['timestamp'])
                      for item in items]
        self.assertEqual(sorted(timestamps), timestamps)
        self.assertTrue(all(item['ns'] == 0 for item in items))
        # revids must be unique (key_func deduplicates)
        self.assertLength({item['revid'] for item in items}, self.length)
class TestTextfilePageGenerator(DefaultSiteTestCase):
    """Test loading pages from a textfile."""
    dry = True
    # which column of each expected_titles pair to use, keyed by the
    # namespace's title-case setting
    title_columns = {
        'case-sensitive': 0,
        'first-letter': 1,
    }
    expected_titles = (
        ('file', 'File'),
        ('bracket', 'Bracket'),
        ('MediaWiki:Test', 'MediaWiki:Test'),
        ('under score', 'Under score'),
        ('Upper case', 'Upper case'),
    )
    def test_brackets(self):
        """Test TextfilePageGenerator with brackets."""
        filename = join_data_path('pagelist-brackets.txt')
        site = self.get_site()
        titles = list(pagegenerators.TextfilePageGenerator(filename, site))
        self.assertLength(titles, self.expected_titles)
        expected_titles = [
            expected_title[self.title_columns[site.namespaces[page.namespace()]
                                              .case]]
            for expected_title, page in zip(self.expected_titles, titles)]
        self.assertPageTitlesEqual(titles, expected_titles)
    def test_lines(self):
        """Test TextfilePageGenerator with newlines."""
        filename = join_data_path('pagelist-lines.txt')
        site = self.get_site()
        titles = list(pagegenerators.TextfilePageGenerator(filename, site))
        self.assertLength(titles, self.expected_titles)
        expected_titles = [
            expected_title[self.title_columns[site.namespaces[page.namespace()]
                                              .case]]
            for expected_title, page in zip(self.expected_titles, titles)]
        self.assertPageTitlesEqual(titles, expected_titles)
class TestYearPageGenerator(DefaultSiteTestCase):
    """Test the year page generator."""
    def test_basic(self):
        """Test YearPageGenerator."""
        site = self.get_site()
        # Some languages are missing (T85681)
        if (site.lang not in date.formats['YearBC']
                or site.lang not in date.formats['YearAD']):
            self.skipTest(
                'Date formats for this language are missing from date.py')
        start = -20
        end = 2026
        i = 0
        for page in pagegenerators.YearPageGenerator(start, end, site):
            self.assertIsInstance(page, pywikibot.Page)
            self.assertEqual(date.formatYear(site.lang, start + i),
                             page.title())
            self.assertNotEqual(page.title(), '0')
            i += 1
            # there is no year 0, so the generator skips it
            if start + i == 0:
                i += 1
        self.assertEqual(start + i - 1, end)
class TestDayPageGenerator(DefaultSiteTestCase):
    """Test the day page generator."""
    @classmethod
    def setUpClass(cls):
        """Setup class for tests."""
        super(TestDayPageGenerator, cls).setUpClass()
        cls.site = cls.get_site()
        cls.fd = date.FormatDate(cls.site)
    def _run_test(self, start_month=1, end_month=12, year=2000):
        """Test method for DayPageGenerator."""
        params = {
            'start_month': start_month,
            'end_month': end_month,
            'site': self.site,
        }
        if year != 2000:
            params['year'] = year
        # use positional parameter
        gen1 = pagegenerators.DayPageGenerator(
            start_month, end_month, self.site, year)
        # use keyworded parameter and default for year
        gen2 = pagegenerators.DayPageGenerator(**params)
        for page in gen1:
            self.assertIsInstance(page, pywikibot.Page)
            self.assertTrue(page.isAutoTitle)
        # expected titles for every day of each requested month
        expected = []
        for month in range(start_month, end_month + 1):
            for day in range(1, calendar.monthrange(year, month)[1] + 1):
                expected.append(self.fd(month, day))
        self.assertPageTitlesEqual(gen2, expected)
    def test_basic(self):
        """General test for day page generator."""
        self._run_test()
    def test_year_2001(self):
        """Test for day page generator of year 2001."""
        self._run_test(2, year=2001)
    def test_year_2100(self):
        """Test for day page generator of year 2100."""
        self._run_test(end_month=2, year=2100)
    def test_start_0(self):
        """Test for day page generator with startMonth 0."""
        self.assertRaises(calendar.IllegalMonthError, self._run_test, 0)
    def test_end_13(self):
        """Test for day page generator with endMonth 13."""
        self.assertRaises(calendar.IllegalMonthError, self._run_test, 12, 13)
class TestPreloadingGenerator(DefaultSiteTestCase):
    """Test preloading generator on lists.

    Improvement: the three test methods were byte-for-byte copies apart
    from the group size and one ordering assertion; the shared body is
    extracted into a private helper.  Test-method names are unchanged.
    """
    def _assert_preloading(self, groupsize, check_order=False):
        """Run PreloadingGenerator over mainpage links and verify pages.

        Each yielded page must be a pywikibot.Page carrying exactly one
        loaded revision with text, and must not carry page props.

        @param groupsize: batch size forwarded to PreloadingGenerator
        @param check_order: also assert output order matches input order
        """
        mainpage = self.get_mainpage()
        links = [page for page in self.site.pagelinks(mainpage, total=20)
                 if page.exists()]
        count = 0
        for page in PreloadingGenerator(links, groupsize=groupsize):
            self.assertIsInstance(page, pywikibot.Page)
            self.assertIsInstance(page.exists(), bool)
            self.assertLength(page._revisions, 1)
            self.assertIsNotNone(page._revisions[page._revid].text)
            self.assertFalse(hasattr(page, '_pageprops'))
            if check_order:
                self.assertEqual(page, links[count])
            count += 1
        self.assertLength(links, count)
    def test_basic(self):
        """Test PreloadingGenerator with a list of pages."""
        self._assert_preloading(groupsize=20)
    def test_low_step(self):
        """Test PreloadingGenerator with a smaller batch size."""
        self._assert_preloading(groupsize=10)
    def test_order(self):
        """Test outcome is following same order of input."""
        self._assert_preloading(groupsize=10, check_order=True)
class TestDequePreloadingGenerator(DefaultSiteTestCase):
    """Test preloading generator on lists."""
    def test_deque_preloading(self):
        """Test pages being added to a DequePreloadingGenerator."""
        mainpage = self.get_mainpage()
        # the deque may grow while it is being iterated
        pages = pywikibot.tools.DequeGenerator([mainpage])
        gen = pagegenerators.DequePreloadingGenerator(pages)
        pages_out = []
        for page in gen:
            pages_out.append(page)
            # Add a page to the generator
            if not page.isTalkPage():
                pages.extend([page.toggleTalkPage()])
        self.assertTrue(all(isinstance(page,
                                       pywikibot.Page) for page in pages_out))
        self.assertIn(mainpage, pages_out)
        self.assertIn(mainpage.toggleTalkPage(), pages_out)
        self.assertLength(pages_out, 2)
        self.assertTrue(pages_out[1].isTalkPage())
class TestPreloadingEntityGenerator(WikidataTestCase):
    """Test preloading item generator."""
    def test_non_item_gen(self):
        """Test TestPreloadingEntityGenerator with ReferringPageGenerator."""
        site = self.get_site()
        # P31 is the Wikidata "instance of" property
        instance_of_page = pywikibot.Page(site, 'Property:P31')
        ref_gen = pagegenerators.ReferringPageGenerator(instance_of_page,
                                                        total=5)
        gen = pagegenerators.PreloadingEntityGenerator(ref_gen)
        self.assertTrue(all(isinstance(item,
                                       pywikibot.ItemPage) for item in gen))
class DryFactoryGeneratorTest(TestCase):
    """Dry tests for pagegenerators.GeneratorFactory."""
    family = 'wikipedia'
    code = 'en'
    dry = True
    def test_one_namespace(self):
        """Test one namespace."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:2')
        self.assertEqual(gf.namespaces, {2})
    def test_two_namespaces(self):
        """Test two namespaces."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:2')
        gf.handleArg('-ns:Talk')
        self.assertEqual(gf.namespaces, {2, 1})
    def test_two_named_namespaces(self):
        """Test two named namespaces."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:Talk,File')
        self.assertEqual(gf.namespaces, {1, 6})
    def test_two_numeric_namespaces(self):
        """Test two namespaces delimited by colon."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:1,6')
        self.assertEqual(gf.namespaces, {1, 6})
    def test_immutable_namespaces_on_read(self):
        """Test immutable namespaces on read."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:1,6')
        self.assertEqual(gf.namespaces, {1, 6})
        self.assertIsInstance(gf.namespaces, frozenset)
        # after the first read, further -ns arguments only warn
        with suppress_warnings(
            'Cannot handle arg -namespaces as namespaces can not be altered '
            'after a generator is created.',
            pywikibot.exceptions.ArgumentDeprecationWarning
        ):
            gf.handleArg('-ns:0')
        self.assertEqual(gf.namespaces, {1, 6})
    def test_unsupported_quality_level_filter(self):
        """Test unsupported option."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        self.assertRaises(UnknownExtension, gf.handleArg, '-ql:2')
    def test_one_excluded_namespaces(self):
        """Test one excluded namespaces."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:not:2')
        ns = set(range(16))
        ns.remove(2)
        self.assertTrue(ns.issubset(gf.namespaces))
    def test_two_excluded_namespaces(self):
        """Test two excluded namespaces."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:not:2')
        gf.handleArg('-ns:not:Talk')
        ns = set(range(16))
        ns.remove(2)
        ns.remove(1)
        self.assertTrue(ns.issubset(gf.namespaces))
    def test_two_excluded_named_namespaces(self):
        """Test two excluded named namespaces."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:not:Talk,File')
        ns = set(range(16))
        ns.remove(1)
        ns.remove(6)
        self.assertTrue(ns.issubset(gf.namespaces))
    def test_two_excluded_numeric_namespaces(self):
        """Test two excluded namespaces delimited by colon."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:not:1,6')
        ns = set(range(16))
        ns.remove(1)
        ns.remove(6)
        self.assertTrue(ns.issubset(gf.namespaces))
    def test_mixed_namespaces_with_exclusion(self):
        """Test mixed namespaces with exclusion."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:not:2,File')
        gf.handleArg('-ns:not:3,4,5')
        gf.handleArg('-ns:6,7')
        ns = set(range(16))
        for i in range(2, 6):
            ns.remove(i)
        self.assertTrue(ns.issubset(gf.namespaces))
    def test_given_namespaces_with_exclusion(self):
        """Test mixed namespaces with exclusion."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        gf.handleArg('-ns:1,2,3,4,5')
        gf.handleArg('-ns:not:User')
        self.assertEqual(gf.namespaces, {1, 3, 4, 5})
    def test_invalid_arg(self):
        """Test invalid / non-generator arguments."""
        gf = pagegenerators.GeneratorFactory(site=self.get_site())
        self.assertFalse(gf.handleArg('-foobar'))
        self.assertFalse(gf.handleArg('barbaz'))
        self.assertFalse(gf.handleArg('-ì'))
        self.assertFalse(gf.handleArg('ì'))
class TestItemClaimFilterPageGenerator(WikidataTestCase):

    """Test item claim filter page generator generator."""

    def _simple_claim_test(self, prop, claim, qualifiers, valid, negate=False):
        """
        Test given claim on sample (India) page.

        @param prop: the property to check
        @type prop: str
        @param claim: the claim the property should contain
        @param qualifiers: qualifiers to check or None
        @type qualifiers: dict or None
        @param valid: true if the page should be yielded by the generator,
            false otherwise
        @type valid: bool
        @param negate: true to swap the filters' behavior
        @type negate: bool
        """
        # Q668 is the fixed test subject (India).
        item = pywikibot.ItemPage(self.get_repo(), 'Q668')
        gen = pagegenerators.ItemClaimFilterPageGenerator([item], prop, claim,
                                                          qualifiers, negate)
        pages = set(gen)
        # The filter either passes the single input item through or drops it.
        if valid:
            self.assertLength(pages, 1)
        else:
            self.assertIsEmpty(pages)

    def _get_council_page(self):
        """Return United Nations Security Council Wikidata page."""
        site = self.get_site()
        return pywikibot.Page(site, 'Q37470')

    def test_valid_qualifiers(self):
        """Test ItemClaimFilterPageGenerator using valid qualifiers."""
        # Qualifier values may be WbTime objects or plain strings.
        qualifiers = {
            'P580': pywikibot.WbTime(1950, 1, 1, precision=9,
                                     site=self.get_site()),
            'P582': '1951',
        }
        self._simple_claim_test('P463', self._get_council_page(), qualifiers,
                                True)

    def test_invalid_qualifiers(self):
        """Test ItemClaimFilterPageGenerator with invalid qualifiers."""
        qualifiers = {
            'P580': 1950,
            'P582': pywikibot.WbTime(1960, 1, 1, precision=9,
                                     site=self.site),
        }
        self._simple_claim_test('P463', self._get_council_page(), qualifiers,
                                False)

    def test_nonexisting_qualifiers(self):
        """
        Test ItemClaimFilterPageGenerator on sample page.

        The item does not have the searched qualifiers.
        """
        qualifiers = {
            'P370': pywikibot.WbTime(1950, 1, 1, precision=9,
                                     site=self.get_site()),
            'P232': pywikibot.WbTime(1960, 1, 1, precision=9,
                                     site=self.get_site()),
        }
        self._simple_claim_test('P463', self._get_council_page(), qualifiers,
                                False)

    def test_no_qualifiers(self):
        """Test ItemClaimFilterPageGenerator without qualifiers."""
        self._simple_claim_test('P474', '+91', None, True)
        self._simple_claim_test('P463', 'Q37470', None, True)
        self._simple_claim_test('P1334', '28,97.4,0.1', None, True)
        self._simple_claim_test('P1334', '28,96,0.01', None, False)

    def test_negative_filter(self):
        """Test negative ItemClaimFilterPageGenerator."""
        # With negate=True the expected yield/drop outcomes are inverted.
        self._simple_claim_test('P463', 'Q37470', None, False, True)
        self._simple_claim_test('P463', 'Q37471', None, True, True)
class TestFactoryGenerator(DefaultSiteTestCase):
"""Test pagegenerators.GeneratorFactory."""
def test_combined_generator(self):
"""Test getCombinedGenerator with generator parameter."""
gf = pagegenerators.GeneratorFactory()
gen = gf.getCombinedGenerator(gen='ABC')
self.assertEqual(tuple(gen), ('A', 'B', 'C'))
def test_intersect_generator(self):
"""Test getCombinedGenerator with generator parameter."""
gf = pagegenerators.GeneratorFactory()
gf.handleArg('-intersect')
gf.gens = ['Python 3.7-dev']
gen = gf.getCombinedGenerator(gen='Pywikibot 3.0.dev')
self.assertEqual(''.join(gen), 'Pyot 3.dev')
def test_ns(self):
"""Test namespace option."""
gf = pagegenerators.GeneratorFactory()
gf.handleArg('-ns:1')
gen = gf.getCombinedGenerator()
self.assertIsNone(gen)
def test_allpages_default(self):
"""Test allpages generator."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start:!'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLessEqual(len(pages), 10)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
self.assertEqual(page.namespace(), 0)
def test_allpages_ns(self):
"""Test allpages generator with namespace argument."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start:!'))
gf.handleArg('-limit:10')
gf.handleArg('-ns:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLessEqual(len(pages), 10)
self.assertPagesInNamespaces(gen, 1)
def test_regexfilter_default(self):
"""Test allpages generator with titleregex filter."""
gf = pagegenerators.GeneratorFactory()
# Matches titles with the same two or more continuous characters
self.assertTrue(gf.handleArg('-start'))
self.assertTrue(gf.handleArg('-titleregex:(.)\\1+'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 10)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
self.assertRegex(page.title().lower(), '(.)\\1+')
def test_regexfilter_ns_after(self):
"""Test allpages generator with titleregex and namespace filter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
self.assertTrue(gf.handleArg('-titleregex:.*'))
gf.handleArg('-ns:1')
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
pages = list(gen)
self.assertLessEqual(len(pages), 10)
self.assertPagesInNamespaces(pages, 1)
def test_regexfilter_ns_before(self):
"""Test allpages generator with namespace and titleregex filter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
gf.handleArg('-ns:1')
self.assertTrue(gf.handleArg('-titleregex:.*'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 10)
self.assertPagesInNamespaces(pages, 1)
def test_regexfilternot_default(self):
"""Test allpages generator with titleregexnot filter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
# matches titles with less than 11 characters
self.assertTrue(gf.handleArg('-titleregexnot:.{11,}'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 10)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
self.assertNotRegex(page.title().lower(), '.{11,}')
def test_regexfilternot_ns_after(self):
"""Test allpages generator with titleregexnot and namespace filter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
self.assertTrue(gf.handleArg('-titleregexnot:zzzz'))
gf.handleArg('-ns:1')
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
pages = list(gen)
self.assertLessEqual(len(pages), 10)
self.assertPagesInNamespaces(pages, 1)
def test_regexfilternot_ns_before(self):
"""Test allpages generator with namespace and titleregexnot filter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
gf.handleArg('-ns:1')
self.assertTrue(gf.handleArg('-titleregexnot:zzzz'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 10)
self.assertPagesInNamespaces(pages, 1)
def test_allpages_with_two_ns(self):
"""Test that allpages fails with two ns as parameter."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-start'))
gf.handleArg('-ns:3,1')
# allpages only accepts a single namespace, and will raise a
# TypeError if self.namespaces contains more than one namespace.
self.assertRaises(
TypeError,
'allpages module does not support multiple namespaces',
gf.getCombinedGenerator)
def test_prefixing_default(self):
"""Test prefixindex generator."""
gf = pagegenerators.GeneratorFactory()
self.assertTrue(gf.handleArg('-prefixindex:a'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLessEqual(len(pages), 10)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
self.assertTrue(page.title().lower().startswith('a'))
def test_prefixing_ns(self):
"""Test prefixindex generator with namespace filter."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-prefixindex:a')
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 1)
def test_recentchanges_timespan(self):
"""Test recentchanges generator with offset and duration params."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-recentchanges:120,70')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertRaises(ValueError, gf.handleArg, '-recentchanges:3,2,1')
self.assertRaises(ValueError, gf.handleArg, '-recentchanges:12,-12')
self.assertRaises(
ValueError, gf.handleArg, '-recentchanges:visualeditor,3,2,1')
self.assertRaises(
ValueError, gf.handleArg, '-recentchanges:"mobile edit,-10,20"')
def test_recentchanges_rctag(self):
"""Test recentchanges generator with recent changes tag."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-recentchanges:visualeditor')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespacesAll(gen, {0, 1, 2}, skip=True)
def test_recentchanges_default(self):
"""Test recentchanges generator with default namespace setting."""
if self.site.family.name in ('wpbeta', 'wsbeta'):
self.skipTest('Skipping {} due to too many autoblocked users'
.format(self.site))
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-recentchanges:50')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespacesAll(gen, {0, 1, 2}, skip=True)
def test_recentchanges_ns(self):
"""Test recentchanges generator with namespace."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-recentchanges:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 1)
def test_recentchanges_ns_multi(self):
"""Test recentchanges generator with multiple namespaces."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-ns:3')
gf.handleArg('-recentchanges:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, {1, 3})
def test_pageid(self):
"""Test pageid parameter."""
# Get reference pages and their pageids.
gf = pagegenerators.GeneratorFactory(site=self.get_site())
self.assertTrue(gf.handleArg('-random'))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
pages = list(gen)
self.assertLength(pages, 10)
# pipe-separated used as test reference.
pageids = '|'.join(str(page.pageid) for page in pages)
# Get by pageids.
gf = pagegenerators.GeneratorFactory(site=self.get_site())
gf.handleArg('-pageid:{}'.format(pageids))
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages_from_pageid = list(gen)
self.assertLength(pages_from_pageid, 10)
for page_a, page_b in zip(pages, pages_from_pageid):
self.assertIsInstance(page_a, pywikibot.Page)
self.assertIsInstance(page_b, pywikibot.Page)
self.assertTrue(page_a, page_b)
def test_pagegenerator(self):
"""Test page generator."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-page:Main Page')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
def test_random_generator_default(self):
"""Test random generator."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-random:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLength(pages, 1)
def test_random_generator_ns(self):
"""Test random generator with namespace."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-random:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 1)
def test_random_generator_ns_multi(self):
"""Test random generator with multiple namespaces."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-ns:3')
gf.handleArg('-random:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, {1, 3})
def test_randomredirect_generator_default(self):
"""Test random generator."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-randomredirect:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLength(pages, 1)
def test_randomredirect_generator_ns(self):
"""Test random generator with namespace."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-randomredirect:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 1)
def test_randomredirect_generator_ns_multi(self):
"""Test random generator with multiple namespaces."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-ns:3')
gf.handleArg('-randomredirect:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, {1, 3})
def test_pages_with_property_generator(self):
"""Test the pages_with_property_generator method."""
mysite = self.get_site()
if mysite.mw_version < '1.21':
self.skipTest('requires v1.21+')
for item in ('defaultsort', 'disambiguation', 'displaytitle',
'hiddencat', 'invalid_property'):
if item in mysite.get_property_names():
gf = pagegenerators.GeneratorFactory()
gf.handleArg('-property:{0}'.format(item))
gf.handleArg('-limit:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 10)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
if item == 'disambiguation':
self.assertTrue(page.isDisambig())
else:
with self.assertRaises(NotImplementedError):
mysite.pages_with_property(item)
self.fail(
'NotImplementedError not raised for {0}'.format(item))
def test_empty_generator(self):
"""Test empty generator."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gen = gf.getCombinedGenerator()
self.assertIsNone(gen)
def test_positionalargument(self):
"""Test page generator with positional argument."""
gf1 = pagegenerators.GeneratorFactory(site=self.site,
positional_arg_name='page')
gf1.handleArg('Main Page')
gen1 = gf1.getCombinedGenerator()
self.assertIsNotNone(gen1)
gf2 = pagegenerators.GeneratorFactory(site=self.site)
gf2.handleArg('-page:Main Page')
gen2 = gf2.getCombinedGenerator()
self.assertIsNotNone(gen2)
self.assertEqual(list(gen1), list(gen2))
def test_positionalargument_with_colon(self):
"""Test page generator with positional argument with colon."""
gf1 = pagegenerators.GeneratorFactory(site=self.site,
positional_arg_name='page')
gf1.handleArg('Project:Main Page')
gen1 = gf1.getCombinedGenerator()
self.assertIsNotNone(gen1)
gf2 = pagegenerators.GeneratorFactory(site=self.site)
gf2.handleArg('-page:Project:Main Page')
gen2 = gf2.getCombinedGenerator()
self.assertIsNotNone(gen2)
self.assertEqual(list(gen1), list(gen2))
def test_linter_generator_ns_valid_cat(self):
"""Test generator of pages with lint errors."""
if not self.site.has_extension('Linter'):
self.skipTest('The site {0} does not use Linter extension'
.format(self.site))
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-ns:1')
gf.handleArg('-limit:3')
gf.handleArg('-linter:obsolete-tag')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 5)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
self.assertEqual(page._lintinfo['category'], 'obsolete-tag')
self.assertPagesInNamespaces(pages, {1})
def test_linter_generator_invalid_cat(self):
"""Test generator of pages with lint errors."""
if not self.site.has_extension('Linter'):
self.skipTest('The site {0} does not use Linter extension'
.format(self.site))
gf = pagegenerators.GeneratorFactory(site=self.site)
self.assertRaises(ValueError, gf.handleArg, '-linter:dummy')
def test_linter_generator_show(self):
"""Test generator of pages with lint errors."""
gf = pagegenerators.GeneratorFactory(site=self.site)
if self.site.has_extension('Linter'):
with self.assertRaises(SystemExit) as cm:
gf.handleArg('-linter:show')
self.assertEqual(cm.exception.code, 0)
else:
self.assertRaises(UnknownExtension, gf.handleArg, '-linter:show')
def test_querypage_generator_with_valid_page(self):
"""Test generator of pages with lint errors."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-querypage:Ancientpages')
gf.handleArg('-limit:5')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = list(gen)
self.assertLessEqual(len(pages), 5)
for page in pages:
self.assertIsInstance(page, pywikibot.Page)
def test_querypage_generator_with_invalid_page(self):
"""Test generator of pages with lint errors."""
gf = pagegenerators.GeneratorFactory(site=self.site)
self.assertRaises(AssertionError, gf.handleArg, '-querypage:dummy')
def test_querypage_generator_with_no_page(self):
"""Test generator of pages with lint errors."""
gf = pagegenerators.GeneratorFactory(site=self.site)
with self.assertRaises(SystemExit) as cm:
gf.handleArg('-querypage')
self.assertEqual(cm.exception.code, 0)
class TestFactoryGeneratorNewpages(TestCase):
"""Test pagegenerators.GeneratorFactory for newpages."""
# Detached from TestFactoryGenerator due to T159029
sites = {
'eswiki': {
'family': 'wikipedia',
'code': 'es',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
'ruwikt': {
'family': 'wiktionary',
'code': 'ru',
},
'meta': {
'family': 'meta',
'code': 'meta',
},
'frsource': {
'family': 'wikisource',
'code': 'fr',
},
'devoy': {
'family': 'wikivoyage',
'code': 'de',
},
}
def test_newpages_default(self, key):
"""Test newpages generator."""
site = self.get_site(key)
gf = pagegenerators.GeneratorFactory(site=site)
gf.handleArg('-newpages:60')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLessEqual(len(pages), 60)
newpages_url = self.site.base_url(
self.site.path() + '?title=Special:NewPages&uselang=en')
failure_message = 'No new pages returned by -newpages. ' \
'If this is the only failure, check whether {url} contains any ' \
'pages. If not, create a new page on the site to make the test ' \
'pass again.'.format(url=newpages_url)
self.assertIsNotEmpty(pages, msg=failure_message)
def test_newpages_ns_default(self, key):
"""Test newpages generator with limit argument."""
site = self.get_site(key)
gf = pagegenerators.GeneratorFactory(site=site)
gf.handleArg('-newpages:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 0)
def test_newpages_ns(self, key):
"""Test newpages generator with limit argument and namespace filter."""
site = self.get_site(key)
gf = pagegenerators.GeneratorFactory(site=site)
gf.handleArg('-ns:1')
gf.handleArg('-newpages:10')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertPagesInNamespaces(gen, 1)
class TestWantedFactoryGenerator(DefaultSiteTestCase):

    """Test pagegenerators.GeneratorFactory for wanted pages."""

    def setUp(self):
        """Setup tests."""
        super(TestWantedFactoryGenerator, self).setUp()
        # A fresh factory per test; each test adds exactly one -wanted* arg.
        self.gf = pagegenerators.GeneratorFactory(site=self.site)

    def _generator_with_tests(self):
        """Test generator."""
        gen = self.gf.getCombinedGenerator()
        self.assertIsNotNone(gen)
        pages = list(gen)
        # Every caller requests at most five results (-wanted*:5).
        self.assertLessEqual(len(pages), 5)
        for page in pages:
            yield page

    def test_wanted_pages(self):
        """Test wantedpages generator."""
        self.gf.handleArg('-wantedpages:5')
        for page in self._generator_with_tests():
            self.assertIsInstance(page, pywikibot.Page)

    def test_wanted_files(self):
        """Test wantedfiles generator."""
        self.gf.handleArg('-wantedfiles:5')
        for page in self._generator_with_tests():
            self.assertIsInstance(page, pywikibot.FilePage)

    def test_wanted_templates(self):
        """Test wantedtemplates generator."""
        self.gf.handleArg('-wantedtemplates:5')
        for page in self._generator_with_tests():
            self.assertIsInstance(page, pywikibot.Page)
            # Wanted templates must come from the Template namespace (10).
            self.assertEqual(page.namespace(), 10)

    def test_wanted_categories(self):
        """Test wantedcategories generator."""
        self.gf.handleArg('-wantedcategories:5')
        for page in self._generator_with_tests():
            self.assertIsInstance(page, pywikibot.Category)
class TestFactoryGeneratorWikibase(WikidataTestCase):
"""Test pagegenerators.GeneratorFactory on Wikibase site."""
def test_onlyif(self):
"""Test -onlyif without qualifiers."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-page:Q15745378')
self.assertTrue(gf.handleArg(
'-onlyif:P1476=International Journal of Minerals\\, '
'Metallurgy\\, and Materials'))
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertLength(set(gen), 1)
def test_onlyifnot(self):
"""Test -onlyifnot without qualifiers."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-page:Q15745378')
gf.handleArg('-onlyifnot:P1476=International Journal of Minerals\\, '
'Metallurgy\\, and Materials')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertIsEmpty(set(gen))
def test_onlyif_qualifiers(self):
"""Test -onlyif with qualifiers."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-page:Q668')
gf.handleArg('-onlyif:P47=Q837,P805=Q3088768')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertLength(set(gen), 1)
def test_searchitem(self):
"""Test -searchitem."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-searchitem:abc')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
self.assertIsNotNone(next(gen))
def test_searchitem_language(self):
"""Test -searchitem with custom language specified."""
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-searchitem:pl:abc')
gf.handleArg('-limit:1')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
# alphabet, also known as ABC
page1 = next(gen)
self.assertEqual(page1.title(), 'Q9779')
gf = pagegenerators.GeneratorFactory(site=self.site)
gf.handleArg('-searchitem:en:abc')
gf.handleArg('-limit:2')
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
# American Broadcasting Company
page1 = next(gen)
self.assertEqual(page1.title(), 'Q169889')
# alphabet, also known as ABC
page2 = next(gen)
self.assertEqual(page2.title(), 'Q9779')
def test_get_category_site(self):
"""Test the getCategory method."""
# With default site
gf = pagegenerators.GeneratorFactory()
cat = gf.getCategory('foo')[0]
self.assertEqual(cat.site, pywikibot.Site())
# With a user-specified site
fa_wikisource = pywikibot.Site('fa', 'wikisource')
gf = pagegenerators.GeneratorFactory(fa_wikisource)
cat = gf.getCategory('foo')[0]
self.assertEqual(cat.site, fa_wikisource)
class TestLogeventsFactoryGenerator(DefaultSiteTestCase,
DeprecationTestCase):
"""Test GeneratorFactory with pagegenerators.LogeventsPageGenerator."""
    @classmethod
    def setUpClass(cls):
        """Setup test class."""
        super(TestLogeventsFactoryGenerator, cls).setUpClass()
        site = pywikibot.Site()
        # Every test below relies on at least one 'newusers' log event
        # existing on the wiki; skip the whole class otherwise.
        newuser_logevents = list(site.logevents(logtype='newusers', total=1))
        if len(newuser_logevents) == 0:
            raise unittest.SkipTest('No newuser logs found to test with.')
user = True
def test_logevents_parse(self):
"""Test wrong logevents option."""
factory = pagegenerators.GeneratorFactory
gf = factory()
self.assertFalse(gf.handleArg('-log'))
self.assertFalse(gf.handleArg('-log:text_here'))
self.assertRaises(NotImplementedError,
gf.handleArg, '-logevents:anyevent')
# test that old format log option is not handled by any handler method.
gf_mock = mock.create_autospec(gf)
self.assertFalse(factory.handleArg(gf_mock, '-anotherlog'))
self.assertFalse(gf_mock.method_calls)
def test_logevents_with_start_timestamp(self):
"""Test -logevents which uses timestamp for start."""
gf = pagegenerators.GeneratorFactory(site=self.site)
# We limit the results to 1 as running this on large websites like
# Wikipedia will give an insane number of results as it asks for all
# logevents since beginning till now.
self.assertTrue(gf.handleArg('-limit:1'))
self.assertTrue(gf.handleArg('-logevents:newusers,,21000101'))
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertIsNotEmpty(pages)
self.assertTrue(all(isinstance(item, pywikibot.User)
for item in pages))
def test_logevents_with_start_and_end_timestamp(self):
"""Test -logevents which uses timestamps for start and end."""
gf = pagegenerators.GeneratorFactory(site=self.site)
self.assertTrue(gf.handleArg('-logevents:newusers,,21000101,20990101'))
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertIsEmpty(pages)
def test_logevents_with_total(self):
"""Test -logevents which uses total."""
gf = pagegenerators.GeneratorFactory(site=self.site)
self.assertTrue(gf.handleArg('-logevents:newusers,,1'))
gen = gf.getCombinedGenerator()
self.assertIsNotNone(gen)
pages = set(gen)
self.assertLength(pages, 1)
self.assertTrue(all(isinstance(item, pywikibot.User)
for item in pages))
class PageGeneratorIntersectTestCase(GeneratorIntersectTestCase,
                                     RecentChangesTestCase):

    """Page intersect_generators test cases."""

    def test_intersect_newpages_twice(self):
        """Test newpages intersection."""
        site = self.get_site()
        # Intersecting a generator with an identical copy of itself must
        # agree with the itertools-based reference implementation.
        self.assertEqualItertools(
            [pagegenerators.NewpagesPageGenerator(site=site, total=10),
             pagegenerators.NewpagesPageGenerator(site=site, total=10)])

    def test_intersect_newpages_and_recentchanges(self):
        """Test intersection between newpages and recentchanges."""
        site = self.get_site()
        self.assertEqualItertools(
            [pagegenerators.NewpagesPageGenerator(site=site, total=50),
             pagegenerators.RecentChangesPageGenerator(site=site, total=200)])
class EnWikipediaPageGeneratorIntersectTestCase(GeneratorIntersectTestCase,
                                                RecentChangesTestCase):

    """Page intersect_generators test cases."""

    # Pinned to English Wikipedia, where the speedy-deletion category
    # referenced below is maintained.
    family = 'wikipedia'
    code = 'en'

    def test_intersect_newpages_csd(self):
        """Test intersection between newpages and sd candidates."""
        site = self.get_site()
        self.assertEqualItertools([
            pagegenerators.NewpagesPageGenerator(site=site, total=10),
            pagegenerators.CategorizedPageGenerator(
                pywikibot.Category(
                    site, 'Category:Candidates_for_speedy_deletion'))]
        )
class EventStreamsPageGeneratorTestCase(RecentChangesTestCase):

    """Test case for Live Recent Changes pagegenerator."""

    @classmethod
    def setUpClass(cls):
        """Setup test class."""
        super(EventStreamsPageGeneratorTestCase, cls).setUpClass()
        # LiveRCPageGenerator requires the optional sseclient module.
        cls.client = 'sseclient'
        if not has_module(cls.client):
            raise unittest.SkipTest('{0} is not available'.format(cls.client))

    def test_RC_pagegenerator_result(self):
        """Test RC pagegenerator."""
        # Enable debug logging for the stream client so that connection
        # problems during the test are diagnosable.
        lgr = logging.getLogger(self.client)
        lgr.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        lgr.addHandler(ch)

        site = self.get_site()
        pagegenerator = pagegenerators.LiveRCPageGenerator(site,
                                                           total=self.length)
        entries = list(pagegenerator)
        self.assertLength(entries, self.length)

        testentry = entries[0]
        self.assertEqual(testentry.site, site)
        self.assertTrue(hasattr(testentry, '_rcinfo'))

        rcinfo = testentry._rcinfo
        self.assertEqual(rcinfo['server_name'], site.hostname())
        self.assertEqual(rcinfo['wiki'], site.dbName())

        # Minimal schema that every recent-changes event must carry.
        for key in ['type', 'namespace', 'title', 'comment',
                    'timestamp', 'user', 'bot']:
            self.assertIn(key, rcinfo.keys())
class TestUnconnectedPageGenerator(DefaultSiteTestCase):

    """Test UnconnectedPageGenerator."""

    cached = True

    def test_unconnected_with_repo(self):
        """Test UnconnectedPageGenerator."""
        if not self.site.data_repository():
            self.skipTest('Site is not using a Wikibase repository')
        # The generator is deprecated; silence the warning while still
        # verifying the exact API request parameters it builds.
        with suppress_warnings(
            'pywikibot.pagegenerators.UnconnectedPageGenerator is '
            'deprecated', DeprecationWarning):
            upgen = pagegenerators.UnconnectedPageGenerator(self.site, 3)
        self.assertDictEqual(
            upgen.request._params, {
                'gqppage': ['UnconnectedPages'],
                'prop': ['info', 'imageinfo', 'categoryinfo'],
                'inprop': ['protection'],
                'iilimit': ['max'],
                'iiprop': ['timestamp', 'user', 'comment', 'url', 'size',
                           'sha1', 'metadata'],
                'generator': ['querypage'], 'action': ['query'],
                'indexpageids': [True], 'continue': [True]})
        self.assertLessEqual(len(tuple(upgen)), 3)

    def test_unconnected_without_repo(self):
        """Test that it raises a ValueError on sites without repository."""
        if self.site.data_repository():
            self.skipTest('Site is using a Wikibase repository')
        with self.assertRaises(ValueError):
            for page in pagegenerators.UnconnectedPageGenerator(self.site,
                                                                total=5):
                assert False  # this shouldn't be reached
if __name__ == '__main__':  # pragma: no cover
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() always calls sys.exit(); swallow it so the
        # interpreter exits cleanly when this module is run as a script.
        pass
| 38.78825 | 79 | 0.621379 |
ace3285c12fc54a8d821a51d26607459d8426ff3 | 7,478 | py | Python | dataloaders/sbd.py | ZurMaD/DeepGrabCut-PyTorch | 13d9e81e6e438ad3394fb3a78aca26c2cc63c825 | [
"MIT"
] | 244 | 2018-06-16T07:52:56.000Z | 2022-03-12T21:45:30.000Z | dataloaders/sbd.py | ZurMaD/DeepGrabCut-PyTorch | 13d9e81e6e438ad3394fb3a78aca26c2cc63c825 | [
"MIT"
] | 7 | 2018-08-21T13:08:06.000Z | 2021-08-25T04:04:59.000Z | dataloaders/sbd.py | ZurMaD/DeepGrabCut-PyTorch | 13d9e81e6e438ad3394fb3a78aca26c2cc63c825 | [
"MIT"
] | 60 | 2018-07-26T15:47:10.000Z | 2022-01-02T13:59:56.000Z | from __future__ import print_function, division
import json
import os
import numpy as np
import scipy.io
import torch.utils.data as data
from PIL import Image
from mypath import Path
class SBDSegmentation(data.Dataset):
    def __init__(self,
                 base_dir=Path.db_root_dir('sbd'),
                 split='val',
                 transform=None,
                 preprocess=False,
                 area_thres=0,
                 retname=True):
        """
        Initialize the SBD instance-segmentation dataset.

        :param base_dir: path to VOC dataset directory
        :param split: train/val (a string or a list of split names)
        :param transform: transform to apply to each sample dict
        :param preprocess: force recomputation of the cached object list
        :param area_thres: minimum object area (pixels); smaller objects
            are excluded from the dataset
        :param retname: include a 'meta' entry in each sample
        """
        super().__init__()
        self._base_dir = base_dir
        self._dataset_dir = os.path.join(self._base_dir, 'dataset')
        self._mask_dir = os.path.join(self._dataset_dir, 'inst')
        self._image_dir = os.path.join(self._dataset_dir, 'img')
        self.transform = transform
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.area_thres = area_thres
        self.retname = retname

        # Cache file for per-image object categories; the name encodes the
        # splits and, when non-zero, the area threshold.
        if self.area_thres != 0:
            self.obj_list_file = os.path.join(self._dataset_dir, '_'.join(self.split) + '_instances_area_thres-' +
                                              str(area_thres) + '.txt')
        else:
            self.obj_list_file = os.path.join(self._dataset_dir, '_'.join(self.split) + '_instances' + '.txt')

        # Get list of all images from the split and check that the files exist
        self.im_ids = []
        self.images = []
        self.masks = []
        for splt in self.split:
            with open(os.path.join(self._dataset_dir, splt + '.txt'), "r") as f:
                lines = f.read().splitlines()

            for line in lines:
                _image = os.path.join(self._image_dir, line + ".jpg")
                _mask = os.path.join(self._mask_dir, line + ".mat")
                assert os.path.isfile(_image)
                assert os.path.isfile(_mask)
                self.im_ids.append(line)
                self.images.append(_image)
                self.masks.append(_mask)

        assert (len(self.images) == len(self.masks))

        # Precompute the list of objects and their categories for each image
        if (not self._check_preprocess()) or preprocess:
            print('Preprocessing SBD dataset, this will take long, but it will be done only once.')
            self._preprocess()

        # Build the list of (image index, object index) pairs, skipping
        # objects whose category was set to -1 (filtered by area threshold).
        self.obj_list = []
        num_images = 0
        for ii in range(len(self.im_ids)):
            if self.im_ids[ii] in self.obj_dict.keys():
                flag = False
                for jj in range(len(self.obj_dict[self.im_ids[ii]])):
                    if self.obj_dict[self.im_ids[ii]][jj] != -1:
                        self.obj_list.append([ii, jj])
                        flag = True
                if flag:
                    num_images += 1

        # Display stats
        print('Number of images: {:d}\nNumber of objects: {:d}'.format(num_images, len(self.obj_list)))
def __getitem__(self, index):
_img, _target = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'gt': _target}
if self.retname:
_im_ii = self.obj_list[index][0]
_obj_ii = self.obj_list[index][1]
sample['meta'] = {'image': str(self.im_ids[_im_ii]),
'object': str(_obj_ii),
'im_size': (_img.shape[0], _img.shape[1]),
'category': self.obj_dict[self.im_ids[_im_ii]][_obj_ii]}
if self.transform is not None:
sample = self.transform(sample)
return sample
    def __len__(self):
        """Number of (image, object-instance) pairs exposed by the dataset."""
        return len(self.obj_list)
def _check_preprocess(self):
# Check that the file with categories is there and with correct size
_obj_list_file = self.obj_list_file
if not os.path.isfile(_obj_list_file):
return False
else:
self.obj_dict = json.load(open(_obj_list_file, 'r'))
return list(np.sort([str(x) for x in self.obj_dict.keys()])) == list(np.sort(self.im_ids))
    def _preprocess(self):
        """Scan every .mat annotation, record each instance's category per
        image in ``self.obj_dict`` and cache the result as JSON in
        ``self.obj_list_file`` for later runs.

        Instances whose pixel area is below ``self.area_thres`` get their
        category overwritten with -1 so they can be filtered out later.
        """
        # Get all object instances and their category
        self.obj_dict = {}
        obj_counter = 0
        for ii in range(len(self.im_ids)):
            # Read object masks and get number of objects
            tmp = scipy.io.loadmat(self.masks[ii])
            _mask = tmp["GTinst"][0]["Segmentation"][0]
            _cat_ids = tmp["GTinst"][0]["Categories"][0].astype(int)
            _mask_ids = np.unique(_mask)
            # Assumes instance labels are contiguous 1..n, so the largest
            # label equals the instance count -- TODO confirm for SBD data.
            n_obj = _mask_ids[-1]
            assert (n_obj == len(_cat_ids))
            for jj in range(n_obj):
                temp = np.where(_mask == jj + 1)
                obj_area = len(temp[0])  # pixel count of this instance
                if obj_area < self.area_thres:
                    _cat_ids[jj] = -1  # mark too-small instance for exclusion
                obj_counter += 1
            self.obj_dict[self.im_ids[ii]] = np.squeeze(_cat_ids, 1).tolist()

        # Save it to file for future reference
        with open(self.obj_list_file, 'w') as outfile:
            outfile.write('{{\n\t"{:s}": {:s}'.format(self.im_ids[0], json.dumps(self.obj_dict[self.im_ids[0]])))
            for ii in range(1, len(self.im_ids)):
                outfile.write(',\n\t"{:s}": {:s}'.format(self.im_ids[ii], json.dumps(self.obj_dict[self.im_ids[ii]])))
            outfile.write('\n}\n')

        print('Pre-processing finished')
    def _make_img_gt_point_pair(self, index):
        """Load the RGB image and the float32 binary mask of the single
        object instance referenced by ``self.obj_list[index]``."""
        _im_ii = self.obj_list[index][0]
        _obj_ii = self.obj_list[index][1]

        # Read Image
        _img = np.array(Image.open(self.images[_im_ii]).convert('RGB')).astype(np.float32)

        # Read Target object (instance labels are 1-based in the .mat file)
        _tmp = scipy.io.loadmat(self.masks[_im_ii])["GTinst"][0]["Segmentation"][0]
        _target = (_tmp == (_obj_ii + 1)).astype(np.float32)

        return _img, _target
def __str__(self):
return 'SBDSegmentation(split=' + str(self.split) + ', area_thres=' + str(self.area_thres) + ')'
if __name__ == '__main__':
    # Smoke test: build the SBD training split with the full augmentation
    # pipeline and visualise mask + distance-map overlays for two batches.
    from dataloaders import custom_transforms as tr
    from torch.utils.data import DataLoader
    from torchvision import transforms
    import matplotlib.pyplot as plt

    # Per-sample transform chain applied to the sample dict.
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-15, 15), scales=(.75, 1.25)),
        tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),
        tr.DistanceMap(v=0.15, elem='gt'),
        tr.ConcatInputs(elems=('image', 'distance_map')),
        tr.ToTensor()])

    sbd_train = SBDSegmentation(split='train', retname=False,
                                transform=composed_transforms_tr)

    dataloader = DataLoader(sbd_train, batch_size=2, shuffle=True, num_workers=2)

    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]):
            dismap = sample['distance_map'][jj].numpy()
            gt = sample['gt'][jj].numpy()
            gt[gt > 0] = 255  # binarise the mask to 0/255
            gt = np.array(gt[0]).astype(np.uint8)
            dismap = np.array(dismap[0]).astype(np.uint8)
            # Blend ground truth and distance map into one grayscale image.
            display = 0.9 * gt + 0.4 * dismap
            display = display.astype(np.uint8)
            plt.figure()
            plt.title('display')
            plt.imshow(display, cmap='gray')

        if ii == 1:
            break  # only visualise two batches

    plt.show(block=True)
ace32874a1f31d246288eed4a6712b2520e3274d | 813 | py | Python | plenum/test/view_change/test_disable_view_change.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_disable_view_change.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_disable_view_change.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | import pytest
from plenum.test.helper import waitForViewChange
from plenum.test.view_change.helper import simulate_slow_master
@pytest.fixture(scope="module")
def disable_view_change_config(tconf):
    """Module-scoped fixture: add the 'disable_view_change' flag to the
    node config's ``unsafe`` set for the whole module, then restore it."""
    tconf.unsafe.add('disable_view_change')
    yield tconf
    tconf.unsafe.remove('disable_view_change')
def test_disable_view_change(
        disable_view_change_config,
        looper,
        nodeSet,
        up,
        viewNo,
        wallet1,
        client1):
    """With view changes disabled, a slow master must NOT trigger a view
    change: waiting for the next view number is expected to fail."""
    assert disable_view_change_config
    assert isinstance(disable_view_change_config.unsafe, set)
    assert 'disable_view_change' in disable_view_change_config.unsafe

    simulate_slow_master(looper, nodeSet, wallet1, client1)

    # waitForViewChange raising AssertionError proves the view stayed put.
    with pytest.raises(AssertionError):
        waitForViewChange(looper, nodeSet, expectedViewNo=viewNo + 1)
| 28.034483 | 69 | 0.746617 |
ace328ff8e15497d2bc477d0801c9cfdbcf2caec | 139 | py | Python | func1setup.py | bangyen/matics | a6c41c80052fd054b184f2cc6e9a14d303362687 | [
"MIT"
] | null | null | null | func1setup.py | bangyen/matics | a6c41c80052fd054b184f2cc6e9a14d303362687 | [
"MIT"
] | null | null | null | func1setup.py | bangyen/matics | a6c41c80052fd054b184f2cc6e9a14d303362687 | [
"MIT"
] | null | null | null | from distutils.core import setup
from Cython.Build import cythonize
# Build the Cython extension module from func1.pyx.
# NOTE(review): 'name' is normally a distribution name, not a .pyx
# filename -- confirm 'func1.pyx' here is intentional.
setup(
    name='func1.pyx',
    ext_modules=cythonize("func1.pyx")
)
| 17.375 | 38 | 0.741007 |
ace329a9ecb9950ecf2e89653338cdf987ecbccd | 3,450 | py | Python | src/roles/shaman.py | IamInferior/ppd-werewolves | 749138b212abbf89bf648f2d7e4054bf7b27c256 | [
"BSD-2-Clause"
] | null | null | null | src/roles/shaman.py | IamInferior/ppd-werewolves | 749138b212abbf89bf648f2d7e4054bf7b27c256 | [
"BSD-2-Clause"
] | null | null | null | src/roles/shaman.py | IamInferior/ppd-werewolves | 749138b212abbf89bf648f2d7e4054bf7b27c256 | [
"BSD-2-Clause"
] | null | null | null | import re
import random
import itertools
from collections import defaultdict, deque
import botconfig
from src.utilities import *
from src import debuglog, errlog, plog, users, channels
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.dispatcher import MessageDispatcher
from src.messages import messages
from src.events import Event
from src.roles._shaman_helper import setup_variables, get_totem_target, give_totem
# Shared shaman state from the helper: current totem per shaman, last
# night's recipients, and tonight's pending totem targets.
TOTEMS, LASTGIVEN, SHAMANS = setup_variables("shaman", knows_totem=True)


@command("give", "totem", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("shaman",))
def shaman_totem(var, wrapper, message):
    """Give a totem to a player."""
    target = get_totem_target(var, wrapper, message, LASTGIVEN)
    if not target:
        # Invalid/absent target; presumably get_totem_target already
        # messaged the player -- NOTE(review): confirm in the helper.
        return

    SHAMANS[wrapper.source] = give_totem(var, wrapper, target, prefix="You", role="shaman", msg=" of {0}".format(TOTEMS[wrapper.source]))
@event_listener("transition_day_begin", priority=4)
def on_transition_day_begin(evt, var):
    """At daybreak, hand out a totem on behalf of any shaman who did not
    act during the night (silenced shamans get nothing)."""
    # Select random totem recipients if shamans didn't act
    pl = get_players()
    for shaman in get_players(("shaman",)):
        if shaman not in SHAMANS and shaman.nick not in var.SILENCED:
            ps = pl[:]
            # Never give to the same player two nights in a row.
            if shaman in LASTGIVEN:
                if LASTGIVEN[shaman] in ps:
                    ps.remove(LASTGIVEN[shaman])
            if ps:
                target = random.choice(ps)
                dispatcher = MessageDispatcher(shaman, shaman)
                SHAMANS[shaman] = give_totem(var, dispatcher, target, prefix=messages["random_totem_prefix"], role="shaman", msg=" of {0}".format(TOTEMS[shaman]))
            else:
                LASTGIVEN[shaman] = None
        elif shaman not in SHAMANS:
            LASTGIVEN[shaman] = None
@event_listener("transition_night_end", priority=2.01)
def on_transition_night_end(evt, var):
    """At nightfall, pick tonight's totem for every shaman using the
    configured per-totem weights and send each their night messages."""
    max_totems = 0
    ps = get_players()
    shamans = get_players(("shaman",))
    # Column of TOTEM_CHANCES that applies to the plain shaman role.
    index = var.TOTEM_ORDER.index("shaman")
    # Sum of all weights; the RNG draws uniformly in [0, max_totems).
    for c in var.TOTEM_CHANCES.values():
        max_totems += c[index]
    # Drop stale LASTGIVEN entries for players no longer playing shaman.
    for s in list(LASTGIVEN):
        if s not in shamans:
            del LASTGIVEN[s]
    for shaman in shamans:
        pl = ps[:]
        random.shuffle(pl)
        # Exclude last night's recipient from the candidate listing.
        if LASTGIVEN.get(shaman):
            if LASTGIVEN[shaman] in pl:
                pl.remove(LASTGIVEN[shaman])

        # Weighted random selection of tonight's totem type.
        target = 0
        rand = random.random() * max_totems
        for t in var.TOTEM_CHANCES.keys():
            target += var.TOTEM_CHANCES[t][index]
            if rand <= target:
                TOTEMS[shaman] = t
                break

        # Notify the shaman, honouring their simple/verbose preference.
        if shaman.prefers_simple():
            shaman.send(messages["shaman_simple"].format("shaman"))
            shaman.send(messages["totem_simple"].format(TOTEMS[shaman]))
        else:
            shaman.send(messages["shaman_notify"].format("shaman", ""))
            totem = TOTEMS[shaman]
            tmsg = messages["shaman_totem"].format(totem)
            tmsg += messages[totem + "_totem"]
            shaman.send(tmsg)
        shaman.send(messages["players_list"].format(", ".join(p.nick for p in pl)))
@event_listener("get_special")
def on_get_special(evt, var):
    """Register every shaman in the event's 'villagers' special-role set."""
    evt.data["villagers"].update(get_players(("shaman",)))
# vim: set sw=4 expandtab:
| 37.096774 | 162 | 0.648986 |
ace32a6c0386769cb87407238961abd1af91d902 | 4,269 | py | Python | AlbumThing/albumwindow.py | chewi/albumthing | 4a7b9db11d45cf8608e42ef51d88f511c018be14 | [
"BSD-3-Clause"
] | null | null | null | AlbumThing/albumwindow.py | chewi/albumthing | 4a7b9db11d45cf8608e42ef51d88f511c018be14 | [
"BSD-3-Clause"
] | null | null | null | AlbumThing/albumwindow.py | chewi/albumthing | 4a7b9db11d45cf8608e42ef51d88f511c018be14 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2008 Sebastian Sareyko <smoon at nooms dot de>
# See COPYING file for details.
import pygtk
pygtk.require('2.0')
import gtk
import menu, albumlist, playlist, controls, aboutdialog, preferencesdialog
import gobject
from albumthing import AlbumThing
import const
class AlbumWindow(gtk.Window):
    """
    The main window
    """
    def __init__(self):
        # Build the widget tree, restore the saved geometry and start the
        # once-per-second daemon-connection poll.
        super(AlbumWindow, self).__init__(gtk.WINDOW_TOPLEVEL)
        self.__at = AlbumThing()
        # True once xmms2 callbacks are registered for the current
        # connection; reset when the connection drops.
        self.__cb_foo = False
        self.set_title('Album')
        self.album_list = albumlist.AlbumListThing()
        self.playlist = playlist.PlayListThing()
        self.controls = controls.AlbumControls()
        self.vbox = gtk.VBox(homogeneous=False, spacing=8)
        self.about_dialog = aboutdialog.AboutDialog()
        self.preferences_dialog = preferencesdialog.PreferencesDialog(self)
        self.hpaned = gtk.HPaned()
        self.hpaned.set_position(
            int(self.__at.configuration.get('win', 'pos_hpaned')))
        self.hpaned.add1(self.album_list)
        self.hpaned.add2(self.playlist)
        self.menu_bar = menu.MenuBar()
        self.add_accel_group(self.menu_bar.accel_group)
        self.vbox.pack_start(self.menu_bar.item_factory.get_widget('<main>'),
            expand=False)
        self.vbox.pack_start(self.controls, expand=False)
        self.vbox.pack_start(self.hpaned)
        # Ctrl+L jumps to the album filter entry.
        accel_group = gtk.AccelGroup()
        accel_group.connect_group(ord('L'), gtk.gdk.CONTROL_MASK,
            gtk.ACCEL_VISIBLE, self.focus_filter_entry)
        self.add_accel_group(accel_group)
        self.set_default_size(int(self.__at.configuration.get('win', 'width')),
            int(self.__at.configuration.get('win', 'height')))
        x = int(self.__at.configuration.get('win', 'pos_x'))
        y = int(self.__at.configuration.get('win', 'pos_y'))
        if x and y:
            self.move(x, y)
        # timeout_add_seconds is missing on older gobject releases, so fall
        # back to the millisecond variant.
        try:
            gobject.timeout_add_seconds(1, self.__check_connection)
        except AttributeError:
            gobject.timeout_add(1000, self.__check_connection)
        self.add(self.vbox)
        self.connect('destroy', self.destroy)
        self.show_all()

    def __xmms_cb_id_info(self, result):
        # Medialib info arrived for a track: set the window title to
        # "artist - title", with placeholders for missing tags.
        if not result.value():
            return
        try:
            artist = result.value()['artist']
        except KeyError:
            artist = const.UNKNOWN
        try:
            title = result.value()['title']
        except KeyError:
            title = '%s (%s)' % (const.UNKNOWN, result.value()['url'])
        self.set_title('%s - %s' % (artist, title))

    def __xmms_cb_current_id(self, result):
        # Current-track id changed: request its medialib info.
        self.__at.xmms.medialib_get_info(result.value(),
            cb=self.__xmms_cb_id_info)

    def __check_connection(self):
        # Periodic poll: (re)register callbacks when connected; otherwise
        # grey out the UI and clear stale widget state.
        # Returning True keeps the gobject timeout alive.
        if self.__at.connected:
            if not self.__cb_foo:
                self.__cb_foo = True
                self.setup_callbacks()
                self.__widgets_set_sensitive(True)
                self.album_list.filter_entry.grab_focus()
        else:
            self.__cb_foo = False
            self.__widgets_set_sensitive(False)
            self.album_list.album_list.list_store.clear()
            self.album_list.filter_entry.set_text('')
            self.playlist.playlist.list_store.clear()
            self.controls.info_label.set_markup('<b>Not Connected</b>')
            self.controls.cover_art.clear()
            self.controls.seek_bar.scale.set_value(0)
            self.controls.seek_bar.time.set_text('-')
        return True

    def __widgets_set_sensitive(self, sens):
        # Enable/disable the three main widget groups together.
        self.album_list.set_sensitive(sens)
        self.playlist.set_sensitive(sens)
        self.controls.set_sensitive(sens)

    def focus_filter_entry(self, accel_group, acceleratable, keyval, modifier):
        # Accelerator target for Ctrl+L.
        self.album_list.filter_entry.grab_focus()

    def destroy(self, widget, data=None):
        # Window closed: shut the whole application down.
        self.__at.quit()

    def setup_callbacks(self):
        # Register xmms2 current-track callbacks and let child widgets
        # register theirs.
        self.__at.xmms.playback_current_id(cb=self.__xmms_cb_current_id)
        self.__at.xmms.broadcast_playback_current_id(
            cb=self.__xmms_cb_current_id)
        self.album_list.setup_callbacks()
        self.playlist.setup_callbacks()
        self.controls.setup_callbacks()
| 31.389706 | 79 | 0.634106 |
ace32a86dc0bbb4454b0880bd96154f4d759cc0b | 33,035 | py | Python | watcher/tests/api/v1/test_audit_templates.py | mail2nsrajesh/watcher | 5f179609d0ee145fc7957972c83593cce242884d | [
"Apache-2.0"
] | null | null | null | watcher/tests/api/v1/test_audit_templates.py | mail2nsrajesh/watcher | 5f179609d0ee145fc7957972c83593cce242884d | [
"Apache-2.0"
] | null | null | null | watcher/tests/api/v1/test_audit_templates.py | mail2nsrajesh/watcher | 5f179609d0ee145fc7957972c83593cce242884d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import mock
from webtest.app import AppError
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from wsme import types as wtypes
from watcher.api.controllers.v1 import audit_template as api_audit_template
from watcher.common import exception
from watcher.common import utils
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.api import utils as api_utils
from watcher.tests import base
from watcher.tests.db import utils as db_utils
from watcher.tests.objects import utils as obj_utils
def post_get_test_audit_template(**kw):
    """Build an audit-template POST payload.

    'goal', 'strategy' and 'scope' default to values derived from fresh
    test goal/strategy fixtures when the caller does not supply them.
    """
    test_goal = db_utils.get_test_goal()
    test_strategy = db_utils.get_test_strategy(goal_id=test_goal['id'])
    kw.setdefault('goal', test_goal['uuid'])
    kw.setdefault('strategy', test_strategy['uuid'])
    kw.setdefault('scope', [])
    return api_utils.audit_template_post_data(**kw)
class TestAuditTemplateObject(base.TestCase):
    """Unit tests for the AuditTemplate API object itself."""

    def test_audit_template_init(self):
        # Omitting 'name' from the payload must leave the attribute as
        # wtypes.Unset rather than raising or defaulting.
        audit_template_dict = post_get_test_audit_template()
        del audit_template_dict['name']
        audit_template = api_audit_template.AuditTemplate(
            **audit_template_dict)
        self.assertEqual(wtypes.Unset, audit_template.name)
class FunctionalTestWithSetup(api_base.FunctionalTest):
    """Functional-test base that pre-creates two goals, each with one
    associated strategy, for the audit-template API tests below."""

    def setUp(self):
        super(FunctionalTestWithSetup, self).setUp()
        self.fake_goal1 = obj_utils.create_test_goal(
            self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1")
        self.fake_goal2 = obj_utils.create_test_goal(
            self.context, id=2, uuid=utils.generate_uuid(), name="dummy_2")
        # Each strategy is bound to the goal with the matching index.
        self.fake_strategy1 = obj_utils.create_test_strategy(
            self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1",
            goal_id=self.fake_goal1.id)
        self.fake_strategy2 = obj_utils.create_test_strategy(
            self.context, id=2, uuid=utils.generate_uuid(), name="strategy_2",
            goal_id=self.fake_goal2.id)
class TestListAuditTemplate(FunctionalTestWithSetup):
    """Functional tests for GET /audit_templates: single/list/detail views,
    soft-delete visibility, link generation, pagination and filtering."""

    def test_empty(self):
        response = self.get_json('/audit_templates')
        self.assertEqual([], response['audit_templates'])

    def _assert_audit_template_fields(self, audit_template):
        # Fields every serialized audit template must expose.
        audit_template_fields = ['name', 'goal_uuid', 'goal_name',
                                 'strategy_uuid', 'strategy_name']
        for field in audit_template_fields:
            self.assertIn(field, audit_template)

    def test_one(self):
        audit_template = obj_utils.create_test_audit_template(
            self.context, strategy_id=self.fake_strategy1.id)
        response = self.get_json('/audit_templates')
        self.assertEqual(audit_template.uuid,
                         response['audit_templates'][0]["uuid"])
        self._assert_audit_template_fields(response['audit_templates'][0])

    def test_get_one_soft_deleted_ok(self):
        # Soft-deleted entries are only listed with X-Show-Deleted: True.
        audit_template = obj_utils.create_test_audit_template(self.context)
        audit_template.soft_delete()
        response = self.get_json('/audit_templates',
                                 headers={'X-Show-Deleted': 'True'})
        self.assertEqual(audit_template.uuid,
                         response['audit_templates'][0]["uuid"])
        self._assert_audit_template_fields(response['audit_templates'][0])
        response = self.get_json('/audit_templates')
        self.assertEqual([], response['audit_templates'])

    def test_get_one_by_uuid(self):
        audit_template = obj_utils.create_test_audit_template(self.context)
        response = self.get_json(
            '/audit_templates/%s' % audit_template['uuid'])
        self.assertEqual(audit_template.uuid, response['uuid'])
        self._assert_audit_template_fields(response)

    def test_get_one_by_name(self):
        audit_template = obj_utils.create_test_audit_template(self.context)
        # Names may contain characters that need URL quoting.
        response = self.get_json(urlparse.quote(
            '/audit_templates/%s' % audit_template['name']))
        self.assertEqual(audit_template.uuid, response['uuid'])
        self._assert_audit_template_fields(response)

    def test_get_one_soft_deleted(self):
        audit_template = obj_utils.create_test_audit_template(self.context)
        audit_template.soft_delete()
        response = self.get_json(
            '/audit_templates/%s' % audit_template['uuid'],
            headers={'X-Show-Deleted': 'True'})
        self.assertEqual(audit_template.uuid, response['uuid'])
        self._assert_audit_template_fields(response)
        # Without the header the soft-deleted template is a 404.
        response = self.get_json(
            '/audit_templates/%s' % audit_template['uuid'],
            expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_detail(self):
        audit_template = obj_utils.create_test_audit_template(self.context)
        response = self.get_json('/audit_templates/detail')
        self.assertEqual(audit_template.uuid,
                         response['audit_templates'][0]["uuid"])
        self._assert_audit_template_fields(response['audit_templates'][0])

    def test_detail_soft_deleted(self):
        audit_template = obj_utils.create_test_audit_template(self.context)
        audit_template.soft_delete()
        response = self.get_json('/audit_templates/detail',
                                 headers={'X-Show-Deleted': 'True'})
        self.assertEqual(audit_template.uuid,
                         response['audit_templates'][0]["uuid"])
        self._assert_audit_template_fields(response['audit_templates'][0])
        response = self.get_json('/audit_templates/detail')
        self.assertEqual([], response['audit_templates'])

    def test_detail_against_single(self):
        # /detail is only valid on the collection, not a single resource.
        audit_template = obj_utils.create_test_audit_template(self.context)
        response = self.get_json(
            '/audit_templates/%s/detail' % audit_template['uuid'],
            expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        audit_template_list = []
        for id_ in range(1, 6):
            audit_template = obj_utils.create_test_audit_template(
                self.context, id=id_,
                uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_))
            audit_template_list.append(audit_template)
        response = self.get_json('/audit_templates')
        self.assertEqual(len(audit_template_list),
                         len(response['audit_templates']))
        uuids = [s['uuid'] for s in response['audit_templates']]
        self.assertEqual(
            sorted([at.uuid for at in audit_template_list]),
            sorted(uuids))

    def test_many_without_soft_deleted(self):
        audit_template_list = []
        for id_ in range(1, 6):
            audit_template = obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_))
            audit_template_list.append(audit_template)
        # We soft delete the ones with ID 4 and 5
        [at.soft_delete() for at in audit_template_list[3:]]
        response = self.get_json('/audit_templates')
        self.assertEqual(3, len(response['audit_templates']))
        uuids = [s['uuid'] for s in response['audit_templates']]
        self.assertEqual(
            sorted([at.uuid for at in audit_template_list[:3]]),
            sorted(uuids))

    def test_many_with_soft_deleted(self):
        audit_template_list = []
        for id_ in range(1, 6):
            audit_template = obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_))
            audit_template_list.append(audit_template)
        # We soft delete the ones with ID 4 and 5
        [at.soft_delete() for at in audit_template_list[3:]]
        response = self.get_json('/audit_templates',
                                 headers={'X-Show-Deleted': 'True'})
        self.assertEqual(5, len(response['audit_templates']))
        uuids = [s['uuid'] for s in response['audit_templates']]
        self.assertEqual(
            sorted([at.uuid for at in audit_template_list]),
            sorted(uuids))

    def test_links(self):
        uuid = utils.generate_uuid()
        obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid)
        response = self.get_json('/audit_templates/%s' % uuid)
        self.assertIn('links', response.keys())
        # Expect exactly a 'self' and a 'bookmark' link.
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for l in response['links']:
            bookmark = l['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))

    def test_collection_links(self):
        for id_ in range(5):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_))
        response = self.get_json('/audit_templates/?limit=3')
        self.assertEqual(3, len(response['audit_templates']))
        # 'next' must carry the last returned entry as the pagination marker.
        next_marker = response['audit_templates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        # api.max_limit caps the page size even without ?limit=.
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_))
        response = self.get_json('/audit_templates')
        self.assertEqual(3, len(response['audit_templates']))
        next_marker = response['audit_templates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_filter_by_goal_uuid(self):
        # Three templates on goal1 and two on goal2.
        for id_, goal_id in enumerate(itertools.chain.from_iterable([
                itertools.repeat(self.fake_goal1.id, 3),
                itertools.repeat(self.fake_goal2.id, 2)]), 1):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_),
                goal_id=goal_id)
        response = self.get_json(
            '/audit_templates?goal=%s' % self.fake_goal2.uuid)
        self.assertEqual(2, len(response['audit_templates']))

    def test_filter_by_goal_name(self):
        for id_, goal_id in enumerate(itertools.chain.from_iterable([
                itertools.repeat(self.fake_goal1.id, 3),
                itertools.repeat(self.fake_goal2.id, 2)]), 1):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_),
                goal_id=goal_id)
        response = self.get_json(
            '/audit_templates?goal=%s' % self.fake_goal2.name)
        self.assertEqual(2, len(response['audit_templates']))

    def test_filter_by_strategy_uuid(self):
        for id_, strategy_id in enumerate(itertools.chain.from_iterable([
                itertools.repeat(self.fake_strategy1.id, 3),
                itertools.repeat(self.fake_strategy2.id, 2)]), 1):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_),
                strategy_id=strategy_id)
        response = self.get_json(
            '/audit_templates?strategy=%s' % self.fake_strategy2.uuid)
        self.assertEqual(2, len(response['audit_templates']))

    def test_filter_by_strategy_name(self):
        for id_, strategy_id in enumerate(itertools.chain.from_iterable([
                itertools.repeat(self.fake_strategy1.id, 3),
                itertools.repeat(self.fake_strategy2.id, 2)]), 1):
            obj_utils.create_test_audit_template(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name='My Audit Template {0}'.format(id_),
                strategy_id=strategy_id)
        response = self.get_json(
            '/audit_templates?strategy=%s' % self.fake_strategy2.name)
        self.assertEqual(2, len(response['audit_templates']))
class TestPatch(FunctionalTestWithSetup):
    """Functional tests for PATCH /audit_templates/<ident>: JSON-Patch
    add/replace/remove on the goal and strategy fields."""

    def setUp(self):
        super(TestPatch, self).setUp()
        obj_utils.create_test_goal(self.context)
        self.audit_template = obj_utils.create_test_audit_template(
            self.context, strategy_id=None)

    @mock.patch.object(timeutils, 'utcnow')
    def test_replace_goal_uuid(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        new_goal_uuid = self.fake_goal2.uuid
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertNotEqual(new_goal_uuid, response['goal_uuid'])
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/goal', 'value': new_goal_uuid,
              'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertEqual(new_goal_uuid, response['goal_uuid'])
        # updated_at must reflect the mocked clock.
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    @mock.patch.object(timeutils, 'utcnow')
    def test_replace_goal_uuid_by_name(self, mock_utcnow):
        # Same as above but addressing the template by name, not uuid.
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        new_goal_uuid = self.fake_goal2.uuid
        response = self.get_json(urlparse.quote(
            '/audit_templates/%s' % self.audit_template.name))
        self.assertNotEqual(new_goal_uuid, response['goal_uuid'])
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.name,
            [{'path': '/goal', 'value': new_goal_uuid,
              'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.name)
        self.assertEqual(new_goal_uuid, response['goal_uuid'])
        return_updated_at = timeutils.parse_isotime(
            response['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    def test_replace_non_existent_audit_template(self):
        response = self.patch_json(
            '/audit_templates/%s' % utils.generate_uuid(),
            [{'path': '/goal', 'value': self.fake_goal1.uuid,
              'op': 'replace'}],
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_replace_invalid_goal(self):
        # An unknown goal uuid must be rejected before any DB update call.
        with mock.patch.object(
                self.dbapi,
                'update_audit_template',
                wraps=self.dbapi.update_audit_template
        ) as cn_mock:
            response = self.patch_json(
                '/audit_templates/%s' % self.audit_template.uuid,
                [{'path': '/goal', 'value': utils.generate_uuid(),
                  'op': 'replace'}],
                expect_errors=True)
            self.assertEqual(400, response.status_int)
            assert not cn_mock.called

    def test_add_goal_uuid(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/goal',
              'value': self.fake_goal2.uuid,
              'op': 'add'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_int)
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertEqual(self.fake_goal2.uuid, response['goal_uuid'])

    def test_add_strategy_uuid(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/strategy',
              'value': self.fake_strategy1.uuid,
              'op': 'add'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_int)
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertEqual(self.fake_strategy1.uuid, response['strategy_uuid'])

    def test_replace_strategy_uuid(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/strategy',
              'value': self.fake_strategy2['uuid'],
              'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_int)
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertEqual(
            self.fake_strategy2['uuid'], response['strategy_uuid'])

    def test_replace_invalid_strategy(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/strategy',
              'value': utils.generate_uuid(),  # Does not exist
              'op': 'replace'}], expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_add_non_existent_property(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/foo', 'value': 'bar', 'op': 'add'}],
            expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_remove_strategy(self):
        # Strategy is optional, so removing it succeeds with 200.
        audit_template = obj_utils.create_test_audit_template(
            self.context, uuid=utils.generate_uuid(),
            name="AT_%s" % utils.generate_uuid(),
            goal_id=self.fake_goal1.id,
            strategy_id=self.fake_strategy1.id)
        response = self.get_json(
            '/audit_templates/%s' % audit_template.uuid)
        self.assertIsNotNone(response['strategy_uuid'])
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/strategy', 'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)

    def test_remove_goal(self):
        # Goal is mandatory; removing it is forbidden (403).
        response = self.get_json(
            '/audit_templates/%s' % self.audit_template.uuid)
        self.assertIsNotNone(response['goal_uuid'])
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/goal', 'op': 'remove'}],
            expect_errors=True)
        self.assertEqual(403, response.status_code)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_remove_uuid(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/uuid', 'op': 'remove'}],
            expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_remove_non_existent_property(self):
        response = self.patch_json(
            '/audit_templates/%s' % self.audit_template.uuid,
            [{'path': '/non-existent', 'op': 'remove'}],
            expect_errors=True)
        self.assertEqual(400, response.status_code)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
class TestPost(FunctionalTestWithSetup):
    """Tests for POST /audit_templates (audit template creation)."""

    @mock.patch.object(timeutils, 'utcnow')
    def test_create_audit_template(self, mock_utcnow):
        """A valid POST returns 201 with generated uuid and created_at."""
        audit_template_dict = post_get_test_audit_template(
            goal=self.fake_goal1.uuid,
            strategy=self.fake_strategy1.uuid)
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self.post_json('/audit_templates', audit_template_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = \
            '/v1/audit_templates/%s' % response.json['uuid']
        self.assertEqual(urlparse.urlparse(response.location).path,
                         expected_location)
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))
        # BUG FIX: 'response.json.keys' (no call) is a bound method object, so
        # the membership test raised TypeError instead of checking the payload.
        self.assertNotIn('updated_at', response.json.keys())
        self.assertNotIn('deleted_at', response.json.keys())
        self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid'])
        self.assertEqual(self.fake_strategy1.uuid,
                         response.json['strategy_uuid'])
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    def test_create_audit_template_validation_with_aggregates(self):
        """Host aggregates cannot be both included and excluded in scope."""
        # NOTE: renamed from *_vlidation_* (typo); test runners discover the
        # method via its test_ prefix, so the rename is safe.
        scope = [{'host_aggregates': [{'id': '*'}]},
                 {'availability_zones': [{'name': 'AZ1'},
                                         {'name': 'AZ2'}]},
                 {'exclude': [
                     {'instances': [
                         {'uuid': 'INSTANCE_1'},
                         {'uuid': 'INSTANCE_2'}]},
                     {'compute_nodes': [
                         {'name': 'Node_1'},
                         {'name': 'Node_2'}]},
                     {'host_aggregates': [{'id': '*'}]}
                 ]}
                 ]
        audit_template_dict = post_get_test_audit_template(
            goal=self.fake_goal1.uuid,
            strategy=self.fake_strategy1.uuid, scope=scope)
        with self.assertRaisesRegex(AppError,
                                    "be included and excluded together"):
            self.post_json('/audit_templates', audit_template_dict)

    def test_create_audit_template_does_autogenerate_id(self):
        """The DB layer, not the client, assigns the integer id."""
        audit_template_dict = post_get_test_audit_template(
            goal=self.fake_goal1.uuid, strategy=None)
        with mock.patch.object(
            self.dbapi,
            'create_audit_template',
            wraps=self.dbapi.create_audit_template
        ) as cn_mock:
            response = self.post_json('/audit_templates', audit_template_dict)
        self.assertEqual(audit_template_dict['goal'],
                         response.json['goal_uuid'])
        # Check that 'id' is not in first arg of positional args
        self.assertNotIn('id', cn_mock.call_args[0][0])

    def test_create_audit_template_generate_uuid(self):
        """A uuid is generated server-side when none is supplied."""
        audit_template_dict = post_get_test_audit_template(
            goal=self.fake_goal1.uuid, strategy=None)

        response = self.post_json('/audit_templates', audit_template_dict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))

    def test_create_audit_template_with_invalid_goal(self):
        """An unknown goal uuid is rejected before reaching the DB."""
        with mock.patch.object(
            self.dbapi,
            'create_audit_template',
            wraps=self.dbapi.create_audit_template
        ) as cn_mock:
            audit_template_dict = post_get_test_audit_template(
                goal_uuid=utils.generate_uuid())
            response = self.post_json('/audit_templates',
                                      audit_template_dict, expect_errors=True)
        self.assertEqual(400, response.status_int)
        assert not cn_mock.called

    def test_create_audit_template_with_invalid_strategy(self):
        """An unknown strategy uuid is rejected before reaching the DB."""
        with mock.patch.object(
            self.dbapi,
            'create_audit_template',
            wraps=self.dbapi.create_audit_template
        ) as cn_mock:
            audit_template_dict = post_get_test_audit_template(
                goal_uuid=self.fake_goal1['uuid'],
                strategy_uuid=utils.generate_uuid())
            response = self.post_json('/audit_templates',
                                      audit_template_dict, expect_errors=True)
        self.assertEqual(400, response.status_int)
        assert not cn_mock.called

    def test_create_audit_template_with_unrelated_strategy(self):
        """A strategy that does not belong to the goal is rejected."""
        with mock.patch.object(
            self.dbapi,
            'create_audit_template',
            wraps=self.dbapi.create_audit_template
        ) as cn_mock:
            audit_template_dict = post_get_test_audit_template(
                goal_uuid=self.fake_goal1['uuid'],
                strategy=self.fake_strategy2['uuid'])
            response = self.post_json('/audit_templates',
                                      audit_template_dict, expect_errors=True)
        self.assertEqual(400, response.status_int)
        assert not cn_mock.called

    def test_create_audit_template_with_uuid(self):
        """The default fixture POST is rejected with 400, never hitting the DB."""
        # presumably the default fixture carries a client-supplied uuid, which
        # the API forbids -- TODO confirm against post_get_test_audit_template.
        with mock.patch.object(
            self.dbapi,
            'create_audit_template',
            wraps=self.dbapi.create_audit_template
        ) as cn_mock:
            audit_template_dict = post_get_test_audit_template()
            response = self.post_json('/audit_templates', audit_template_dict,
                                      expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        assert not cn_mock.called
class TestDelete(api_base.FunctionalTest):
    """Tests for DELETE /audit_templates/<ident> (soft delete by uuid or name)."""

    def setUp(self):
        super(TestDelete, self).setUp()
        # A goal must exist before an audit template can reference it.
        obj_utils.create_test_goal(self.context)
        self.audit_template = obj_utils.create_test_audit_template(
            self.context)

    @mock.patch.object(timeutils, 'utcnow')
    def test_delete_audit_template_by_uuid(self, mock_utcnow):
        """Deleting by uuid yields 404 on GET but keeps a soft-deleted row."""
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete(urlparse.quote('/audit_templates/%s' %
                                   self.audit_template.uuid))
        # The template is no longer visible through the API.
        response = self.get_json(
            urlparse.quote('/audit_templates/%s' % self.audit_template.uuid),
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        # ... nor through the objects layer with default visibility.
        self.assertRaises(exception.AuditTemplateNotFound,
                          objects.AuditTemplate.get_by_uuid,
                          self.context,
                          self.audit_template.uuid)

        # With show_deleted the soft-deleted record is still retrievable.
        self.context.show_deleted = True
        at = objects.AuditTemplate.get_by_uuid(self.context,
                                               self.audit_template.uuid)
        self.assertEqual(self.audit_template.name, at.name)

    @mock.patch.object(timeutils, 'utcnow')
    def test_delete_audit_template_by_name(self, mock_utcnow):
        """Deleting by name behaves like deleting by uuid (soft delete)."""
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete(urlparse.quote('/audit_templates/%s' %
                                   self.audit_template.name))
        response = self.get_json(
            urlparse.quote('/audit_templates/%s' % self.audit_template.name),
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertRaises(exception.AuditTemplateNotFound,
                          objects.AuditTemplate.get_by_name,
                          self.context,
                          self.audit_template.name)

        # Soft-deleted record remains reachable when show_deleted is set.
        self.context.show_deleted = True
        at = objects.AuditTemplate.get_by_name(self.context,
                                               self.audit_template.name)
        self.assertEqual(self.audit_template.uuid, at.uuid)

    def test_delete_audit_template_not_found(self):
        """Deleting a non-existent uuid returns 404 with an error payload."""
        uuid = utils.generate_uuid()
        response = self.delete(
            '/audit_templates/%s' % uuid, expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
class TestAuditTemplatePolicyEnforcement(api_base.FunctionalTest):
    """Verify each audit_template API action is blocked (403) by policy."""

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        # Restrict *rule* to admins, invoke the API call as a non-admin and
        # expect a 403 whose fault string names the denied policy rule.
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            # BUG FIX: was "rule:defaut" (typo) -- the check only denied
            # because the referenced rule did not exist, not by design.
            rule: "rule:default"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # BUG FIX: assertTrue(expected, actual) treated the (always truthy)
        # expected text as the value under test, so this never failed;
        # assertIn actually inspects the fault string.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            jsonutils.loads(response.json['error_message'])['faultstring'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "audit_template:get_all", self.get_json, '/audit_templates',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        obj_utils.create_test_goal(self.context)
        audit_template = obj_utils.create_test_audit_template(self.context)
        self._common_policy_check(
            "audit_template:get", self.get_json,
            '/audit_templates/%s' % audit_template.uuid,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "audit_template:detail", self.get_json,
            '/audit_templates/detail',
            expect_errors=True)

    def test_policy_disallow_update(self):
        obj_utils.create_test_goal(self.context)
        audit_template = obj_utils.create_test_audit_template(self.context)
        self._common_policy_check(
            "audit_template:update", self.patch_json,
            '/audit_templates/%s' % audit_template.uuid,
            [{'path': '/state', 'value': objects.audit.State.SUCCEEDED,
             'op': 'replace'}], expect_errors=True)

    def test_policy_disallow_create(self):
        fake_goal1 = obj_utils.get_test_goal(
            self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1")
        fake_goal1.create()
        fake_strategy1 = obj_utils.get_test_strategy(
            self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1",
            goal_id=fake_goal1.id)
        fake_strategy1.create()
        audit_template_dict = post_get_test_audit_template(
            goal=fake_goal1.uuid,
            strategy=fake_strategy1.uuid)
        self._common_policy_check(
            "audit_template:create", self.post_json, '/audit_templates',
            audit_template_dict, expect_errors=True)

    def test_policy_disallow_delete(self):
        obj_utils.create_test_goal(self.context)
        audit_template = obj_utils.create_test_audit_template(self.context)
        self._common_policy_check(
            "audit_template:delete", self.delete,
            '/audit_templates/%s' % audit_template.uuid, expect_errors=True)
class TestAuditTemplatePolicyWithAdminContext(TestListAuditTemplate,
                                              api_base.AdminRoleTest):
    """Re-runs the list tests under an admin context with explicit rules."""

    def setUp(self):
        super(TestAuditTemplatePolicyWithAdminContext, self).setUp()
        # Build the rule table programmatically: every audit_template action
        # maps onto the admin-only "default" rule.
        actions = ("create", "delete", "detail", "get", "get_all", "update")
        rules = {
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
        }
        rules.update(
            {"audit_template:%s" % action: "rule:default"
             for action in actions})
        self.policy.set_rules(rules)
| 43.754967 | 78 | 0.637718 |
ace32ad48024bf33296c6a4a972fb33c11032244 | 2,387 | py | Python | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../../_base_/models/ocr_mask_rcnn_r50_fpn_ohem_poly.py',
'../../_base_/schedules/schedule_160e.py', '../../_base_/runtime_10e.py'
]
# Dataset: CTW1500 curved-text benchmark in ICDAR-style annotation format.
dataset_type = 'IcdarDataset'
data_root = 'data/ctw1500/'

# ImageNet mean/std normalization (RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# img_norm_cfg = dict(mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True)

# Training augmentation pipeline: scale jitter, random flip, normalization,
# 640x640 instance-aware random crop, then tensor packing.
train_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='ScaleAspectJitter',
        img_scale=None,
        keep_ratio=False,
        resize_type='indep_sample_in_range',
        scale_range=(640, 2560)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(
        type='RandomCropInstances',
        target_size=(640, 640),
        mask_type='union_all',
        instance_key='gt_masks'),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]

# Test-time pipeline: single-scale, no flip.
test_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(
        type='MultiScaleFlipAug',
        # resize the long size to 1600
        img_scale=(1600, 1600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            # no flip
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Dataloaders: 8 images/GPU for training, 1 for val/test.
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type=dataset_type,
        ann_file=data_root + '/instances_training.json',
        img_prefix=data_root + '/imgs',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        # select_first_k=1,
        ann_file=data_root + '/instances_test.json',
        img_prefix=data_root + '/imgs',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        # select_first_k=1,
        ann_file=data_root + '/instances_test.json',
        img_prefix=data_root + '/imgs',
        pipeline=test_pipeline))

# Evaluate hmean-iou every 10 epochs.
evaluation = dict(interval=10, metric='hmean-iou')
ace32ae6b0aa5952be43f64ecba5003b57b09197 | 2,606 | py | Python | manuscript_preparation/gut_microbiome/main.py | bigghost2054/KIDS | ace171efc6cf4eb3cd346a662e5af32dc4072ab3 | [
"Apache-2.0"
] | null | null | null | manuscript_preparation/gut_microbiome/main.py | bigghost2054/KIDS | ace171efc6cf4eb3cd346a662e5af32dc4072ab3 | [
"Apache-2.0"
] | null | null | null | manuscript_preparation/gut_microbiome/main.py | bigghost2054/KIDS | ace171efc6cf4eb3cd346a662e5af32dc4072ab3 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Denominator for the coverage percentages printed by main().
TOTAL_NUM_SAMPLES = 94342
def main():
    """Report, for each marker gene, how many unique metagenome run samples
    matched it and what fraction of the full sample set that represents.

    Refactored: the original repeated the same load/flatten/dedupe/print block
    five times (once per gene, plus a dangling ``#proV`` marker); the loop
    below is behaviorally identical and trivially extensible.
    """
    for gene in ('lrp', 'rbsK', 'qorB', 'hdfR', 'ftsP'):
        _report_unique_samples(gene)


def _report_unique_samples(gene):
    """Load ./<gene>.txt and print the unique-sample coverage for *gene*."""
    df = pd.read_csv(
        f'./{gene}.txt',
        sep='\t',
        names=['mark', 'target', 'run_sample_ids', 'e-value'])
    run_sample_ids = df['run_sample_ids'].tolist()
    # Each row carries a space-separated list of sample ids; flatten + dedupe.
    run_sample_ids = [y for x in run_sample_ids for y in x.split(' ')]
    run_sample_ids = list(set(run_sample_ids))

    percentage = len(run_sample_ids) * 100 / TOTAL_NUM_SAMPLES
    print(f'Number of unique samples for {gene}: '
          f'{len(run_sample_ids)}/{TOTAL_NUM_SAMPLES} ({percentage:.2f}%)')
if __name__ == '__main__':
main() | 35.69863 | 115 | 0.651957 |
ace32c02ddfd19946298fd82b00e7199aaf9ca3e | 3,756 | py | Python | fase.py | gersonvneto/pythonbirds | 08f8d9b5d94879ceee762dc2f6087ed7bd165bcb | [
"MIT"
] | null | null | null | fase.py | gersonvneto/pythonbirds | 08f8d9b5d94879ceee762dc2f6087ed7bd165bcb | [
"MIT"
] | null | null | null | fase.py | gersonvneto/pythonbirds | 08f8d9b5d94879ceee762dc2f6087ed7bd165bcb | [
"MIT"
] | 1 | 2018-04-16T22:11:50.000Z | 2018-04-16T22:11:50.000Z | # -*- coding: utf-8 -*-
from itertools import chain
from atores import ATIVO
# Game status constants returned by Fase.status():
VITORIA = 'VITORIA'            # victory: no pig is still active
DERROTA = 'DERROTA'            # defeat: pigs remain and no active birds are left
EM_ANDAMENTO = 'EM_ANDAMENTO'  # game still in progress
class Ponto():
    """A single character plotted at an integer (x, y) screen position."""

    def __init__(self, x, y, caracter):
        # Coordinates are snapped to the nearest integer cell.
        self.x = round(x)
        self.y = round(y)
        self.caracter = caracter

    def __eq__(self, other):
        return ((self.x, self.y, self.caracter)
                == (other.x, other.y, other.caracter))

    def __hash__(self):
        # Equal points share x and y, so hashing only those keeps the
        # eq/hash contract even though caracter is ignored here.
        return hash(self.x) ^ hash(self.y)

    def __repr__(self, *args, **kwargs):
        return "Ponto({0},{1},'{2}')".format(self.x, self.y, self.caracter)
class Fase():
    """One Angry-Birds-style level: birds, pigs and obstacles plus the
    launch/collision/status logic tying them together."""

    def __init__(self, intervalo_de_colisao=1):
        """
        Initialize a level.

        :param intervalo_de_colisao: collision tolerance (distance within
            which two actors are considered to have collided)
        """
        self.intervalo_de_colisao = intervalo_de_colisao
        self._passaros = []    # birds
        self._porcos = []      # pigs
        self._obstaculos = []  # obstacles

    def adicionar_obstaculo(self, *obstaculos):
        """
        Add obstacles to the level.
        :param obstaculos: obstacle actors
        """
        self._obstaculos.extend(obstaculos)

    def adicionar_porco(self, *porcos):
        """
        Add pigs to the level.
        :param porcos: pig actors
        """
        self._porcos.extend(porcos)

    def adicionar_passaro(self, *passaros):
        """
        Add birds to the level.
        :param passaros: bird actors
        """
        self._passaros.extend(passaros)

    def status(self):
        """
        Return a constant describing the game status.

        If no pig is active any more, the game is won (VITORIA).
        If pigs remain and there is still an active bird, it is
        EM_ANDAMENTO; otherwise it is lost (DERROTA).
        :return: one of VITORIA, EM_ANDAMENTO, DERROTA
        """
        if not self._possui_porco_ativo():
            return VITORIA
        # A name starting with "_" marks a protected method (or attribute):
        # only this class and its subclasses are expected to use it.
        elif self._possui_passaros_ativos():
            return EM_ANDAMENTO
        else:
            return DERROTA

    def lancar(self, angulo, tempo):
        """
        Launch logic: pick the first bird that has not been launched yet
        and launch it; do nothing when every bird has already flown.

        :param angulo: launch angle
        :param tempo: launch time
        """
        for passaro in self._passaros:
            if not passaro.foi_lancado():
                passaro.lancar(angulo, tempo)
                break

    def calcular_pontos(self, tempo):
        """
        Compute the points to be drawn on screen at *tempo*: update every
        bird's position, resolve collisions against obstacles/pigs and the
        ground, then map each actor to a Ponto.

        :param tempo: time for which the points must be calculated
        :return: list of Ponto objects
        """
        for passaro in self._passaros:
            passaro.calcular_posicao(tempo)
            for alvo in self._obstaculos+self._porcos:
                passaro.colidir(alvo, self.intervalo_de_colisao)
            passaro.colidir_com_chao()
        pontos=[self._transformar_em_ponto(a) for a in self._passaros+self._obstaculos+self._porcos]

        return pontos

    def _transformar_em_ponto(self, ator):
        # Map an actor to its drawable Ponto (position + display character).
        return Ponto(ator.x, ator.y, ator.caracter())

    def _possui_porco_ativo(self):
        # True while at least one pig is still ACTIVE.
        for porco in self._porcos:
            if porco.status== ATIVO:
                return True
        return False

    def _possui_passaros_ativos(self):
        # True while at least one bird is still ACTIVE.
        for passaro in self._passaros:
            if passaro.status== ATIVO:
                return True
        return False
| 26.828571 | 113 | 0.610756 |
ace32d8627a7d6afd9923f45d3a68e606b5d9843 | 502 | py | Python | src/djangocatan/pages/migrations/0002_example.py | takuto-litalico/django-catan | 32df1b57f433f5b992207d3f13b9abf73883b276 | [
"bzip2-1.0.6"
] | null | null | null | src/djangocatan/pages/migrations/0002_example.py | takuto-litalico/django-catan | 32df1b57f433f5b992207d3f13b9abf73883b276 | [
"bzip2-1.0.6"
] | null | null | null | src/djangocatan/pages/migrations/0002_example.py | takuto-litalico/django-catan | 32df1b57f433f5b992207d3f13b9abf73883b276 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 3.1.1 on 2020-11-05 10:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ``Example`` table with an
    # auto-increment primary key and a ``name`` varchar(120) column.
    # Do not edit generated migrations by hand except to add comments.

    dependencies = [
        ('pages', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Example',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
            ],
        ),
    ]
| 23.904762 | 114 | 0.569721 |
ace32f0ac1b57e38bbb689c7f78c4fc38e1172ea | 2,836 | py | Python | tests/test_fbnet.py | hanranCode/mega.pytorch | 28c8a184372aa57a942576a944b3526590bc1ace | [
"BSD-2-Clause"
] | 521 | 2020-03-23T13:08:44.000Z | 2022-03-31T08:50:01.000Z | tests/test_fbnet.py | hanranCode/mega.pytorch | 28c8a184372aa57a942576a944b3526590bc1ace | [
"BSD-2-Clause"
] | 108 | 2020-03-27T07:20:12.000Z | 2022-03-22T03:30:04.000Z | tests/test_fbnet.py | hanranCode/mega.pytorch | 28c8a184372aa57a942576a944b3526590bc1ace | [
"BSD-2-Clause"
] | 115 | 2020-03-27T06:40:57.000Z | 2022-02-28T07:27:27.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
import numpy as np
import torch
import mega_core.modeling.backbone.fbnet_builder as fbnet_builder
# Gates the *_cuda tests below via unittest.skipIf.
TEST_CUDA = torch.cuda.is_available()
def _test_primitive(self, device, op_name, op_func, N, C_in, C_out, expand, stride):
op = op_func(C_in, C_out, expand, stride).to(device)
input = torch.rand([N, C_in, 7, 7], dtype=torch.float32).to(device)
output = op(input)
self.assertEqual(
output.shape[:2], torch.Size([N, C_out]),
'Primitive {} failed for shape {}.'.format(op_name, input.shape)
)
class TestFBNetBuilder(unittest.TestCase):
    """Smoke tests for the FBNet primitive building blocks."""

    def test_identity(self):
        """Identity(20, 20, 1) must return its input unchanged; with a new
        channel count / stride it must produce the reshaped output."""
        same = fbnet_builder.Identity(20, 20, 1)
        tensor_in = torch.rand([10, 20, 7, 7], dtype=torch.float32)
        np.testing.assert_array_equal(
            np.array(tensor_in), np.array(same(tensor_in)))

        reshaped = fbnet_builder.Identity(20, 40, 2)
        tensor_in = torch.rand([10, 20, 7, 7], dtype=torch.float32)
        np.testing.assert_array_equal(
            reshaped(tensor_in).shape, [10, 40, 4, 4])

    def _check_all_primitives(self, device, batch_size):
        """Run every registered primitive once on *device* with the given
        batch size (shared body of the four smoke tests below)."""
        for name, factory in fbnet_builder.PRIMITIVES.items():
            print('Testing {}'.format(name))
            _test_primitive(
                self, device,
                name, factory,
                N=batch_size, C_in=16, C_out=32, expand=4, stride=1
            )

    def test_primitives(self):
        """Every primitive must run on CPU."""
        self._check_all_primitives("cpu", 20)

    @unittest.skipIf(not TEST_CUDA, "no CUDA detected")
    def test_primitives_cuda(self):
        """Every primitive must run on CUDA."""
        self._check_all_primitives("cuda", 20)

    def test_primitives_empty_batch(self):
        """Every primitive must handle an empty (N=0) batch on CPU."""
        self._check_all_primitives("cpu", 0)

    @unittest.skipIf(not TEST_CUDA, "no CUDA detected")
    def test_primitives_cuda_empty_batch(self):
        """Every primitive must handle an empty (N=0) batch on CUDA."""
        self._check_all_primitives("cuda", 0)
unittest.main()
| 33.364706 | 84 | 0.593441 |
ace32f4b8ef96ebf316dec50b95c8e77bb615079 | 6,550 | py | Python | Section5_User_Interface_Design/Video5_OpenGL_Rotation.py | Tom-Niesytto/Python_GUI_Programming_Recipes_using_PyQt5 | 6c63b6351c03fa228745854a939306d58d17c02e | [
"MIT"
] | 1 | 2021-01-06T23:32:00.000Z | 2021-01-06T23:32:00.000Z | Section5_User_Interface_Design/Video5_OpenGL_Rotation.py | Tom-Niesytto/Python_GUI_Programming_Recipes_using_PyQt5 | 6c63b6351c03fa228745854a939306d58d17c02e | [
"MIT"
] | null | null | null | Section5_User_Interface_Design/Video5_OpenGL_Rotation.py | Tom-Niesytto/Python_GUI_Programming_Recipes_using_PyQt5 | 6c63b6351c03fa228745854a939306d58d17c02e | [
"MIT"
] | 1 | 2021-01-06T23:32:02.000Z | 2021-01-06T23:32:02.000Z | '''
Created on Sep 26, 2017
Some of the code in this module was inspired by the official PyQt "Hello GL" example
@author: Burkhard A. Meier
'''
import sys
from OpenGL.GL import *
from PyQt5.QtWidgets import QOpenGLWidget, QApplication
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
class PyQtOpenGL(QOpenGLWidget):
    """OpenGL widget drawing a lit pyramid plus nested squares and line
    frames, rotatable by dragging with the left or right mouse button."""

    # rotation signals mouse movement
    # (angles are stored in 1/16-degree units, the Qt convention)
    x_rotation_changed = pyqtSignal(int)
    y_rotation_changed = pyqtSignal(int)
    z_rotation_changed = pyqtSignal(int)

    def __init__(self, parent=None):
        super().__init__(parent)
        # Toggles for the individual draw passes in draw().
        self.paint_0 = True
        self.paint_1 = True
        self.paint_2 = True
        self.resize_lines = True  # set orthographic matrix multiplier to large/small
        # self.resize_lines = False
        self.paint_rotation = True
        # self.paint_rotation = False
        self.x_rotation = 0  # rotation variables (1/16-degree units)
        self.y_rotation = 0
        self.z_rotation = 0
        self.last_pos = QPoint()  # last mouse position, for drag deltas

    def normalize_angle(self, angle):
        # Wrap an angle into the [0, 360*16] range (1/16-degree units).
        while angle < 0:
            angle += 360 * 16
        while angle > 360 * 16:
            angle -= 360 * 16
        return angle

    # slots for xyz-rotation: each normalizes, stores, emits and repaints
    # only when the angle actually changed.
    def set_x_rotation(self, angle):
        angle = self.normalize_angle(angle)
        if angle != self.x_rotation:
            self.x_rotation = angle
            self.x_rotation_changed.emit(angle)
            self.update()

    def set_y_rotation(self, angle):
        angle = self.normalize_angle(angle)
        if angle != self.y_rotation:
            self.y_rotation = angle
            self.y_rotation_changed.emit(angle)
            self.update()

    def set_z_rotation(self, angle):
        angle = self.normalize_angle(angle)
        if angle != self.z_rotation:
            self.z_rotation = angle
            self.z_rotation_changed.emit(angle)
            self.update()

    def initializeGL(self):
        # reimplemented: one-time GL state (clear color, depth test,
        # back-face culling, smooth shading, a single light).
        glClearColor(0.0, 0.0, 1.0, 0.0)  # blue
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_CULL_FACE)
        glShadeModel(GL_SMOOTH)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        lightPosition = [0, 0, 10, 1.0]
        glLightfv(GL_LIGHT0, GL_POSITION, lightPosition)

    def paintGL(self):
        # reimplemented: clear, apply translation + current xyz rotation,
        # then hand off to draw(). Rotation values are 1/16-degree units,
        # hence the /16.0.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glTranslatef(0.0, 0.0, -10.0)
        glRotatef(self.x_rotation / 16.0, 1.0, 0.0, 0.0)
        glRotatef(self.y_rotation / 16.0, 0.0, 1.0, 0.0)
        glRotatef(self.z_rotation / 16.0, 0.0, 0.0, 1.0)
        self.draw()

    def draw(self):
        # Pyramid (a quad base plus four triangular faces), gated by
        # paint_rotation.
        if self.paint_rotation:
            glColor3f(1.0, 0.0, 0.0)
            glBegin(GL_QUADS)  # bottom of pyramid
            glNormal3f(0, 0, -1)
            glVertex3f(-1 ,-1, 0)
            glVertex3f(-1 ,1, 0)
            glVertex3f(1, 1, 0)
            glVertex3f(1, -1, 0)
            glEnd()
            glColor3f(0.0, 0.0, 0.0)
            glBegin(GL_TRIANGLES)  # four sides of pyramid
            glNormal3f(0, -1, 0.707)
            glVertex3f(-1, -1, 0)
            glVertex3f(1, -1, 0)
            glVertex3f(0, 0, 1.2)
            glEnd()
            glBegin(GL_TRIANGLES)
            glNormal3f(1,0, 0.707)
            glVertex3f(1,-1,0)
            glVertex3f(1,1,0)
            glVertex3f(0,0,1.2)
            glEnd()
            glBegin(GL_TRIANGLES)
            glNormal3f(0,1,0.707)
            glVertex3f(1,1,0)
            glVertex3f(-1,1,0)
            glVertex3f(0,0,1.2)
            glEnd()
            glBegin(GL_TRIANGLES)
            glNormal3f(-1,0,0.707)
            glVertex3f(-1,1,0)
            glVertex3f(-1,-1,0)
            glVertex3f(0,0,1.2)
            glEnd()
        # square and lines
        if self.paint_0:
            glColor3f(1.0, 0.0, 0.0)  # functions expects 3 f(loats); RGB: red
            glRectf(-5, -5, 5, 5)  # draw a filled rectangle with above color, position(x,y) pairs from center of window
        if self.paint_1:
            glColor3f(0.0, 1.0, 0.0)  # set color to RGB: green
            x=10
            y=10
            self.draw_loop(x, y)
        if self.paint_2:
            glColor3f(0.0, 0.0, 0.0)  # set color to RGB: black
            x=5
            y=5
            self.draw_loop(x, y)

    def resizeGL(self, width, height):
        # reimplemented: keep a square viewport centered in the widget and
        # rebuild the orthographic projection.
        side = min(width, height)
        if side < 0:
            return
        glViewport((width - side) // 2, (height - side) // 2, side, side)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        if self.resize_lines:
            glOrtho(-50, 50, -50, 50, -50.0, 50.0)  # for square and lines; combined with pyramid
        else:
            glOrtho(-2, +2, -2, +2, 1.0, 15.0)  # original pyramid setting
        glMatrixMode(GL_MODELVIEW)

    def draw_loop(self, x, y, incr=10):
        # Draw five concentric square outlines growing by incr each step.
        for _ in range(5):
            self.draw_square_lines(x, y)
            x += incr
            y += incr

    def draw_square_lines(self, x=10, y=10, z=0):
        # One square outline from four line segments, centered on the origin.
        glBegin(GL_LINES)  # begin to draw a line
        glVertex3f(x, y, z)  # start line relative to center of window
        glVertex3f(x, -y, z)  # draw first line
        glVertex3f(x, -y, z)
        glVertex3f(-x, -y, z)
        glVertex3f(-x, -y, z)
        glVertex3f(-x, y, z)
        glVertex3f(-x, y, z)
        glVertex3f(x, y, z)
        glEnd()

    def mousePressEvent(self, event):
        # reimplemented: remember where the drag started.
        self.last_pos = event.pos()

    def mouseMoveEvent(self, event):
        # reimplemented: left drag rotates around x/y; right drag rotates
        # around x/z. The factor 8 scales pixels to 1/16-degree steps.
        move_x = event.x() - self.last_pos.x()
        move_y = event.y() - self.last_pos.y()
        if event.buttons() & Qt.LeftButton:  # left mouse button
            self.set_x_rotation(self.x_rotation + 8 * move_y)
            self.set_y_rotation(self.y_rotation + 8 * move_x)
        elif event.buttons() & Qt.RightButton:  # right mouse button
            self.set_x_rotation(self.x_rotation + 8 * move_y)
            self.set_z_rotation(self.z_rotation + 8 * move_x)  # spin pyramid around itself
        self.last_pos = event.pos()
if __name__ == '__main__':
    # Standalone demo: create the Qt application and show the GL widget.
    app = QApplication(sys.argv)
    widget = PyQtOpenGL()
    widget.show()
app.exec_() | 31.190476 | 125 | 0.52916 |
ace33093b8a69903ee1bb156c4226710f66c5b9d | 2,096 | py | Python | tigre/Utilities/parkerweight.py | Reuben481/tigre | f342aaa73da8204140fb48929c28cf2f75566a21 | [
"BSD-3-Clause"
] | null | null | null | tigre/Utilities/parkerweight.py | Reuben481/tigre | f342aaa73da8204140fb48929c28cf2f75566a21 | [
"BSD-3-Clause"
] | null | null | null | tigre/Utilities/parkerweight.py | Reuben481/tigre | f342aaa73da8204140fb48929c28cf2f75566a21 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
import numpy as np
import warnings
import scipy.io
def parkerweight(proj, geo, angles, q):
    """Apply Parker redundancy weights to a stack of projections in place.

    :param proj: projection stack, indexed [projection, row, detector column];
        modified in place and also returned.
    :param geo: geometry providing sDetector / dDetector (detector size and
        pixel size) and DSD (source-to-detector distance).
    :param angles: one acquisition angle per projection.
    :param q: smoothing parameter forwarded to b_subf.
    :return: the weighted projection stack (same array as *proj*).
    """
    # Fan angle of each detector column, measured at the source.
    start = -geo.sDetector[0] / 2 + geo.dDetector[0] / 2
    stop = geo.sDetector[0] / 2 - geo.dDetector[0] / 2
    step = geo.dDetector[0]
    alpha = np.arctan(np.arange(start, stop + step, step) / geo.DSD)
    alpha = -alpha
    delta = abs(alpha[0] - alpha[-1]) / 2  # half fan aperture
    totangles = np.cumsum(np.diff(angles))[-1]  # total angular span of the scan
    if totangles < 2 * np.pi:
        # NOTE(review): the message talks about spans >= 2*pi while the test
        # is "< 2*pi" -- this looks inverted; kept as-is pending confirmation.
        warnings.warn('Computing Parker weigths for scanning angle equal or bigger than 2*pi '
                      'Consider disabling Parker weigths.')
    if totangles < np.pi + 2 * delta:
        warnings.warn('Scanning angles smaller than pi+cone_angle. This is limited angle tomgraphy, \n'
                      'there is nosufficient data, thus weigthing for data redundancy is not required.')
    epsilon = max(totangles - (np.pi + 2 * delta), 0)
    # BUG FIX: the loop was hard-coded to range(33) (a debugging leftover --
    # the intended bound survived as a commented-out line); weight every
    # projection. Also dropped the unused 'data = proj' alias.
    for i in range(proj.shape[0]):
        beta = angles[i]
        w = 0.5 * (s_function(beta / b_subf(alpha, delta, epsilon, q) - 0.5)
                   + s_function((beta - 2 * delta + 2 * alpha - epsilon)
                                / b_subf(alpha, delta, epsilon, q) + 0.5)
                   - s_function((beta - np.pi + 2 * alpha)
                                / b_subf(-alpha, delta, epsilon, q) - 0.5)
                   - s_function((beta - np.pi - 2 * delta - epsilon)
                                / b_subf(-alpha, delta, epsilon, q) + 0.5)
                   )
        proj[i] *= w
    return proj
def s_function(abeta):
    """Parker's smooth step S(x): 0 for x <= -0.5, 1 for x >= 0.5 and a
    half-sine ramp 0.5*(1 + sin(pi*x)) in between. Vectorized over arrays."""
    ramp = 0.5 * (1 + np.sin(np.pi * abeta))
    w = np.where(np.abs(abeta) < 0.5, ramp, 0.0)
    return np.where(abeta >= 0.5, 1.0, w)
# b_function = B
def b_function(alpha, delta, epsilon):
    """Angular width B(alpha) = 2*delta - 2*alpha + epsilon (Parker's B)."""
    short_scan_width = 2 * delta - 2 * alpha
    return short_scan_width + epsilon
# b_subf = b
def b_subf(alpha,delta,epsilon,q):
return q*b_function(alpha,delta,epsilon) | 42.77551 | 116 | 0.552958 |
ace33125997e3f2ad446bb0dd57ef14aa999d9f4 | 133 | py | Python | Project Euler/euler16.py | kdawar1/generalized_euler | 6ee2f6a3b5cfd7af3b6a3eb96b3ad22838ac6d7b | [
"MIT"
] | null | null | null | Project Euler/euler16.py | kdawar1/generalized_euler | 6ee2f6a3b5cfd7af3b6a3eb96b3ad22838ac6d7b | [
"MIT"
] | null | null | null | Project Euler/euler16.py | kdawar1/generalized_euler | 6ee2f6a3b5cfd7af3b6a3eb96b3ad22838ac6d7b | [
"MIT"
] | null | null | null | def sum_digits(num, pow):
return sum(int(i) for i in str(num ** pow))
if __name__ == "__main__":
    # Project Euler 16: sum of the decimal digits of 2**1000.
    print(sum_digits(2, 1000))
| 22.166667 | 47 | 0.639098 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.