hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f982e2d7b9d04a27d39368ea1137d719db208d1 | 2,030 | py | Python | pulsar/async/consts.py | goodboy/pulsar | e4b42d94b7e262a165782747d65f8b39fb8d3ba9 | [
"BSD-3-Clause"
] | 1 | 2020-11-30T07:36:57.000Z | 2020-11-30T07:36:57.000Z | pulsar/async/consts.py | goodboy/pulsar | e4b42d94b7e262a165782747d65f8b39fb8d3ba9 | [
"BSD-3-Clause"
] | null | null | null | pulsar/async/consts.py | goodboy/pulsar | e4b42d94b7e262a165782747d65f8b39fb8d3ba9 | [
"BSD-3-Clause"
] | null | null | null | '''
Constants used throughout pulsar.
'''
from pulsar.utils.structures import AttributeDictionary
# LOW LEVEL CONSTANTS - NO NEED TO CHANGE THOSE ###########################
ACTOR_STATES = AttributeDictionary(INITIAL=0X0,
INACTIVE=0X1,
STARTING=0x2,
RUN=0x3,
STOPPING=0x4,
CLOSE=0x5,
TERMINATE=0x6)
'''
.. _actor-states:
Actor state constants are access via::
from pulsar import ACTOR_STATES
They are:
* ``ACTOR_STATES.INITIAL = 0`` when an actor is just created, before the
:class:`pulsar.Actor.start` method is called.
* ``ACTOR_STATES.STARTING = 2`` when :class:`pulsar.Actor.start` method
is called.
* ``ACTOR_STATES.RUN = 3`` when :class:`pulsar.Actor._loop` is up
and running.
* ``ACTOR_STATES.STOPPING = 4`` when :class:`pulsar.Actor.stop` has been
called for the first time and the actor is running.
'''
ACTOR_STATES.DESCRIPTION = {ACTOR_STATES.INACTIVE: 'inactive',
ACTOR_STATES.INITIAL: 'initial',
ACTOR_STATES.STARTING: 'starting',
ACTOR_STATES.RUN: 'running',
ACTOR_STATES.STOPPING: 'stopping',
ACTOR_STATES.CLOSE: 'closed',
ACTOR_STATES.TERMINATE: 'terminated'}
#
ACTOR_ACTION_TIMEOUT = 5
'''Important constant used by :class:`pulsar.Monitor` to kill actors which
don't respond to the ``stop`` command.'''
MAX_ASYNC_WHILE = 1 # Max interval for async_while
MIN_NOTIFY = 3 # DON'T NOTIFY BELOW THIS INTERVAL
MAX_NOTIFY = 30 # NOTIFY AT LEAST AFTER THESE SECONDS
ACTOR_TIMEOUT_TOLE = 0.3 # NOTIFY AFTER THIS TIMES THE TIMEOUT
ACTOR_JOIN_THREAD_POOL_TIMEOUT = 5 # TIMEOUT WHEN JOINING THE THREAD POOL
MONITOR_TASK_PERIOD = 1
'''Interval for :class:`pulsar.Monitor` and :class:`pulsar.Arbiter`
periodic task.'''
| 39.038462 | 75 | 0.600493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,090 | 0.536946 |
7f983a73e7043a31b4df3adf2be126e5bcbbdc71 | 1,732 | py | Python | tornado/test/twisted_test.py | dave-shawley/tornado | 00c9e0ae31a5a0d12e09109fb77ffe391bfe1131 | [
"Apache-2.0"
] | 1 | 2022-03-24T00:58:47.000Z | 2022-03-24T00:58:47.000Z | tornado/test/twisted_test.py | dave-shawley/tornado | 00c9e0ae31a5a0d12e09109fb77ffe391bfe1131 | [
"Apache-2.0"
] | null | null | null | tornado/test/twisted_test.py | dave-shawley/tornado | 00c9e0ae31a5a0d12e09109fb77ffe391bfe1131 | [
"Apache-2.0"
] | null | null | null | # Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tornado.testing import AsyncTestCase, gen_test
try:
from twisted.internet.defer import ( # type: ignore
inlineCallbacks,
returnValue,
)
have_twisted = True
except ImportError:
have_twisted = False
else:
# Not used directly but needed for `yield deferred` to work.
import tornado.platform.twisted # noqa: F401
skipIfNoTwisted = unittest.skipUnless(have_twisted, "twisted module not present")
@skipIfNoTwisted
class ConvertDeferredTest(AsyncTestCase):
    """Verify that Twisted Deferreds can be awaited with ``yield`` inside
    Tornado ``gen_test`` coroutines (enabled by importing
    ``tornado.platform.twisted`` above)."""
    @gen_test
    def test_success(self):
        @inlineCallbacks
        def fn():
            if False:
                # inlineCallbacks doesn't work with regular functions;
                # must have a yield even if it's unreachable.
                yield
            returnValue(42)
        # Yielding the Deferred resolves to the returnValue payload.
        res = yield fn()
        self.assertEqual(res, 42)
    @gen_test
    def test_failure(self):
        @inlineCallbacks
        def fn():
            if False:
                # Unreachable yield to make this a generator (see above).
                yield
            1 / 0
        # The Deferred's failure must be re-raised at the yield point.
        with self.assertRaises(ZeroDivisionError):
            yield fn()
if __name__ == "__main__":
unittest.main()
| 26.646154 | 81 | 0.662818 | 614 | 0.354503 | 534 | 0.308314 | 631 | 0.364319 | 0 | 0 | 799 | 0.461316 |
7f99395d006207eb956af04a6f570e6ac0ee2c30 | 5,590 | py | Python | related_name/migrations/0001_initial.py | thinkAmi-sandbox/django_30-sample | 5ce2408a27100b0975f92c0f99a15671ad0c2465 | [
"Unlicense"
] | null | null | null | related_name/migrations/0001_initial.py | thinkAmi-sandbox/django_30-sample | 5ce2408a27100b0975f92c0f99a15671ad0c2465 | [
"Unlicense"
] | null | null | null | related_name/migrations/0001_initial.py | thinkAmi-sandbox/django_30-sample | 5ce2408a27100b0975f92c0f99a15671ad0c2465 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.0.6 on 2020-06-14 05:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='色')),
],
),
migrations.CreateModel(
name='Potato',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='potato_bud_colors', to='related_name.Color')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Fruit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruit_bud_colors', to='related_name.Color')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AppleWithRelatedName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_apple_color', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleWith3Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_colors', to='related_name.Color')),
('fruit_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruit_colors', related_query_name='my_fruit_colors', to='related_name.Color')),
('leaf_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='leaf_colors', to='related_name.Color')),
],
options={
'default_related_name': 'default_colors',
},
),
migrations.CreateModel(
name='AppleWith2Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='buds', to='related_name.Color')),
('fruit_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruits', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleNoReverseWithPlus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleNoReverseWithEndPlus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='end_plus+', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleDefaultRelatedName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_name_appledefaultrelatedname_list', to='related_name.Color')),
],
options={
'default_related_name': '%(app_label)s_%(class)s_list',
},
),
migrations.CreateModel(
name='Apple',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='related_name.Color')),
],
),
]
| 50.818182 | 188 | 0.597138 | 5,520 | 0.977683 | 0 | 0 | 0 | 0 | 0 | 0 | 1,103 | 0.19536 |
7f9b2596a347b694b0723898dcd1d3a1c8ef87bd | 380 | py | Python | architectures/utils.py | eddardd/torchDA | f53e3a8d5add454a7a2601fb4642da5c89608018 | [
"MIT"
] | null | null | null | architectures/utils.py | eddardd/torchDA | f53e3a8d5add454a7a2601fb4642da5c89608018 | [
"MIT"
] | null | null | null | architectures/utils.py | eddardd/torchDA | f53e3a8d5add454a7a2601fb4642da5c89608018 | [
"MIT"
] | null | null | null | import torch
class GradReverse(torch.autograd.Function):
    """Gradient-reversal function: identity on the forward pass, sign-flip
    on the backward pass.

    Commonly used to build gradient-reversal layers for adversarial
    domain-adaptation training.
    """

    @staticmethod
    def forward(ctx, x):
        # Identity: return a view shaped like the input.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse the gradient flowing back through this function.
        return grad_output.neg()
class LambdaLayer(torch.nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) | 19 | 43 | 0.626316 | 362 | 0.952632 | 0 | 0 | 144 | 0.378947 | 0 | 0 | 0 | 0 |
7f9be0ddb5a30a9415b074670d8819f33e76a562 | 550 | py | Python | tests/binary_search.py | jpages/twopy | d0ae42b02ee60cf432e716884f43ec6670bcae2b | [
"BSD-3-Clause"
] | 7 | 2018-12-18T20:32:04.000Z | 2021-05-30T04:20:22.000Z | tests/binary_search.py | jpages/twopy | d0ae42b02ee60cf432e716884f43ec6670bcae2b | [
"BSD-3-Clause"
] | null | null | null | tests/binary_search.py | jpages/twopy | d0ae42b02ee60cf432e716884f43ec6670bcae2b | [
"BSD-3-Clause"
] | 1 | 2021-11-14T17:47:11.000Z | 2021-11-14T17:47:11.000Z | # Binary search of an item in a (sorted) list
def binary_search(alist, item):
    """Return True if *item* occurs in the sorted list *alist*.

    Classic iterative binary search: the candidate range [lo, hi] is
    halved on every step until the item is found or the range empties.
    """
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if alist[mid] == item:
            return True
        if alist[mid] < item:
            # Item, if present, lies in the right half.
            lo = mid + 1
        else:
            # Item, if present, lies in the left half.
            hi = mid - 1
    return False
# Demo: look up 68 in a sample sorted list (prints True).
alist = [1, 4, 6, 7, 9, 15, 33, 45, 68, 90]
print(binary_search(alist, 68))
| 22 | 45 | 0.503636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.127273 |
7f9ccadbcf78416f1bb7daee7a2f287bb5176549 | 5,052 | py | Python | controller/modules/Logger.py | saumitraaditya/xmppSignal | 5f74153f19e1d1700fc4af954400bfd8fd440dfb | [
"MIT"
] | null | null | null | controller/modules/Logger.py | saumitraaditya/xmppSignal | 5f74153f19e1d1700fc4af954400bfd8fd440dfb | [
"MIT"
] | null | null | null | controller/modules/Logger.py | saumitraaditya/xmppSignal | 5f74153f19e1d1700fc4af954400bfd8fd440dfb | [
"MIT"
] | null | null | null | # ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import logging.handlers as lh
import os
import sys
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
    """IPOP controller module that routes log CBTs to Python's logging
    facility, writing either to the console or to a rotating log file,
    depending on the module configuration."""

    def __init__(self, CFxHandle, paramDict, ModuleName):
        super(Logger, self).__init__(CFxHandle, paramDict, ModuleName)

    def initialize(self):
        """Configure console or rotating-file logging from the ipop-config."""
        # Extract the controller log level from the ipop-config file;
        # default to INFO when none is provided.
        if "LogLevel" in self.CMConfig:
            level = getattr(logging, self.CMConfig["LogLevel"])
        else:
            # BUG FIX: the original used getattr(logging, "info"), which
            # returns the logging.info *function*, not a level constant;
            # basicConfig/setLevel reject a callable level with TypeError.
            level = logging.INFO
        # Check whether the logging is set to File by the user
        if self.CMConfig["LogOption"] == "Console":
            # Console logging
            logging.basicConfig(format='[%(asctime)s.%(msecs)03d] %(levelname)s:\n%(message)s\n', datefmt='%H:%M:%S',
                                level=level)
            logging.info("Logger Module Loaded")
        else:
            # Extract the file path, else write logs to the current directory
            filepath = self.CMConfig.get("LogFilePath", "./")
            fqname = filepath + \
                self.CMConfig.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            self.logger = logging.getLogger("IPOP Rotating Log")
            self.logger.setLevel(level)
            # Create a rotating file handler sized from the configuration
            handler = lh.RotatingFileHandler(filename=fqname, maxBytes=self.CMConfig["LogFileSize"],
                                             backupCount=self.CMConfig["BackupLogFileCount"])
            formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt='%Y%m%d %H:%M:%S')
            handler.setFormatter(formatter)
            # Add the file handler to the Python logger module
            self.logger.addHandler(handler)
        # PKTDUMP mode dumps packet information at a custom level below DEBUG
        logging.addLevelName(5, "PKTDUMP")
        logging.PKTDUMP = 5

    def processCBT(self, cbt):
        """Dispatch an incoming CBT; ``cbt.action`` names the log level."""
        if cbt.action in ('debug', 'info', 'warning', 'error'):
            # Route to the rotating-file logger or the root logger,
            # depending on the configured LogOption.  The level-named
            # methods are identical on both, so a single lookup replaces
            # the original four-way elif chain.
            if self.CMConfig["LogOption"] == "File":
                log_fn = getattr(self.logger, cbt.action)
            else:
                log_fn = getattr(logging, cbt.action)
            log_fn(cbt.initiator + ": " + cbt.data)
        elif cbt.action == "pktdump":
            self.pktdump(message=cbt.data.get('message'),
                         dump=cbt.data.get('dump'))
        else:
            log = '{0}: unrecognized CBT {1} received from {2}'\
                .format(cbt.recipient, cbt.action, cbt.initiator)
            self.registerCBT('Logger', 'warning', log)

    def timer_method(self):
        # No periodic work for the logger module.
        pass

    def pktdump(self, message, dump=None, *args, **argv):
        """Log *message* at the PKTDUMP level (5), optionally followed by a
        hex rendering of *dump*, 16 bytes per line."""
        hext = ""
        if dump:
            # NOTE(review): str.encode("hex") is Python 2 only; this would
            # raise on Python 3.  Assumed to match the interpreter the rest
            # of this controller targets -- confirm before porting.
            for i in range(0, len(dump), 2):
                hext += dump[i:i+2].encode("hex")
                hext += " "
                if i % 16 == 14:
                    hext += "\n"
            logging.log(5, message + "\n" + hext)
        else:
            logging.log(5, message, *args, **argv)

    def terminate(self):
        # Nothing to clean up; logging handlers are released at exit.
        pass
| 43.930435 | 117 | 0.600752 | 3,803 | 0.752771 | 0 | 0 | 0 | 0 | 0 | 0 | 2,064 | 0.408551 |
7f9d05c84680fa636b07deb1bc4b9da548452f7b | 3,879 | py | Python | setup.py | annieapp/annie | 82e0e858d835fcafa70629377a58102699bb8bb2 | [
"MIT"
] | 4 | 2019-06-17T15:44:54.000Z | 2022-03-25T15:40:04.000Z | setup.py | annieapp/annie | 82e0e858d835fcafa70629377a58102699bb8bb2 | [
"MIT"
] | 5 | 2019-06-12T15:25:51.000Z | 2019-07-04T23:33:06.000Z | setup.py | RDIL/annie | 82e0e858d835fcafa70629377a58102699bb8bb2 | [
"MIT"
] | 1 | 2019-06-18T00:04:41.000Z | 2019-06-18T00:04:41.000Z | """
Annie Modified MIT License
Copyright (c) 2019-present year Reece Dunham and the Annie Team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, and/or distribute
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SELLING THE SOFTWARE IS ALSO NOT ALLOWED WITHOUT WRITTEN PERMISSION
FROM THE ANNIE TEAM.
"""
import setuptools
CLASSIFIERS = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: PyPy",
"Operating System :: Microsoft",
"Operating System :: Microsoft :: Windows :: Windows 10",
"Operating System :: Microsoft :: Windows :: Windows 8",
"Operating System :: Microsoft :: Windows :: Windows 8.1",
"Operating System :: Microsoft :: Windows :: Windows 7",
"Operating System :: MacOS",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Operating System :: Other OS",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
"Topic :: System",
"Topic :: Terminals",
"Topic :: Text Processing",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
"Topic :: System :: Monitoring",
"Topic :: System :: Software Distribution",
"Development Status :: 4 - Beta",
"Framework :: IDLE",
"Framework :: Flask",
"Natural Language :: English",
"Environment :: Web Environment"
]
URLs = \
{
"Bug Tracker": "https://github.com/annieapp/annie/issues",
"Documentation": "https://docs.annieapp.co",
"Source Code": "https://github.com/annieapp/annie",
"License": "https://github.com/annieapp/annie/blob/master/LICENSE",
}
setuptools.setup(
name='annie-server',
version='1.4.0',
author="Annie Team",
author_email="support@rdil.rocks",
description="Annie Server",
license="See https://github.com/annieapp/annie/blob/master/LICENSE",
url="https://annieapp.co",
packages=setuptools.find_packages(exclude=["docs", "frontend"]),
include_package_data=True,
zip_safe=False,
install_requires=[
"Flask>=1.1.1",
"lcbools>=1.0.2"
],
classifiers=CLASSIFIERS,
project_urls=URLs,
download_url="https://github.com/annieapp/annie/releases",
keywords=["annie", "server", "analytics", "monitoring"],
long_description="See https://annieapp.co",
long_description_content_type="text/markdown"
)
| 39.181818 | 78 | 0.685228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,118 | 0.803815 |
7f9deee06e30542f48efc2735e6a677bc1ee2c2d | 427 | py | Python | store/migrations/0011_watch_slug.py | StilyanMinchev/OnlineStore | 9f4c5a4d13150d753a0a85908c0cd56246106fb8 | [
"MIT"
] | 1 | 2020-12-27T09:36:28.000Z | 2020-12-27T09:36:28.000Z | store/migrations/0011_watch_slug.py | StilyanMinchev/OnlineStore | 9f4c5a4d13150d753a0a85908c0cd56246106fb8 | [
"MIT"
] | 7 | 2021-06-05T00:01:05.000Z | 2022-03-12T00:52:05.000Z | store/migrations/0011_watch_slug.py | StilyanMinchev/OnlineStore | 9f4c5a4d13150d753a0a85908c0cd56246106fb8 | [
"MIT"
] | 1 | 2020-12-27T09:36:31.000Z | 2020-12-27T09:36:31.000Z | # Generated by Django 3.1.3 on 2020-12-13 08:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a non-editable ``slug`` field to
    the ``Watch`` model."""
    dependencies = [
        ('store', '0010_auto_20201212_2058'),
    ]
    operations = [
        migrations.AddField(
            model_name='watch',
            name='slug',
            # default=1 is a one-off value for pre-existing rows;
            # preserve_default=False keeps it out of the model definition.
            field=models.SlugField(default=1, editable=False),
            preserve_default=False,
        ),
    ]
| 21.35 | 62 | 0.594848 | 334 | 0.782201 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.215457 |
7f9f23fa80527780dae80cad8f55b90a2cf98725 | 11,329 | py | Python | main.py | SantiagoBeltranO/teenscooktoo | 8a73981c67ca80eb774cecc7d34e7b0e547c5da9 | [
"MIT"
] | null | null | null | main.py | SantiagoBeltranO/teenscooktoo | 8a73981c67ca80eb774cecc7d34e7b0e547c5da9 | [
"MIT"
] | null | null | null | main.py | SantiagoBeltranO/teenscooktoo | 8a73981c67ca80eb774cecc7d34e7b0e547c5da9 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import os
import jinja2
import webapp2
from models.models import *
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from libs import mylib
template_dir = os.path.join(os.path.dirname(__file__), 'views')
jinja_env = jinja2.Environment (loader = jinja2.FileSystemLoader(template_dir),
autoescape = True,
extensions = ['jinja2.ext.autoescape'])
class Handler(webapp2.RequestHandler):
    """Base request handler: adds Jinja2 rendering helpers and an
    ``is_ajax()`` predicate on the incoming request."""
    def initialize(self, request, response):
        super(Handler, self).initialize(request, response)
        # Flag AJAX requests so views can render only the partial template.
        request.is_ajax = lambda:request.environ.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    def write(self, *a, **kw):
        # Write raw output to the HTTP response body.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a Jinja2 template to a string.
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render a template and write the result to the response.
        self.write(self.render_str(template, **kw))
class Index(Handler):
    """Serve the site shell (index.html)."""
    def get(self):
        # cadenanav: navigation markup built by mylib -- presumably the
        # nav chain; see mylib.getcadenanav.
        cadenanav = mylib.getcadenanav()
        self.render("index.html", cadenanav = cadenanav, is_ajax = self.request.is_ajax())
class Home(Handler):
    """Serve the intro page; AJAX requests skip the navigation chain."""
    def get(self):
        is_ajax = self.request.is_ajax()
        cadenanav = ""
        if not is_ajax:
            # Full page loads also need the navigation chain.
            cadenanav = mylib.getcadenanav()
        self.render("intro.html", cadenanav = cadenanav, is_ajax = is_ajax)
class About(Handler):
    """Serve the about page; AJAX requests skip the navigation chain."""
    def get(self):
        is_ajax = self.request.is_ajax()
        cadenanav = ""
        if not is_ajax:
            # Full page loads also need the navigation chain.
            cadenanav = mylib.getcadenanav()
        self.render("about.html", cadenanav = cadenanav, is_ajax = is_ajax)
class Details(Handler):
    """Serve the details page; AJAX requests skip the navigation chain."""
    def get(self):
        is_ajax = self.request.is_ajax()
        cadenanav = ""
        if not is_ajax:
            # Full page loads also need the navigation chain.
            cadenanav = mylib.getcadenanav()
        self.render("details.html", cadenanav = cadenanav, is_ajax = is_ajax)
class Tools(Handler):
    """Serve the tools page; AJAX requests skip the navigation chain."""
    def get(self):
        is_ajax = self.request.is_ajax()
        cadenanav = ""
        if not is_ajax:
            # Full page loads also need the navigation chain.
            cadenanav = mylib.getcadenanav()
        self.render("tools.html", cadenanav = cadenanav, is_ajax = is_ajax)
class Health(Handler):
    """Serve the health page; AJAX requests skip the navigation chain."""
    def get(self):
        is_ajax = self.request.is_ajax()
        cadenanav = ""
        if not is_ajax:
            # Full page loads also need the navigation chain.
            cadenanav = mylib.getcadenanav()
        self.render("health.html", cadenanav = cadenanav, is_ajax = is_ajax)
class Consult(Handler):
    """Read-only recipe browsing: pick a speciality, then view one dish."""
    def get(self):
        # Show the speciality selector configured for the 'consult' flow.
        speciality = Speciality()
        specialities = speciality.get_specialities()
        option = 'consult'
        self.render("selspeciality.html", specialities = specialities,
                    option=option)
    def post(self):
        """Render the detail page for the dish id posted by the client."""
        dish_id = self.request.get('dish_id', default_value=None)
        thedish = Dish.get_by_id(int(dish_id))
        # Normalize free text for display (see mylib.formatcadena).
        thedish.description = mylib.formatcadena(thedish.description)
        thedish.directions = mylib.formatcadena(thedish.directions)
        # Media fields are optional; pass their keys as strings, or "" when absent.
        photo = ""
        if thedish.photo:
            photo = str(thedish.photo)
        audio = ""
        if thedish.audio:
            audio = str(thedish.audio)
        video = ""
        if thedish.video:
            video = str(thedish.video)
        photogallery = ""
        if thedish.photogallery:
            photogallery = str(thedish.photogallery)
        # Format each ingredient's unit ([2]) and quantity ([1]) in place.
        # NOTE(review): index meanings inferred from the formatter names
        # (formatunidad/formatquantity) -- confirm against Ingredientdish.
        ingredientdish = Ingredientdish()
        ingredients = ingredientdish.get_ingredientsofadish(int(dish_id))
        for ingredient in ingredients:
            ingredient[2] = mylib.formatunidad(ingredient[2])
            ingredient[1] = mylib.formatquantity(ingredient[1])
        self.render("consultadish.html", thedish = thedish,
                    ingredients = ingredients,
                    photo = photo,
                    audio = audio,
                    video = video,
                    photogallery = photogallery)
class Recap(Handler):
    """Show the speciality selector configured for the 'recap' flow."""
    def get(self):
        speciality = Speciality()
        specialities = speciality.get_specialities()
        option = 'recap'
        self.render("selspeciality.html", specialities = specialities,
                    option=option)
class Update(Handler):
    """Password-gated dish editing: GET shows the password form; POST either
    validates the password or applies/prepares an edit for one dish."""
    def get(self):
        self.render("restricted.html")
    def post(self):
        restricted = self.request.get('restricted', default_value=None)
        if restricted:
            # First POST carries the access password.
            # NOTE(review): this compares against a hard-coded MD5-looking
            # hash of the submitted value -- weak access control; consider a
            # real authentication mechanism.
            if restricted == "e44165d04caec1113db6159d0210c4c3":
                speciality = Speciality()
                specialities = speciality.get_specialities()
                option = 'update'
                self.render("selspeciality.html", specialities = specialities,
                    option=option)
            else:
                # Wrong password: fall back to the intro page.
                self.render("intro.html")
        else:
            # Subsequent POSTs carry dish edit data.
            dish_id = self.request.get('dish_id', default_value=None)
            description = self.request.get('description', default_value=None)
            servings = self.request.get('servings', default_value=None)
            directions = self.request.get('directions', default_value=None)
            if description:
                # Text fields submitted: persist the update.
                dish = Dish()
                dish.update(dish_id, description, int(servings), directions)
            else:
                # No text yet: render the edit form with one blobstore
                # upload URL per media type.
                upload_url_photo = blobstore.create_upload_url('/photos/' + dish_id)
                upload_url_audio = blobstore.create_upload_url('/audios/' + dish_id)
                upload_url_video = blobstore.create_upload_url('/videos/' + dish_id)
                upload_url_photogallery = blobstore.create_upload_url('/photogallery/' + dish_id)
                thedish = Dish.get_by_id(int(dish_id))
                self.render("updateadish.html", thedish = thedish,
                    dish_id = int(dish_id),
                    upload_url_photo = upload_url_photo,
                    upload_url_audio = upload_url_audio,
                    upload_url_video = upload_url_video,
                    upload_url_photogallery = upload_url_photogallery)
class Dishesspeciality(Handler):
    """List the dishes of a speciality: a formatted recap view for the
    'recap' option, or a plain selection list for any other flow."""
    def post(self, option):
        speciality_id = self.request.get('speciality_id', default_value=None)
        if option == 'recap':
            thespeciality = ndb.Key('Speciality', int(speciality_id))
            dishes = ndb.gql("SELECT * FROM Dish WHERE speciality = :1", thespeciality).order(Dish.name).fetch()
            # Normalize each description for display.  (Idiom fix: the
            # original kept a manual index counter `i` alongside the loop
            # variable; mutating the fetched entity directly is equivalent.)
            for d in dishes:
                d.description = mylib.formatcadena(d.description)
            self.render("recapspeciality.html", dishes=dishes)
        else:
            dish = Dish()
            dishes = dish.get_dishes(int(speciality_id))
            self.render("dishesspeciality.html", dishes=dishes,
                        option=option)
class Photos(Handler, blobstore_handlers.BlobstoreUploadHandler, blobstore_handlers.BlobstoreDownloadHandler):
    """Download (GET) or upload (POST) the photo blob attached to a dish."""
    def get(self, dish_id):
        # Stream the stored photo blob as a file download.
        dish = Dish()
        infophoto = dish.getphoto(dish_id)
        self.send_blob(infophoto, save_as=True)
    def post(self, dish_id):
        upload_files = self.get_uploads('photo')
        if len(upload_files) == 0:
            # No file submitted: clear the dish's photo reference.
            photo = None
        else:
            blob_info = upload_files[0]
            photo = blob_info.key()
        # Update the dish's stored photo reference ("Actualizar photo").
        """
        Actualizar photo
        """
        dish = Dish()
        dish.updatephoto(dish_id, photo)
        self.response.headers['Content-Type'] = 'text/plain'
        self.redirect('/')
class Audios(Handler, blobstore_handlers.BlobstoreUploadHandler, blobstore_handlers.BlobstoreDownloadHandler):
    """Download (GET) or upload (POST) the audio blob attached to a dish."""
    def get(self, dish_id):
        # Stream the stored audio blob as a file download.
        dish = Dish()
        infoaudio = dish.getaudio(dish_id)
        self.send_blob(infoaudio, save_as=True)
    def post(self, dish_id):
        upload_files = self.get_uploads('audio')
        if len(upload_files) == 0:
            # No file submitted: clear the dish's audio reference.
            audio = None
        else:
            blob_info = upload_files[0]
            audio = blob_info.key()
        # Update the dish's stored audio reference ("Actualizar audio").
        """
        Actualizar audio
        """
        dish = Dish()
        dish.updateaudio(dish_id, audio)
        self.response.headers['Content-Type'] = 'text/plain'
        self.redirect('/')
class Videos(Handler, blobstore_handlers.BlobstoreUploadHandler, blobstore_handlers.BlobstoreDownloadHandler):
    """Download (GET) or upload (POST) the video blob attached to a dish."""
    def get(self, dish_id):
        # Stream the stored video blob as a file download.
        dish = Dish()
        infovideo = dish.getvideo(dish_id)
        self.send_blob(infovideo, save_as=True)
    def post(self, dish_id):
        upload_files = self.get_uploads('video')
        if len(upload_files) == 0:
            # BUG FIX: this branch previously assigned `audio = None`,
            # leaving `video` undefined and raising NameError at
            # updatevideo() whenever the form was submitted with no file.
            video = None
        else:
            blob_info = upload_files[0]
            video = blob_info.key()
        # Update the dish's stored video reference.
        dish = Dish()
        dish.updatevideo(dish_id, video)
        self.response.headers['Content-Type'] = 'text/plain'
        self.redirect('/')
class Photogallery(Handler, blobstore_handlers.BlobstoreUploadHandler, blobstore_handlers.BlobstoreDownloadHandler):
    """Download (GET) or upload (POST) the gallery photos of a dish."""
    def get(self, dish_id):
        # Stream the stored gallery blob(s) as a file download.
        dish = Dish()
        infophotogallery = dish.getphotogallery(dish_id)
        self.send_blob(infophotogallery, save_as=True)
    def post(self, dish_id):
        upload_files = self.get_uploads('photogallery')
        if len(upload_files) == 0:
            # No files submitted: clear the dish's gallery.
            photogallery = None
        else:
            # Collect one blob key per uploaded image.
            photogallery = []
            for blob_info in upload_files:
                photogallery.append(blob_info.key())
        # Update the dish's gallery photos ("Actualizar photos de la galería").
        """
        Actualizar photos de la galería
        """
        dish = Dish()
        dish.updatephotogallery(dish_id, photogallery)
        self.response.headers['Content-Type'] = 'text/plain'
        self.redirect('/')
class GetImage(Handler):
    """Stream a stored photo blob inline as ``image/png``, looked up by the
    blob key passed in the ``id`` query parameter."""
    def get(self):
        # Renamed local from `id` to avoid shadowing the builtin.
        blob_key = self.request.get('id')
        photo_info = blobstore.BlobInfo.get(blob_key)
        img = photo_info.open().read()
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(img)
class GetSound(Handler):
    """Stream a stored audio blob inline as ``audio/mp4``, looked up by the
    blob key passed in the ``id`` query parameter."""
    def get(self):
        # Renamed local from `id` to avoid shadowing the builtin.
        blob_key = self.request.get('id')
        audio_info = blobstore.BlobInfo.get(blob_key)
        aud = audio_info.open().read()
        self.response.headers['Content-Type'] = 'audio/mp4'
        self.response.out.write(aud)
class GetVideo(Handler):
    """Stream a stored video blob inline as ``video/mov``, looked up by the
    blob key passed in the ``id`` query parameter."""
    def get(self):
        # Renamed local from `id` to avoid shadowing the builtin.
        blob_key = self.request.get('id')
        video_info = blobstore.BlobInfo.get(blob_key)
        vid = video_info.open().read()
        self.response.headers['Content-Type'] = 'video/mov'
        self.response.out.write(vid)
application = webapp2.WSGIApplication([
('/', Index),
('/home', Home),
('/about', About),
('/details', Details),
('/tools', Tools),
('/health', Health),
('/consult', Consult),
('/recap', Recap),
('/update', Update),
('/dishesspeciality/(\w+)', Dishesspeciality),
('/photos/(\d+)', Photos),
('/audios/(\d+)', Audios),
('/videos/(\d+)', Videos),
('/photogallery/(\d+)', Photogallery),
('/image', GetImage),
('/sound', GetSound),
('/video', GetVideo),
], debug=True) | 35.514107 | 116 | 0.57728 | 10,275 | 0.906884 | 0 | 0 | 0 | 0 | 0 | 0 | 1,236 | 0.109091 |
7fa0b2e0b6ffafed2c4103aacf903f5f57d207fd | 108 | py | Python | core/urls.py | pouya007RN/Voice-Changer | 829e5a9ed6332b692d3f9f73b36c2d773f44182b | [
"MIT"
] | null | null | null | core/urls.py | pouya007RN/Voice-Changer | 829e5a9ed6332b692d3f9f73b36c2d773f44182b | [
"MIT"
] | null | null | null | core/urls.py | pouya007RN/Voice-Changer | 829e5a9ed6332b692d3f9f73b36c2d773f44182b | [
"MIT"
] | null | null | null | from .views import index
from django.urls import path
urlpatterns = [
path('', index, name='index'),
] | 15.428571 | 34 | 0.675926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.083333 |
7fa185743757eeee2b89a1731ab4ef8f9039d128 | 154 | py | Python | shared/__init__.py | seenu-andi-rajendran/plagcomps | 98e82cfb871f73bbd8f4ab1452c2b27a95beee83 | [
"MIT"
] | 2 | 2015-01-18T06:20:27.000Z | 2021-03-19T21:19:16.000Z | shared/__init__.py | NoahCarnahan/plagcomps | 98e82cfb871f73bbd8f4ab1452c2b27a95beee83 | [
"MIT"
] | null | null | null | shared/__init__.py | NoahCarnahan/plagcomps | 98e82cfb871f73bbd8f4ab1452c2b27a95beee83 | [
"MIT"
] | 2 | 2015-11-19T12:52:14.000Z | 2016-11-11T17:00:50.000Z | # Required to use access this directory as a "package." Could add more to this file later!
# See http://docs.python.org/2/tutorial/modules.html#packages
| 51.333333 | 91 | 0.75974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.987013 |
7fa2a0f0ee0be54fb76852220d29ddc43297d130 | 937 | py | Python | mytravelblog/accounts/tests/forms/tests_EditPasswordForm.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | mytravelblog/accounts/tests/forms/tests_EditPasswordForm.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | mytravelblog/accounts/tests/forms/tests_EditPasswordForm.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | from django import test as django_tests
from django.contrib.auth import get_user_model
from mytravelblog.accounts.forms import UserLoginForm, EditPasswordForm
UserModel = get_user_model()
class EditPasswordFromTests(django_tests.TestCase):
def setUp(self):
self.username = 'testuser'
self.password1 = 'P@ssword1'
self.user = UserModel.objects.create_user(
username=self.username,
password=self.password1,
)
def test_form_populates_with_form_control_attribute(self):
data = {
'old_password': self.password1,
'new_password1': self.password1,
'new_password2': self.password1,
}
change_password_form = EditPasswordForm(data=data, user=self.user)
self.assertTrue(change_password_form.is_valid())
self.assertEqual('form-control', change_password_form.fields['old_password'].widget.attrs['class'])
| 33.464286 | 107 | 0.693703 | 744 | 0.794023 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.106724 |
7fa380f39d2046691cd5b2cd72080ca55bc6fd09 | 295 | py | Python | attic/operator/dispatch.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 5,651 | 2015-01-06T21:58:46.000Z | 2022-03-31T13:39:07.000Z | attic/operator/dispatch.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 42 | 2016-12-11T19:17:11.000Z | 2021-11-23T19:41:16.000Z | attic/operator/dispatch.py | matteoshen/example-code | b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3 | [
"MIT"
] | 2,394 | 2015-01-18T10:57:38.000Z | 2022-03-31T11:41:12.000Z | """
Experiments with infix operator dispatch
>>> kadd = KnowsAdd()
>>> kadd + 1
(<KnowsAdd object>, 1)
>>> kadd * 1
Traceback (most recent call last):
  ...
TypeError: unsupported operand type(s) for *: 'KnowsAdd' and 'int'
"""
class KnowsAdd:
    """Object whose ``+`` operator simply returns the pair of operands."""

    def __add__(self, other):
        # Dispatch experiment: addition yields the (self, other) tuple.
        return self, other

    def __repr__(self):
        return '<%s object>' % type(self).__name__
| 16.388889 | 56 | 0.572881 | 153 | 0.518644 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.508475 |
7fa45f3fdd3c79db48d1ae0e9746ea8d56e463f4 | 125 | py | Python | diary/admin.py | yokoc1322/day_has_a_name | 6d4bb47d07e3cade2c89995f3a744244ce95158f | [
"MIT"
] | null | null | null | diary/admin.py | yokoc1322/day_has_a_name | 6d4bb47d07e3cade2c89995f3a744244ce95158f | [
"MIT"
] | 4 | 2021-03-19T01:28:08.000Z | 2021-06-04T22:52:46.000Z | diary/admin.py | yokoc1322/day_has_a_name | 6d4bb47d07e3cade2c89995f3a744244ce95158f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Writer, Record
# Expose the diary models in the Django admin interface.
admin.site.register(Writer)
admin.site.register(Record)
| 20.833333 | 34 | 0.816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7fa5e85b7397fa24db26c6520e306df8fed85fb5 | 1,279 | py | Python | cs498_positioning/kalman.py | liuzikai/UWB_Master | 7ee360e15388912e26d2e624e8b8a288e7999963 | [
"Apache-2.0"
] | 1 | 2020-12-07T16:15:12.000Z | 2020-12-07T16:15:12.000Z | cs498_positioning/kalman.py | liuzikai/UWB_Master | 7ee360e15388912e26d2e624e8b8a288e7999963 | [
"Apache-2.0"
] | null | null | null | cs498_positioning/kalman.py | liuzikai/UWB_Master | 7ee360e15388912e26d2e624e8b8a288e7999963 | [
"Apache-2.0"
] | 1 | 2020-01-08T10:08:00.000Z | 2020-01-08T10:08:00.000Z | from pykalman import KalmanFilter
import numpy as np
kf = KalmanFilter(transition_matrices=np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]),
observation_matrices=np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]),
transition_covariance=0.003 * np.eye(6, dtype=float)) # TODO: change this constant
# Mutable filter state shared across calls: step counter, current state
# estimate (6-vector) and its covariance, updated in place below.
t = 0
means = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
covariances = np.eye(6, dtype=float)
def kalman_filter(measurement):
    """Fold one 3-D measurement into the filter and return the updated
    position estimate (first three state components).

    Mutates the module-level ``t``, ``means`` and ``covariances``.
    The state layout is presumably [x, y, z, vx, vy, vz] given the
    transition matrix above -- TODO confirm with the caller.
    """
    global t, means, covariances
    # Single predict+update step through the module-level KalmanFilter ``kf``.
    new_filtered_means, new_filtered_covariances = (kf.filter_update(means, covariances, measurement))
    means, covariances = new_filtered_means, new_filtered_covariances
    t = t + 1.0  # NOTE(review): counter is incremented but never read here.
    # print(means[:3]);
    return means[:3]
| 44.103448 | 102 | 0.433933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.036747 |
7fa63fc07d4fb1ef716494e877960f58b0ca18d6 | 1,935 | py | Python | year_3/databases_sem1/lab2/rdmslab2/api/dbmodels/payrolls.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | year_3/databases_sem1/lab2/rdmslab2/api/dbmodels/payrolls.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | 21 | 2020-03-24T16:26:04.000Z | 2022-02-18T15:56:16.000Z | year_3/databases_sem1/lab2/rdmslab2/api/dbmodels/payrolls.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | from api.models import Payroll
class Payrolls(object):
    """Data-access helper for the ``payrolls`` table.

    Every statement is issued through the MySQLdb cursor given at
    construction time; the session charset is forced to utf8 up front.
    """

    def __init__(self, cursor):
        """
        :param cursor: MySQLdb.cursor.Cursor
        """
        self.cursor = cursor
        # Force utf8 for the whole session before any data statement runs.
        for statement in ('SET NAMES utf8;',
                          'SET CHARACTER SET utf8;',
                          'SET character_set_connection=utf8;'):
            cursor.execute(statement)

    @staticmethod
    def _map_payroll(payroll_db_data):
        # A result row maps positionally onto the Payroll model constructor.
        return Payroll(*payroll_db_data)

    def readall(self):
        """Return every payroll row as a list of Payroll instances."""
        self.cursor.execute(
            """
            SELECT
                Id, EmployeeId, PaymentId, ProjectId, PayrollDate
            FROM
                payrolls
            """
        )
        rows = self.cursor.fetchall()
        return [self._map_payroll(row) for row in rows]

    def add(self, payroll):
        """Insert a new payroll record built from ``payroll``'s fields."""
        self.cursor.execute(
            """
            INSERT
                INTO payrolls
                (EmployeeId, PaymentId, ProjectId, PayrollDate)
            VALUES
                (%s, %s, %s, %s)
            """,
            (
                payroll.employee_id,
                payroll.payment_id,
                payroll.project_id,
                payroll.payroll_date,
            )
        )

    def remove(self, payroll_id):
        """Delete the payroll row with the given primary key."""
        self.cursor.execute(
            """
            DELETE
            FROM payrolls
            WHERE Id=%s
            """,
            [payroll_id]
        )

    def update(self, payroll, payroll_id):
        """Overwrite the row identified by ``payroll_id`` with ``payroll``'s fields."""
        self.cursor.execute(
            """
            UPDATE
                payrolls
            SET
                EmployeeId=%s,
                PaymentId=%s,
                ProjectId=%s,
                PayrollDate=%s
            WHERE
                payrolls.Id=%s
            """,
            (
                payroll.employee_id,
                payroll.payment_id,
                payroll.project_id,
                payroll.payroll_date,
                payroll_id
            )
        )
| 23.888889 | 61 | 0.477519 | 1,901 | 0.982429 | 0 | 0 | 93 | 0.048062 | 0 | 0 | 765 | 0.395349 |
7fa65605f373bb773e7700052c14cdd58e55214a | 929 | py | Python | Python/eight_kyu/abbrev_name.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | 1 | 2019-12-20T04:09:56.000Z | 2019-12-20T04:09:56.000Z | Python/eight_kyu/abbrev_name.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | Python/eight_kyu/abbrev_name.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | # Python solution for 'Abbreviate a Two Word Name' codewars question.
# Level: 8 kyu
# Tags: FUNDAMENTALS, STRINGS, and ARRAYS.
# Author: Jack Brokenshire
# Date: 16/05/2020
import unittest
def abbrev_name(name):
    """
    Converts a two-word name into dotted initials.
    :param name: a string such as "Sam Harris".
    :return: the two capitalized initials separated by a dot, e.g. "S.H".
    """
    words = name.split()
    # Uppercasing the joined result is equivalent to uppercasing each
    # initial separately (the dot is unaffected).
    return (words[0][0] + '.' + words[1][0]).upper()
class TestAbbrevName(unittest.TestCase):
    """Unit tests for the 'abbrev_name' function."""

    def test_abbrev_name(self):
        cases = [
            ("Sam Harris", "S.H"),
            ("Patrick Feenan", "P.F"),
            ("Evan Cole", "E.C"),
            ("P Favuzzi", "P.F"),
            ("David Mendieta", "D.M"),
        ]
        for full_name, expected in cases:
            self.assertEqual(abbrev_name(full_name), expected)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 28.151515 | 69 | 0.658773 | 421 | 0.453175 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.499462 |
7fa7c7a849af2d16d11ffa6cce19f13ddd50d411 | 1,876 | py | Python | assets/python_scripts/cvScale.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | 3 | 2018-09-15T05:47:35.000Z | 2019-04-08T07:00:02.000Z | assets/python_scripts/cvScale.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | null | null | null | assets/python_scripts/cvScale.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | null | null | null | import os
import cv2
import random
# Directory holding the source images plus their label .txt files.
src_dir = "select2000/"
# Running index used to name the augmented output image/label pairs.
name_idx = 6000
dest_img_dir = "scale/image/"
dest_txt_dir = "scale/txt/"
img_list = os.listdir(src_dir)
def write_label(old_name, new_name, hratio_, wratio_):
    """Copy label file ``old_name`` to ``new_name`` with its 8 corner
    coordinates scaled: even fields (x) by ``wratio_``, odd fields (y) by
    ``hratio_``; the 9th comma-separated field (the text label) is kept.

    Fixes the original, which indexed ``cordiante_str[0]`` even for lines
    with fewer than 9 fields (e.g. the empty string produced by a trailing
    newline in the file) and crashed with an IndexError; such lines are now
    skipped. Files are closed via context managers.
    """
    with open(src_dir+old_name) as old_obj, \
            open(dest_txt_dir+new_name, 'w') as new_obj:
        for gt_line in old_obj.read().split('\n'):
            gt_ind = gt_line.split(',')
            if len(gt_ind) <= 8:
                continue  # blank or malformed line: nothing to scale
            cordiante_str = []
            for i in range(0, 8):
                # x coordinates sit at even indices, y at odd ones.
                ratio = wratio_ if i % 2 == 0 else hratio_
                cordiante_str.append(str(float(gt_ind[i]) * ratio))
            new_obj.write(','.join(cordiante_str) + ',' + gt_ind[8] + '\n')
# Augment every image in src_dir with a random down-scale and rewrite the
# matching label file with coordinates scaled by the same ratios.
for img_name in img_list:
    if '.txt' in img_name:
        continue
    print img_name
    # NOTE(review): rstrip('jpg') strips any trailing 'j'/'p'/'g' chars,
    # not the literal '.jpg' suffix; names like 'ajpg.jpg' would be mangled.
    txt_name = img_name.rstrip('jpg') + 'txt'
    new_txt_name = str(name_idx).zfill(6) + '.txt'
    img = cv2.imread(src_dir+img_name)
    height, width, c = img.shape
    hratio = 0
    wratio = 0
    # 70% of the time scale both axes by the same ratio; otherwise pick the
    # width ratio independently (anisotropic scaling). Ratios are 0.5-0.9.
    prob = random.choice(range(0, 10))
    hratio = random.choice(range(5, 10)) / 10.
    if prob < 7:
        wratio = hratio
    else:
        wratio = random.choice(range(5, 10)) / 10.
    scale_img = cv2.resize(img, (int(width*wratio), int(height*hratio)), interpolation=cv2.INTER_AREA)
    write_label(txt_name, new_txt_name, hratio, wratio)
    cv2.imwrite(dest_img_dir+str(name_idx).zfill(6)+'.jpg', scale_img)
    name_idx += 1
    #print hratio, wratio
    #break
| 30.258065 | 244 | 0.591151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.071429 |
7fa9c8f061e4bf8efdf7486f8dd64dceb09e0005 | 2,912 | py | Python | segway_ros/src/segway/segway_system_wd.py | shengchen-liu/segway_v3 | 6469b92f66e6e05aeff26cb525520948119aacac | [
"BSD-3-Clause"
] | null | null | null | segway_ros/src/segway/segway_system_wd.py | shengchen-liu/segway_v3 | 6469b92f66e6e05aeff26cb525520948119aacac | [
"BSD-3-Clause"
] | null | null | null | segway_ros/src/segway/segway_system_wd.py | shengchen-liu/segway_v3 | 6469b92f66e6e05aeff26cb525520948119aacac | [
"BSD-3-Clause"
] | null | null | null | """--------------------------------------------------------------------
COPYRIGHT 2014 Stanley Innovation Inc.
Software License Agreement:
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file segway_system_wd.py
\brief This is the segway system watchdod which monitors signals
from the embedded power system to safely shutdown the PC
upon embedded powerdown
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
import socket
import sys
import rospy
import os
from utils import m32
class SegwayWatchdog:
    """Listens on UDP port 6234 for the embedded power system's shutdown
    magic word and powers off the onboard PC when it is received."""

    def __init__(self):
        """
        Initialize the UDP connection (non-blocking, bound to port 6234).
        """
        self._continue = True
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.conn.setblocking(False)
        self.conn.bind(('', 6234))

    def Receive(self):
        """Poll the socket for a 4-byte shutdown command.

        The socket is non-blocking, so a read with nothing pending raises
        socket.error; treat that as "no data". Fix: the original bare
        ``except:`` also swallowed KeyboardInterrupt/SystemExit, making the
        watchdog impossible to interrupt cleanly.
        """
        try:
            data = self.conn.recv(4)
        except socket.error:
            data = []
        if (len(data) == 4):
            rx_dat = [ord(i) for i in data]
            shutdwn_cmd = m32(rx_dat)
            # Magic word sent by the embedded side to request a PC shutdown.
            if (0x8756BAEB == shutdwn_cmd):
                rospy.logerr("Platform signaled shutdown, need to shutdown the onboard PC")
                self.Close()
                os.system("sudo shutdown now -h")
                sys.exit(0)

    def Close(self):
        """Release the UDP socket."""
        self.conn.close()
| 35.084337 | 91 | 0.65831 | 930 | 0.319368 | 0 | 0 | 0 | 0 | 0 | 0 | 2,109 | 0.724245 |
7faa1f86c7a3da87c151ad577e7101f3270f7242 | 340 | py | Python | src/boot.py | macbury/pussificator | 1a8a3e0d029ec6f22161dfc41349f82b5df55a9f | [
"MIT"
] | 3 | 2019-01-09T09:54:25.000Z | 2019-06-30T07:10:01.000Z | src/boot.py | macbury/pussificator | 1a8a3e0d029ec6f22161dfc41349f82b5df55a9f | [
"MIT"
] | null | null | null | src/boot.py | macbury/pussificator | 1a8a3e0d029ec6f22161dfc41349f82b5df55a9f | [
"MIT"
] | null | null | null | import logging
import os
from ruamel.yaml import YAML
# Module logger; basicConfig below applies to the whole process.
LOGGER = logging.getLogger('pussy')
logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
)
# YAML configuration file resolved relative to the current working dir.
CONFIG_PATH = os.path.join('./', 'config.yaml')
LOGGER.info("Loading config: " + CONFIG_PATH)
CONFIG = YAML(typ='safe').load(open(CONFIG_PATH)) | 26.153846 | 59 | 0.717647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.288235 |
7fad1ad0db50dcc08b312498186ef8fc1fe2906e | 501 | py | Python | app/helpers/response.py | trongdth/python-flask | a9db95a93d828e984399dc40735fe9db9aea974c | [
"MIT"
] | 2 | 2020-06-30T18:03:12.000Z | 2021-09-02T11:31:59.000Z | restapi/app/helpers/response.py | ninjadotorg/KPI | 430617a1e85304a254cc7364d524d721b8d45b11 | [
"MIT"
] | 10 | 2020-09-05T23:29:52.000Z | 2022-03-11T23:36:59.000Z | restapi/app/helpers/response.py | ninjadotorg/KPI | 430617a1e85304a254cc7364d524d721b8d45b11 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import jsonify
def response_ok(value=None, message='', code=-1):
    """Build a success JSON response.

    :param value: optional payload attached under the 'data' key.
    :param message: human-readable message (always present in the body).
    :param code: application-specific status code.
    :return: Flask response produced by jsonify.
    """
    result = {
        'status': 1,
        'message': message,
        'code': code
    }
    # Fix: the original conditionally re-assigned result['message'] when the
    # message was non-empty, which was a no-op -- the literal above already
    # stores it. The redundant branch is removed; behavior is unchanged.
    if value is not None:
        result.update({'data': value})
    return jsonify(result)
def response_error(message='', code=-1, status=0):
    """Build an error JSON response; 'message' is attached only when non-empty."""
    result = {'status': status, 'code': code}
    if message:
        result['message'] = message
    return jsonify(result)
| 16.16129 | 50 | 0.636727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.209581 |
7fae23033f24a25e823cfa616f0af4eaa950d635 | 3,344 | py | Python | utest/resources/robotdata/datagenerator.py | guojiajiaok/RIDE | d797155ed931fe865e62fad20ffb4c60d9934e81 | [
"ECL-2.0",
"Apache-2.0"
] | 775 | 2015-01-12T06:54:09.000Z | 2022-03-25T05:18:05.000Z | utest/resources/robotdata/datagenerator.py | guojiajiaok/RIDE | d797155ed931fe865e62fad20ffb4c60d9934e81 | [
"ECL-2.0",
"Apache-2.0"
] | 2,191 | 2015-05-19T16:49:09.000Z | 2022-03-28T21:38:34.000Z | utest/resources/robotdata/datagenerator.py | guojiajiaok/RIDE | d797155ed931fe865e62fad20ffb4c60d9934e81 | [
"ECL-2.0",
"Apache-2.0"
] | 382 | 2015-01-24T08:41:44.000Z | 2022-03-13T10:14:20.000Z | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from getopt import getopt, GetoptError
from random import randint
import os
SUITE=\
"""*** Settings ***
Resource resource.txt
*** Test Cases ***
%TESTCASES%
*** Keywords ***
Test Keyword
Log jee
"""
RESOURCE=\
"""*** Variables ***
@{Resource Var} MOI
*** Keywords ***
%KEYWORDS%
"""
KEYWORD_TEMPLATE=\
"""My Keyword %KW_ID%
No Operation"""
TEST_CASE_TEMPLATE=\
"""My Test %TEST_ID%
My Keyword %KW_ID%
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
My Keyword %KW_ID%
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi"""
def generate_tests(number_of_tests, number_of_keywords):
    """Render ``number_of_tests`` test case bodies, each referencing a
    randomly chosen keyword id out of ``number_of_keywords``."""
    rendered = []
    for test_id in range(number_of_tests):
        kw_id = randint(0, number_of_keywords - 1)
        body = TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))
        rendered.append(body.replace('%KW_ID%', str(kw_id)))
    return '\n'.join(rendered)
def generate_keywords(number_of_keywords):
    """Render keyword definitions for ids 0..number_of_keywords-1."""
    parts = [KEYWORD_TEMPLATE.replace('%KW_ID%', str(kw_id))
             for kw_id in range(number_of_keywords)]
    return '\n'.join(parts)
def generate_suite(number_of_tests, number_of_keywords):
    """Render a complete suite file with generated tests and keywords."""
    with_tests = SUITE.replace('%TESTCASES%',
                               generate_tests(number_of_tests, number_of_keywords))
    return with_tests.replace('%KEYWORDS%', generate_keywords(number_of_keywords))
def generate_resource(number_of_keywords):
    """Render a resource file containing the generated keywords."""
    keywords = generate_keywords(number_of_keywords)
    return RESOURCE.replace('%KEYWORDS%', keywords)
def generate(directory, suites, tests, keywords):
    """Create ``directory`` and fill it with the generated suite files plus
    one shared resource file.

    Fix: files are now written through context managers, so handles are
    closed even when a write fails (the original leaked them on error).
    """
    os.mkdir(directory)
    for suite_index in range(suites):
        suite_path = os.path.join('.', directory, 'suite%s.txt' % suite_index)
        with open(suite_path, 'w') as suite_file:
            suite_file.write(generate_suite(tests, keywords))
    resource_path = os.path.join('.', directory, 'resource.txt')
    with open(resource_path, 'w') as resource_file:
        resource_file.write(generate_resource(keywords))
def usage():
    """Print the command-line help for this generator script."""
    print('datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]')
def main(args):
    """Parse command-line options and generate the test data.

    Fix: ``sys`` is imported locally. The original relied on the
    ``if __name__ == '__main__'`` block having imported it into the module
    namespace, so calling main() from an importing module raised NameError
    instead of printing usage and exiting with status 2.
    """
    import sys
    try:
        opts, args = getopt(args, 'd:s:t:k:', [])
    except GetoptError as e:
        print(e)
        usage()
        sys.exit(2)
    if len(opts) != 4:
        if opts:
            print(opts)
        usage()
        sys.exit(2)
    # Defensive defaults; the options are mutually exclusive, so an
    # elif chain is both clearer and marginally faster.
    directory = suites = tests = keywords = None
    for opt, arg in opts:
        if opt == '-d':
            directory = arg
        elif opt == '-s':
            suites = int(arg)
        elif opt == '-t':
            tests = int(arg)
        elif opt == '-k':
            keywords = int(arg)
    generate(directory, suites, tests, keywords)
# Script entry point: forward the command-line arguments to main().
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
| 25.333333 | 120 | 0.639653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,455 | 0.435108 |
7fae83cec822c86f4d08919e0c38bf973182583d | 3,074 | py | Python | pyrobolearn/policies/linear.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/policies/linear.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/policies/linear.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the linear policy.
The linear policy uses a linear parametric approximator to predict the action vector based on the state vector.
"""
from pyrobolearn.policies.policy import Policy
from pyrobolearn.approximators import LinearApproximator
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class LinearPolicy(Policy):
    r"""Linear Policy

    Policy whose state-to-action mapping is the linear parametric model
    :math:`y = W x + b`, where :math:`x` is the state vector, :math:`y`
    the action vector, :math:`W` the weight matrix and :math:`b` the
    bias/intercept.
    """

    def __init__(self, state, action, rate=1, preprocessors=None, postprocessors=None, *args, **kwargs):
        """
        Initialize the Linear Policy.

        Args:
            state (State): input state. Its type and size/shape are inferred and used to build the
                underlying linear approximator automatically; it is filled by the environment at
                each step and read by the policy.
            action (Action): output action computed by `policy.act(state)` and handed back to the
                environment; its type and size/shape are likewise inferred automatically.
            rate (int, float): rate at which the policy operates: a frequency (float) when running
                in real-time, or a number of simulator ticks (int) to sleep before executing the
                model when stepping deterministically.
            preprocessors (Processor, list of Processor, None): pre-processors applied to the input.
            postprocessors (Processor, list of Processor, None): post-processors applied to the output.
            *args (list): extra positional arguments.
            **kwargs (dict): extra keyword arguments.
        """
        approximator = LinearApproximator(state, action, preprocessors=preprocessors, postprocessors=postprocessors)
        super(LinearPolicy, self).__init__(state, action, approximator, rate=rate, *args, **kwargs)
# Tests
if __name__ == '__main__':
import copy
from pyrobolearn.states import FixedState
from pyrobolearn.actions import FixedAction
# check linear policy
policy = LinearPolicy(state=FixedState(range(4)), action=FixedAction(range(2)))
print(policy)
target = copy.deepcopy(policy)
print(target)
| 45.880597 | 117 | 0.686727 | 2,181 | 0.709499 | 0 | 0 | 0 | 0 | 0 | 0 | 2,196 | 0.714379 |
7fb00f904badbb5beb0199c4114151cfe440b175 | 875 | py | Python | C/resolve.py | staguchi0703/Nomura2020 | 9931419d9c1f6827a7fa1d417c568bf9954581d3 | [
"MIT"
] | null | null | null | C/resolve.py | staguchi0703/Nomura2020 | 9931419d9c1f6827a7fa1d417c568bf9954581d3 | [
"MIT"
] | null | null | null | C/resolve.py | staguchi0703/Nomura2020 | 9931419d9c1f6827a7fa1d417c568bf9954581d3 | [
"MIT"
] | null | null | null | def resolve():
'''
code here
'''
N = int(input())
A_list = [int(item) for item in input().split()]
hp = sum(A_list) #leaf
is_slove = True
res = 1 # root
prev = 1
prev_add_num = 0
delete_cnt = 0
if hp > 2**N:
is_slove = False
elif A_list[0] == 1:
if len(A_list) >= 2:
if sum(A_list[1:]) >= 1:
is_slove = False
else:
for i in range(1, N+1):
if 2**i >= A_list[i]:
add_num = min(2**(i-1) - prev_add_num, hp)
res += add_num
hp -= A_list[i]
prev = A_list[i]
prev_add_num = add_num
else:
is_slove = False
break
print(i, prev, hp, res)
print(res) if is_slove else print(-1)
if __name__ == "__main__":
resolve()
| 23.026316 | 58 | 0.442286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.052571 |
7fb037fc5e93403c24d4069718d9478ba79f717e | 13,941 | py | Python | mod_spotted_extended_light/mod_spotted_extended_light308.py | chipsi007/spoter-mods | 0b8745cf06c651d84e356d16ce9d49f574dac81e | [
"WTFPL"
] | null | null | null | mod_spotted_extended_light/mod_spotted_extended_light308.py | chipsi007/spoter-mods | 0b8745cf06c651d84e356d16ce9d49f574dac81e | [
"WTFPL"
] | null | null | null | mod_spotted_extended_light/mod_spotted_extended_light308.py | chipsi007/spoter-mods | 0b8745cf06c651d84e356d16ce9d49f574dac81e | [
"WTFPL"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import json
import codecs
import datetime
import threading
import urllib
import urllib2
import string
import random
import BigWorld
from constants import AUTH_REALM
from gui.Scaleform.daapi.view.lobby.hangar.Hangar import Hangar
from gui.battle_control import g_sessionProvider
from Avatar import PlayerAvatar
from constants import BATTLE_EVENT_TYPE
import SoundGroups
from gui.app_loader import g_appLoader
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = True if 'RU' in AUTH_REALM else False
self.version = 'v3.08(18.11.2015)'
self.author = 'by spoter'
self.description = 'spotted_extended_light'
self.description_ru = 'Мод: "Маленький Светлячок"'
self.author_ru = 'автор: spoter'
self.name = 'spotted_extended_light'
self.description_analytics = 'Мод: "Маленький Светлячок"'
self.tid = 'UA-57975916-7'
self.sys_mes = {}
self.setup = {'MODIFIER': {'MODIFIER_NONE': 0, 'MODIFIER_SHIFT': 1, 'MODIFIER_CTRL': 2, 'MODIFIER_ALT': 4}}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'sound': True, 'icon_size': [70, 24], 'language': 'Русский'
}, 'sound': {
'%s' % BATTLE_EVENT_TYPE.SPOTTED: '/GUI/notifications_FX/enemy_sighted_for_team', '%s' % BATTLE_EVENT_TYPE.RADIO_HIT_ASSIST: '/GUI/notifications_FX/gun_intuition',
'%s' % BATTLE_EVENT_TYPE.RADIO_KILL_ASSIST: '/GUI/notifications_FX/cybersport_auto_search', '%s' % BATTLE_EVENT_TYPE.TRACK_ASSIST: '/GUI/notifications_FX/gun_intuition'
}, 'language': {
'Русский': {
'messages': {
'%s' % BATTLE_EVENT_TYPE.SPOTTED: 'Засвечены {icons}', '%s' % BATTLE_EVENT_TYPE.RADIO_HIT_ASSIST: 'Урон по засвету в {icons_names}',
'%s' % BATTLE_EVENT_TYPE.RADIO_KILL_ASSIST: 'Убили по засвету {full}', '%s' % BATTLE_EVENT_TYPE.TRACK_ASSIST: 'Ассист {icons_vehicles} за сбитые траки'
}
}, 'English': {
'messages': {
'%s' % BATTLE_EVENT_TYPE.SPOTTED: 'Spotted {icons}', '%s' % BATTLE_EVENT_TYPE.RADIO_HIT_ASSIST: 'Radio hit assist to {icons_names}',
'%s' % BATTLE_EVENT_TYPE.RADIO_KILL_ASSIST: 'Radio kill assist to {full}', '%s' % BATTLE_EVENT_TYPE.TRACK_ASSIST: 'Tracks assist {icons_vehicles}'
}
}, 'Deutsch': {
'messages': {
'%s' % BATTLE_EVENT_TYPE.SPOTTED: 'Gefunden {icons}', '%s' % BATTLE_EVENT_TYPE.RADIO_HIT_ASSIST: 'Schaden von Licht in {icons_names}',
'%s' % BATTLE_EVENT_TYPE.RADIO_KILL_ASSIST: 'Bei gluht getotet {full}', '%s' % BATTLE_EVENT_TYPE.TRACK_ASSIST: 'Assist {icons_vehicles} fur schwer verletzte tracks'
}
}
}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
def byte_ify(self, inputs):
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
    def load_mod(self):
        # Mod entry point: load (or recreate) the config file, build the
        # localized message table, then print the startup banner and the
        # enabled/disabled status to the game log.
        self.do_config()
        self.sys_mess()
        print ''
        print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
        if self.enable:
            self.debugs('Debug Activated ...')
            print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
            print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
        else:
            print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
        print ''
class Assist(object):
    """Formats and posts in-battle assist messages and plays assist sounds."""
    def __init__(self):
        # One accumulator per supported message macro; rebuilt for every
        # battle event by format_recreate().
        self.format_str = {'icons': '', 'names': '', 'vehicles': '', 'icons_names': '', 'icons_vehicles': '', 'full': ''}
    @staticmethod
    def check_macros(macros):
        # True when the macro (e.g. '{icons}') appears in any of the four
        # configured message templates; lets post_message() skip building
        # strings that no template uses.  Returns None (falsy) otherwise.
        if macros in config.language['messages']['0']: return True
        if macros in config.language['messages']['1']: return True
        if macros in config.language['messages']['2']: return True
        if macros in config.language['messages']['3']: return True
    def format_recreate(self):
        # Reset all macro accumulators before processing a new event.
        self.format_str = {'icons': '', 'names': '', 'vehicles': '', 'icons_names': '', 'icons_vehicles': '', 'full': ''}
    @staticmethod
    def sound(assist_type):
        # Play the 2D sound configured for this assist type, if any.
        if '%s' % assist_type in config.data['sound']:
            if config.data['sound'][assist_type] != '':
                sound = SoundGroups.g_instance.getSound2D(config.data['sound'][assist_type])
                if sound:
                    sound.play()
    def post_message(self, assist_type, vehicles_ids):
        # Build every requested macro string for the vehicles involved in
        # the event and post the formatted message to the battle panel.
        if assist_type in config.language['messages']:
            self.format_recreate()
            for i in vehicles_ids:
                # Unpack the 32-bit vehicle id from the packed 64-bit value
                # (high word when present, otherwise the low word).
                if i >> 32 & 4294967295L > 0: i = i >> 32 & 4294967295L
                else: i &= 4294967295L
                icon = '<img src="img://%s" width="%s" height="%s" />' % (
                    g_sessionProvider.getArenaDP().getVehicleInfo(i).vehicleType.iconPath.replace('..', 'gui'), config.data['config'].get('icon_size')[0], config.data['config'].get('icon_size')[1])
                target_info = g_sessionProvider.getCtx().getFullPlayerNameWithParts(vID=i)
                # Fall back to the bare icon whenever a name part is empty.
                if self.check_macros('{icons}'): self.format_str['icons'] += icon
                if self.check_macros('{names}'): self.format_str['names'] += '[%s]' % target_info[1] if target_info[1] else icon
                if self.check_macros('{vehicles}'): self.format_str['vehicles'] += '[%s]' % target_info[4] if target_info[4] else icon
                if self.check_macros('{icons_names}'): self.format_str['icons_names'] += '%s[%s]' % (icon, target_info[1]) if target_info[1] else icon
                if self.check_macros('{icons_vehicles}'): self.format_str['icons_vehicles'] += '%s[%s]' % (icon, target_info[4]) if target_info[4] else icon
                if self.check_macros('{full}'):
                    full = g_sessionProvider.getCtx().getFullPlayerName(vID=i)
                    self.format_str['full'] += '%s[%s]' % (icon, full) if full else icon
            msg = config.language['messages'][assist_type].format(**self.format_str)
            # The random ascii suffix makes each message id unique so the
            # panel shows repeated identical messages -- TODO confirm.
            g_appLoader.getDefBattleApp().call('battle.PlayerMessagesPanel.ShowMessage', [msg + random.choice(string.ascii_letters), '%s' % msg.decode('utf-8-sig'), 'gold'])
# deformed functions:
def hook_update_all(self):
    # Wrapper for Hangar.__updateAll: run the stock behaviour, then fire
    # the one-shot analytics ping.
    hooked_update_all(self)
    config.analytics()
def hook_on_battle_event(self, event_type, details):
    # Wrapper for PlayerAvatar.onBattleEvent: when the mod is enabled,
    # play the configured sound and post the assist message for this
    # event type, then always delegate to the original handler.
    if config.enable:
        assist_type = '%s' % event_type
        if config.data['config'].get('sound'): assist.sound(assist_type)
        assist.post_message(assist_type, details)
    return hooked_on_battle_event(self, event_type, details)
# hooked: keep references to the original (name-mangled) client methods
# so the wrappers above can delegate to them.
# noinspection PyProtectedMember
hooked_update_all = Hangar._Hangar__updateAll
hooked_on_battle_event = PlayerAvatar.onBattleEvent
# hook: monkey-patch the game client with the wrapper functions.
Hangar._Hangar__updateAll = hook_update_all
PlayerAvatar.onBattleEvent = hook_on_battle_event
# start mod: create the singletons and load the configuration.
assist = Assist()
config = Config()
config.load_mod()
| 46.781879 | 197 | 0.582742 | 12,994 | 0.917202 | 0 | 0 | 1,622 | 0.114491 | 0 | 0 | 3,359 | 0.2371 |
7fb04acdd54e9cb4cbbbf41f4ccf06ce95b2ef6e | 19,118 | py | Python | sahara/service/edp/oozie/engine.py | ksshanam/sahara | 0d259f7a71447cd0cefe4f11184cc2ee335f4e33 | [
"Apache-2.0"
] | 161 | 2015-01-05T11:46:42.000Z | 2022-01-05T07:41:39.000Z | sahara/service/edp/oozie/engine.py | ksshanam/sahara | 0d259f7a71447cd0cefe4f11184cc2ee335f4e33 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | sahara/service/edp/oozie/engine.py | ksshanam/sahara | 0d259f7a71447cd0cefe4f11184cc2ee335f4e33 | [
"Apache-2.0"
] | 118 | 2015-01-29T06:34:35.000Z | 2021-12-06T07:30:09.000Z | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import xml.dom.minidom as xml
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp.job_binaries import manager as jb_manager
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
    """Base EDP engine that submits and manages jobs through Apache Oozie.

    Plugin-specific subclasses supply the cluster topology (Oozie server,
    NameNode / ResourceManager URIs) and the HDFS user via the abstract
    methods declared below.
    """
    def __init__(self, cluster):
        self.cluster = cluster
        self.plugin = job_utils.get_plugin(self.cluster)
    def get_remote_client(self):
        """Return an Oozie client that runs its calls on the Oozie host."""
        return o.RemoteOozieClient(self.get_oozie_server_uri(self.cluster),
                                   self.get_oozie_server(self.cluster),
                                   self.get_hdfs_user())
    def get_client(self):
        # by default engine will return standard oozie client implementation
        return o.OozieClient(self.get_oozie_server_uri(self.cluster),
                             self.get_oozie_server(self.cluster))
    def _get_oozie_job_params(self, hdfs_user, path_to_workflow,
                              oozie_params, use_hbase_lib,
                              scheduled_params=None, job_dir=None,
                              job_execution_type=None):
        """Build the Oozie job properties for a workflow or coordinator.

        Optionally appends the HBase common lib to ``oozie.libpath`` and
        chooses between a workflow application path and a coordinator
        (``job_execution_type == "scheduled"``) application path.
        User-supplied ``oozie_params`` are merged last, except that the
        application path and libpath keys may not be overridden.
        """
        oozie_libpath_key = "oozie.libpath"
        oozie_libpath = ""
        rm_path = self.get_resource_manager_uri(self.cluster)
        nn_path = self.get_name_node_uri(self.cluster)
        hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH)
        if use_hbase_lib:
            if oozie_libpath_key in oozie_params:
                oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key,
                                           ""), hbase_common_lib_path)
            else:
                oozie_libpath = hbase_common_lib_path
        if job_execution_type == "scheduled":
            app_path = "oozie.coord.application.path"
            job_parameters = {
                "start": scheduled_params.get('start'),
                "end": scheduled_params.get('end'),
                "frequency": scheduled_params.get('frequency'),
                "workflowAppUri": "%s%s" % (nn_path, job_dir),
                app_path: "%s%s" % (nn_path, job_dir)}
        else:
            app_path = "oozie.wf.application.path"
            job_parameters = {
                app_path: "%s%s" % (nn_path, path_to_workflow)}
        job_parameters["nameNode"] = nn_path
        job_parameters["user.name"] = hdfs_user
        job_parameters["jobTracker"] = rm_path
        job_parameters[oozie_libpath_key] = oozie_libpath
        job_parameters["oozie.use.system.libpath"] = "true"
        # Don't let the application path be overwritten, that can't
        # possibly make any sense
        if app_path in oozie_params:
            del oozie_params[app_path]
        if oozie_libpath_key in oozie_params:
            del oozie_params[oozie_libpath_key]
        job_parameters.update(oozie_params)
        return job_parameters
    def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
        """Write workflow.xml into the job's HDFS dir; return its path."""
        with remote.get_remote(where) as r:
            h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
        return "%s/workflow.xml" % job_dir
    def _upload_coordinator_file(self, where, job_dir, wf_xml, hdfs_user):
        """Write coordinator.xml into the job's HDFS dir; return its path."""
        with remote.get_remote(where) as r:
            h.put_file_to_hdfs(r, wf_xml, "coordinator.xml", job_dir,
                               hdfs_user)
        return "%s/coordinator.xml" % job_dir
    def cancel_job(self, job_execution):
        """Kill the backing Oozie job (if submitted); return its info."""
        if job_execution.engine_job_id is not None:
            client = self.get_client()
            client.kill_job(job_execution)
            return client.get_job_info(job_execution)
    def get_job_status(self, job_execution):
        """Return current Oozie job info, or None if never submitted."""
        if job_execution.engine_job_id is not None:
            return self.get_client().get_job_info(job_execution)
    def _prepare_run_job(self, job_execution):
        """Stage everything a run needs and collect it into one dict.

        Resolves data-source references, uploads job binaries and the
        generated workflow.xml to HDFS, and returns the parameters shared
        by run_job() and run_scheduled_job().
        """
        ctx = context.ctx()
        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}
        prepared_job_params = {}
        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_input_output_data_sources(
            job_execution, job, data_source_urls, self.cluster)
        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )
        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})
        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)
        data_sources = additional_sources + [input_source, output_source]
        job_utils.prepare_cluster_for_ds(data_sources,
                                         self.cluster, updated_job_configs,
                                         data_source_urls)
        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', {})
        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith('oozie.'):
                oozie_params[k] = configs[k]
        external_hdfs_urls = self._resolve_external_hdfs_urls(
            job_execution.job_configs)
        for url in external_hdfs_urls:
            h.configure_cluster_for_hdfs(self.cluster, url)
        hdfs_user = self.get_hdfs_user()
        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)
        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)
        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source,
            hdfs_user, data_source_urls)
        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
                                                      wf_xml, hdfs_user)
        prepared_job_params['context'] = ctx
        prepared_job_params['hdfs_user'] = hdfs_user
        prepared_job_params['path_to_workflow'] = path_to_workflow
        prepared_job_params['use_hbase_lib'] = use_hbase_lib
        prepared_job_params['job_execution'] = job_execution
        prepared_job_params['oozie_params'] = oozie_params
        prepared_job_params['wf_dir'] = wf_dir
        prepared_job_params['oozie_server'] = oozie_server
        return prepared_job_params
    def run_job(self, job_execution):
        """Submit and start a workflow job.

        Returns a tuple ``(oozie_job_id, status, None)``; status is None
        when the post-submit status poll fails.
        """
        prepared_job_params = self._prepare_run_job(job_execution)
        path_to_workflow = prepared_job_params['path_to_workflow']
        hdfs_user = prepared_job_params['hdfs_user']
        oozie_params = prepared_job_params['oozie_params']
        use_hbase_lib = prepared_job_params['use_hbase_lib']
        ctx = prepared_job_params['context']
        job_execution = prepared_job_params['job_execution']
        job_params = self._get_oozie_job_params(hdfs_user,
                                                path_to_workflow,
                                                oozie_params,
                                                use_hbase_lib)
        client = self.get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
                                      job_execution)
        # Re-read the execution: the user may have requested a kill while
        # we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)
        conductor.job_execution_update(
            context.ctx(), job_execution.id,
            {'info': {'status': edp.JOB_STATUS_READYTORUN},
             'engine_job_id': oozie_job_id})
        client.run_job(job_execution, oozie_job_id)
        try:
            status = client.get_job_info(job_execution, oozie_job_id)['status']
        except Exception:
            status = None
        return (oozie_job_id, status, None)
    def run_scheduled_job(self, job_execution):
        """Submit a coordinator (scheduled) job; return (id, status, None)."""
        prepared_job_params = self._prepare_run_job(job_execution)
        oozie_server = prepared_job_params['oozie_server']
        wf_dir = prepared_job_params['wf_dir']
        hdfs_user = prepared_job_params['hdfs_user']
        oozie_params = prepared_job_params['oozie_params']
        use_hbase_lib = prepared_job_params['use_hbase_lib']
        ctx = prepared_job_params['context']
        job_execution = prepared_job_params['job_execution']
        coord_configs = {"jobTracker": "${jobTracker}",
                         "nameNode": "${nameNode}"}
        coord_xml = self._create_coordinator_xml(coord_configs)
        self._upload_coordinator_file(oozie_server, wf_dir, coord_xml,
                                      hdfs_user)
        job_params = self._get_oozie_job_params(
            hdfs_user, None, oozie_params, use_hbase_lib,
            job_execution.job_configs.job_execution_info, wf_dir,
            "scheduled")
        client = self.get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
                                      job_execution)
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)
        try:
            status = client.get_job_status(job_execution,
                                           oozie_job_id)['status']
        except Exception:
            status = None
        return (oozie_job_id, status, None)
    @abc.abstractmethod
    def get_hdfs_user(self):
        """Return the HDFS user name used to stage job files."""
        pass
    @abc.abstractmethod
    def create_hdfs_dir(self, remote, dir_name):
        """Create *dir_name* in HDFS via the given remote."""
        pass
    @abc.abstractmethod
    def get_oozie_server_uri(self, cluster):
        """Return the Oozie REST endpoint URI for *cluster*."""
        pass
    @abc.abstractmethod
    def get_oozie_server(self, cluster):
        """Return the instance hosting the Oozie server."""
        pass
    @abc.abstractmethod
    def get_name_node_uri(self, cluster):
        """Return the NameNode URI for *cluster*."""
        pass
    @abc.abstractmethod
    def get_resource_manager_uri(self, cluster):
        """Return the ResourceManager (jobTracker) URI for *cluster*."""
        pass
    def validate_job_execution(self, cluster, job, data):
        """Validate per-job-type required fields before execution."""
        # Shell job type requires no specific fields
        if job.type == edp.JOB_TYPE_SHELL:
            return
        # All other types except Java require input and output
        # objects and Java require main class
        if job.type == edp.JOB_TYPE_JAVA:
            j.check_main_class_present(data, job)
        else:
            j.check_data_sources(data, job)
        job_type, subtype = edp.split_job_type(job.type)
        if job_type == edp.JOB_TYPE_MAPREDUCE and (
                subtype == edp.JOB_SUBTYPE_STREAMING):
            j.check_streaming_present(data, job)
    @staticmethod
    def get_possible_job_config(job_type):
        """Return the configuration hints for *job_type*."""
        return workflow_factory.get_possible_job_config(job_type)
    @staticmethod
    def get_supported_job_types():
        """Return the EDP job types this engine can run."""
        return [edp.JOB_TYPE_HIVE,
                edp.JOB_TYPE_JAVA,
                edp.JOB_TYPE_MAPREDUCE,
                edp.JOB_TYPE_MAPREDUCE_STREAMING,
                edp.JOB_TYPE_PIG,
                edp.JOB_TYPE_SHELL]
    def _prepare_job_binaries(self, job_binaries, r):
        """Let each binary's handler prepare the cluster (e.g. install deps)."""
        for jb in job_binaries:
            jb_manager.JOB_BINARIES.get_job_binary_by_url(jb.url). \
                prepare_cluster(jb, remote=r)
    def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
                                  proxy_configs=None):
        """Upload mains, libs and builtin binaries into the HDFS job dir.

        Libs go into a ``lib`` subdirectory except for Shell jobs, which
        expect everything in the job dir itself.  Returns the uploaded
        paths.
        """
        mains = list(job.mains) if job.mains else []
        libs = list(job.libs) if job.libs else []
        builtin_libs = edp.get_builtin_binaries(job, configs)
        uploaded_paths = []
        hdfs_user = self.get_hdfs_user()
        job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
        lib_dir = os.path.join(job_dir, job_dir_suffix)
        with remote.get_remote(where) as r:
            job_binaries = mains + libs
            self._prepare_job_binaries(job_binaries, r)
            # upload mains
            uploaded_paths.extend(self._upload_job_binaries(r, mains,
                                                            proxy_configs,
                                                            hdfs_user,
                                                            job_dir))
            # upload libs
            if len(libs) and job_dir_suffix:
                # HDFS 2.2.0 fails to put file if the lib dir does not exist
                self.create_hdfs_dir(r, lib_dir)
                uploaded_paths.extend(self._upload_job_binaries(r, libs,
                                                                proxy_configs,
                                                                hdfs_user,
                                                                lib_dir))
            # upload buitin_libs
            for lib in builtin_libs:
                h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
                                   hdfs_user)
                uploaded_paths.append(lib_dir + lib['name'])
        return uploaded_paths
    def _upload_job_binaries(self, r, job_binaries, proxy_configs,
                             hdfs_user, job_dir):
        """Copy each binary to the cluster, then into HDFS *job_dir*."""
        uploaded_paths = []
        for jb in job_binaries:
            path = jb_manager.JOB_BINARIES. \
                get_job_binary_by_url(jb.url). \
                copy_binary_to_cluster(jb, proxy_configs=proxy_configs,
                                       remote=r, context=context.ctx())
            h.copy_from_local(r, path, job_dir, hdfs_user)
            uploaded_paths.append(path)
        return uploaded_paths
    def _create_hdfs_workflow_dir(self, where, job):
        """Create and return a unique HDFS working dir for this job run."""
        constructed_dir = '/user/%s/' % self.get_hdfs_user()
        constructed_dir = self._add_postfix(constructed_dir)
        constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid())
        with remote.get_remote(where) as r:
            self.create_hdfs_dir(r, constructed_dir)
        return constructed_dir
    def _create_coordinator_xml(self, coord_configs, config_filter=None,
                                appname='coord'):
        """Render a coordinator.xml document for a scheduled job.

        Start/end/frequency are left as ``${...}`` placeholders to be
        filled from the job properties.  When *config_filter* is given,
        only the listed config names are emitted.
        """
        doc = xml.Document()
        # Create the <coordinator-app> base element
        coord = doc.createElement('coordinator-app')
        coord.attributes['name'] = appname
        coord.attributes['start'] = "${start}"
        coord.attributes['end'] = "${end}"
        coord.attributes['frequency'] = "${frequency}"
        coord.attributes['timezone'] = 'UTC'
        coord.attributes['xmlns'] = 'uri:oozie:coordinator:0.2'
        doc.appendChild(coord)
        action = doc.createElement('action')
        workflow = doc.createElement('workflow')
        coord.appendChild(action)
        action.appendChild(workflow)
        x.add_text_element_to_tag(doc, "workflow", 'app-path',
                                  "${workflowAppUri}")
        configuration = doc.createElement('configuration')
        workflow.appendChild(configuration)
        default_configs = []
        if config_filter is not None:
            default_configs = [cfg['name'] for cfg in config_filter]
        for name in sorted(coord_configs):
            if name in default_configs or config_filter is None:
                x.add_property_to_configuration(doc, name, coord_configs[name])
        # Return newly created XML
        return doc.toprettyxml(indent=" ")
    def _add_postfix(self, constructed_dir):
        """Append CONF.job_workflow_postfix (if set), with trailing slash."""
        def _append_slash_if_needed(path):
            if path[-1] != '/':
                path += '/'
            return path
        constructed_dir = _append_slash_if_needed(constructed_dir)
        if CONF.job_workflow_postfix:
            constructed_dir = ''.join([str(constructed_dir),
                                       str(CONF.job_workflow_postfix)])
        return _append_slash_if_needed(constructed_dir)
    def _resolve_external_hdfs_urls(self, job_configs):
        """Collect all hdfs:// URLs from configs, params and args."""
        external_hdfs_urls = []
        for k, v in six.iteritems(job_configs.get('configs', {})):
            if isinstance(v, six.string_types) and v.startswith("hdfs://"):
                external_hdfs_urls.append(v)
        for k, v in six.iteritems(job_configs.get('params', {})):
            if isinstance(v, six.string_types) and v.startswith("hdfs://"):
                external_hdfs_urls.append(v)
        for v in job_configs.get('args', []):
            if isinstance(v, six.string_types) and v.startswith("hdfs://"):
                external_hdfs_urls.append(v)
        return external_hdfs_urls
    def suspend_job(self, job_execution):
        """Suspend a running Oozie job; return its status."""
        return self._manage_job(job_execution, edp.JOB_ACTION_SUSPEND)
    def _manage_job(self, job_execution, action):
        # NOTE(review): this checks oozie_job_id while cancel_job and
        # get_job_status use engine_job_id -- presumably equivalent here;
        # verify before changing.
        if job_execution.oozie_job_id is not None:
            client = self.get_client()
            if action == edp.JOB_ACTION_SUSPEND:
                client.suspend_job(job_execution)
            return client.get_job_status(job_execution)
| 40.850427 | 79 | 0.616016 | 17,791 | 0.930589 | 0 | 0 | 17,823 | 0.932263 | 0 | 0 | 2,918 | 0.152631 |
7fb04d7e5c955b6114c4a179c5490fcab0e0ff9c | 3,497 | py | Python | adjutantclient/osc/v1/tokens.py | openstack/python-adjutantclient | 20d234bc370056184a0885e6e6f735db056668a2 | [
"Apache-2.0"
] | 6 | 2017-10-31T13:27:59.000Z | 2019-01-28T22:09:25.000Z | adjutantclient/osc/v1/tokens.py | openstack/python-adjutantclient | 20d234bc370056184a0885e6e6f735db056668a2 | [
"Apache-2.0"
] | null | null | null | adjutantclient/osc/v1/tokens.py | openstack/python-adjutantclient | 20d234bc370056184a0885e6e6f735db056668a2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from osc_lib.command import command
from osc_lib.i18n import _
from adjutantclient import client as adjutant_client
LOG = logging.getLogger(__name__)
def _list_tokens(client, filters=None):
    """Fetch adjutant tokens and shape them for ``command.Lister`` output.

    :param client: adjutant client exposing a ``tokens`` manager.
    :param filters: optional dict of server-side filters; defaults to no
        filtering.  ``None`` is used as the default instead of a mutable
        ``{}`` to avoid the shared-mutable-default pitfall.
    :returns: ``(headers, rows)`` tuple consumable by osc_lib's Lister.
    """
    if filters is None:
        filters = {}
    tokens = client.tokens.list(filters=filters)
    headers = ['Token', 'Task', 'Expires on', 'Created on']
    rows = [[token.token, token.task, token.expires, token.created_on]
            for token in tokens]
    return headers, rows
class TokenList(command.Lister):
    """Lists adjutant tokens. """
    # CLI command: list tokens, optionally constrained by a JSON filter
    # blob passed straight through to the API.
    def get_parser(self, prog_name):
        arg_parser = super(TokenList, self).get_parser(prog_name)
        arg_parser.add_argument(
            '--filters',
            metavar='<filters>',
            required=False,
            default={},
            help=_('JSON containing filters for tokens.'))
        return arg_parser
    def take_action(self, parsed_args):
        admin_client = self.app.client_manager.admin_logic
        return _list_tokens(admin_client, parsed_args.filters)
class TokenShow(command.ShowOne):
    """Show details of one token."""
    # CLI command: display a single token, either through the normal
    # authenticated client or, with --bypass-url, through an
    # unauthenticated client pointed at the given endpoint.
    def get_parser(self, prog_name):
        parser = super(TokenShow, self).get_parser(prog_name)
        parser.add_argument(
            'token', metavar='<token_id>',
            help=_("The token."))
        parser.add_argument(
            '--bypass-url', metavar='<bypass-url>', default=None,
            # Fixed user-facing typo: "Bypasss" -> "Bypass".
            help=_('Bypass URL for unauthenticated access to the endpoint.'))
        return parser
    def take_action(self, parsed_args):
        if not parsed_args.bypass_url:
            # Force authentication before grabbing the managed client.
            self.app.client_manager._auth_required = True
            self.app.client_manager.setup_auth()
            client = self.app.client_manager.admin_logic
        else:
            # Token endpoints can be used without auth (e.g. email links).
            client = adjutant_client.Client("1", parsed_args.bypass_url)
        token = client.tokens.get(parsed_args.token)
        # ShowOne expects parallel (names, values) sequences.
        return zip(*(token.to_dict()).items())
class TokenSubmit(command.Command):
    """Submit token data."""
    # CLI command: POST user-supplied JSON data against an adjutant token.
    def get_parser(self, prog_name):
        arg_parser = super(TokenSubmit, self).get_parser(prog_name)
        arg_parser.add_argument(
            'token',
            metavar='<token_id>',
            help=_('The token.'))
        arg_parser.add_argument(
            'data',
            metavar='<token_data>',
            help=_('Submission data for the token. Must be valid json.'))
        return arg_parser
    def take_action(self, parsed_args):
        admin_client = self.app.client_manager.admin_logic
        payload = json.loads(parsed_args.data)
        response = admin_client.tokens.submit(parsed_args.token, payload)
        print('Success', ' '.join(response.notes))
class TokenClear(command.Lister):
    """Clear Expired tokens, admin only."""
    # CLI command: purge expired tokens server-side, report the API notes,
    # then list the tokens that remain.
    def take_action(self, parsed_args):
        admin_client = self.app.client_manager.admin_logic
        response = admin_client.tokens.clear_expired()
        notes = ' '.join(response.json()['notes'])
        print('Success. ' + notes)
        return _list_tokens(admin_client)
| 32.682243 | 78 | 0.654847 | 2,411 | 0.689448 | 0 | 0 | 0 | 0 | 0 | 0 | 1,074 | 0.30712 |
7fb19ef2495c344620752f4d76a363c1d3413606 | 3,126 | py | Python | Chapter05/Low_Quality.py | PacktPublishing/Bioinformatics-with-Python-Cookbook-third-edition | 94fc361fdadc15ec0d8b0cd101bfc3717a0ab50e | [
"MIT"
] | 9 | 2021-10-01T23:00:46.000Z | 2022-03-11T02:35:24.000Z | Chapter05/Low_Quality.py | PacktPublishing/Bioinformatics-with-Python-Cookbook-third-edition | 94fc361fdadc15ec0d8b0cd101bfc3717a0ab50e | [
"MIT"
] | null | null | null | Chapter05/Low_Quality.py | PacktPublishing/Bioinformatics-with-Python-Cookbook-third-edition | 94fc361fdadc15ec0d8b0cd101bfc3717a0ab50e | [
"MIT"
] | 6 | 2021-11-17T15:46:48.000Z | 2022-02-25T13:53:35.000Z | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import gzip
import numpy as np
import matplotlib.pyplot as plt
from Bio import SeqIO, SeqUtils
# -
# !rm -f atroparvus.fa.gz gambiae.fa.gz 2>/dev/null
# !wget https://vectorbase.org/common/downloads/Current_Release/AgambiaePEST/fasta/data/VectorBase-55_AgambiaePEST_Genome.fasta -O gambiae.fa
# !gzip -9 gambiae.fa
# !wget https://vectorbase.org/common/downloads/Current_Release/AatroparvusEBRO/fasta/data/VectorBase-55_AatroparvusEBRO_Genome.fasta -O atroparvus.fa
# !gzip -9 atroparvus.fa
# File names of the two gzip-compressed genomes downloaded above.
gambiae_name = 'gambiae.fa.gz'
atroparvus_name = 'atroparvus.fa.gz'
# First pass over the A. gambiae genome: print every record description
# (chromosome arms plus supercontigs).
recs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')
for rec in recs:
    print(rec.description)
# Do not do this with atroparvus (its assembly is thousands of
# supercontigs, so a per-position scan would be very slow).
recs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')
chrom_Ns = {}  # chromosome -> list of lengths of each run of Ns
chrom_sizes = {}  # chromosome -> total sequence length in bp
for rec in recs:
    if rec.description.find('supercontig') > -1:
        continue
    print(rec.description, rec.id, rec)
    chrom = rec.id.split('_')[1]
    if chrom in ['UNKN']:#, 'Y_unplaced']:
        continue
    chrom_Ns[chrom] = []
    # Scan base by base, recording the length of every contiguous N run.
    on_N = False
    curr_size = 0
    for pos, nuc in enumerate(rec.seq):
        if nuc in ['N', 'n']:
            curr_size += 1
            on_N = True
        else:
            if on_N:
                chrom_Ns[chrom].append(curr_size)
                curr_size = 0
                on_N = False
    # Flush a run of Ns that extends to the end of the sequence.
    if on_N:
        chrom_Ns[chrom].append(curr_size)
    chrom_sizes[chrom] = len(rec.seq)
# Summary: per chromosome, the percentage of Ns, the number of N runs
# and the longest run.
for chrom, Ns in chrom_Ns.items():
    size = chrom_sizes[chrom]
    if len(Ns) > 0:
        max_Ns = max(Ns)
    else:
        max_Ns = 'NA'
    print(f'{chrom} ({size}): %Ns ({round(100 * sum(Ns) / size, 1)}), num Ns: {len(Ns)}, max N: {max_Ns}')
# ## Atroparvus super-contigs
# For each supercontig, record its size and its fraction of N bases
# (computed by a simple per-base count, no run-length tracking).
recs = SeqIO.parse(gzip.open(atroparvus_name, 'rt', encoding='utf-8'), 'fasta')
sizes = []
size_N = []
for rec in recs:
    size = len(rec.seq)
    sizes.append(size)
    count_N = 0
    for nuc in rec.seq:
        if nuc in ['n', 'N']:
            count_N += 1
    size_N.append((size, count_N / size))
# Distribution summary of supercontig sizes.
print(len(sizes), np.median(sizes), np.mean(sizes), max(sizes), min(sizes),
      np.percentile(sizes, 10), np.percentile(sizes, 90))
# Plot %N vs contig size in three size bands (small / medium / large)
# sharing the y axis; split points chosen for this assembly.
small_split = 4800
large_split = 540000
fig, axs = plt.subplots(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x <= small_split])
axs[0, 0].plot(xs, ys, '.')
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > small_split and x <= large_split])
axs[0, 1].plot(xs, ys, '.')
axs[0, 1].set_xlim(small_split, large_split)
xs, ys = zip(*[(x, 100 * y) for x, y in size_N if x > large_split])
axs[0, 2].plot(xs, ys, '.')
axs[0, 0].set_ylabel('Fraction of Ns', fontsize=12)
axs[0, 1].set_xlabel('Contig size', fontsize=12)
fig.suptitle('Fraction of Ns per contig size', fontsize=26)
fig.savefig('frac.png')
| 29.771429 | 150 | 0.627639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,034 | 0.330774 |
7fb391590f55d21149c4442ca20b07a2041ace3f | 6,835 | py | Python | experiments/2_Maze_Neutrality/experiment/stimuli_creation/stim_csv_iterator.py | BranPap/gender_ideology | 2b2b87e13cb7a8abd0403828fbc235768a774aaa | [
"MIT"
] | 1 | 2021-03-30T03:12:05.000Z | 2021-03-30T03:12:05.000Z | experiments/2_Maze_Neutrality/experiment/stimuli_creation/stim_csv_iterator.py | BranPap/gender_ideology | 2b2b87e13cb7a8abd0403828fbc235768a774aaa | [
"MIT"
] | null | null | null | experiments/2_Maze_Neutrality/experiment/stimuli_creation/stim_csv_iterator.py | BranPap/gender_ideology | 2b2b87e13cb7a8abd0403828fbc235768a774aaa | [
"MIT"
] | 1 | 2021-03-30T03:41:32.000Z | 2021-03-30T03:41:32.000Z | import json
import pandas as pd
import random
# Lexeme table: one row per lexeme with det/neutral/female/male forms.
df = pd.read_csv("experiment\stimuli_creation\maze_lexemes.csv")
# Pool of US states (shuffled; one popped per lexeme so no state repeats)
# and pool of activities (shuffled; sampled with replacement).
states = ["California","Alabama","Alaska","Arizona","Arkansas","Connecticut","Colorado","Delaware","Florida","Georgia","Hawaii","Idaho","Illinois","Indiana","Iowa","Kansas","Kentucky","Louisiana","Maine","Maryland","Massachusetts","Michigan","Minnesota","Mississippi","Missouri","Montana","Nebraska","Nevada","New Hampshire","New Jersey","New Mexico","New York","North Carolina","North Dakota","Ohio","Oklahoma","Oregon","Pennsylvania","Rhode Island","South Carolina","South Dakota","Tennessee","Texas","Utah","Vermont","Virginia","Washington","West Virginia","Wisconsin","Wyoming"]
random.shuffle(states)
activities = ["swimming","writing","singing","dancing","hiking","running","reading","drawing","painting","cooking","cycling","walking","studying","surfing","camping"]
random.shuffle(activities)
stim_list = []  # all word forms used, for later spell-checking
coin = [0,1]  # coin flip for question/answer randomization
entry = []  # human-readable sentences, for manual matching
status = 1  # item counter (incremented once per lexeme)
with open("experiment\stimuli_creation\maze_stims.csv", 'w') as stim_input:
    # CSV header: sentence regions, two comprehension questions with
    # answers, and item metadata used by the maze experiment.
    stim_input.write("name,be,det,target,prep,state,pro,like,activity,question1,answer1,question2,answer2,gender,lexeme,orthog,condition,id")
    stim_input.write("\n")
    # For each lexeme write four conditions sharing the same state and
    # activity: neutral/female, congruent/female, neutral/male,
    # congruent/male.
    for index,row in df.iterrows():
        status +=1
        state = states.pop()
        activity = random.choice(activities)
        antistate = random.choice(states)  # distractor state (state was popped, so it differs)
        activity_2 = random.choice(activities)  # possible distractor activity
        # --- condition: neutral form, female referent -------------------
        stim_input.write("NAME,is,")
        stim_input.write(row["det"])
        stim_input.write("," + row["neutral"])
        stim_input.write(",from,")
        stim_input.write(state+".")
        stim_input.write(",She,")
        stim_input.write("likes,")
        stim_input.write(activity+".")
        entry.append(str(('female;'+str(status)+';Jane is '+row["det"]+" "+row["neutral"]+" from "+state+". She likes "+activity+".")))
        # activity_chance decides whether question1 uses the distractor
        # activity; the same flip is reused for all four conditions.
        activity_chance = random.choice(coin)
        if activity_chance == 0:
            stim_input.write(",Does NAME like "+activity_2+"?")
            if activity == activity_2:
                stim_input.write(",Yes")
            else:
                stim_input.write(",No")
        else:
            stim_input.write(",Does NAME like "+activity+"?")
            stim_input.write(",Yes")
        # chance decides whether question2 uses the distractor state;
        # also reused across the four conditions.
        chance = random.choice(coin)
        if chance == 0:
            stim_input.write(",Is NAME from "+antistate+"?")
            stim_input.write(",No,")
        else:
            stim_input.write(",Is NAME from "+state+"?")
            stim_input.write(",Yes,")
        stim_input.write("female,"+row['lexeme']+','+row["female"]+',')
        stim_input.write("neutral_female"+',')
        stim_input.write(row['lexeme'])
        stim_input.write("_neutral_female")
        stim_input.write('\n')
        # --- condition: congruent (female) form, female referent --------
        stim_input.write("NAME,is,")
        stim_input.write(row["det"])
        stim_input.write("," + row["female"])
        stim_input.write(",from,")
        stim_input.write(state+".")
        stim_input.write(",She,")
        stim_input.write("likes,")
        stim_input.write(activity+".")
        entry.append(str(('female;'+str(status)+';Jane is '+row["det"]+" "+row["female"]+" from "+state+". She likes "+activity+".")))
        if activity_chance == 0:
            stim_input.write(",Does NAME like "+activity_2+"?")
            if activity == activity_2:
                stim_input.write(",Yes")
            else:
                stim_input.write(",No")
        else:
            stim_input.write(",Does NAME like "+activity+"?")
            stim_input.write(",Yes")
        if chance == 0:
            stim_input.write(",Is NAME from "+antistate+"?")
            stim_input.write(",No,")
        else:
            stim_input.write(",Is NAME from "+state+"?")
            stim_input.write(",Yes,")
        stim_input.write("female,"+row['lexeme']+','+row["female"]+',')
        stim_input.write("congruent_female"+',')
        stim_input.write(row['lexeme'])
        stim_input.write("_congruent_female")
        stim_input.write('\n')
        # --- condition: neutral form, male referent ---------------------
        stim_input.write("NAME,is,")
        stim_input.write(row["det"])
        stim_input.write("," + row["neutral"])
        stim_input.write(",from,")
        stim_input.write(state+".")
        stim_input.write(",He,")
        stim_input.write("likes,")
        stim_input.write(activity+".")
        entry.append(str(('male;'+str(status)+';John is '+row["det"]+" "+row["neutral"]+" from "+state+". He likes "+activity+".")))
        if activity_chance == 0:
            stim_input.write(",Does NAME like "+activity_2+"?")
            if activity == activity_2:
                stim_input.write(",Yes")
            else:
                stim_input.write(",No")
        else:
            stim_input.write(",Does NAME like "+activity+"?")
            stim_input.write(",Yes")
        if chance == 0:
            stim_input.write(",Is NAME from "+antistate+"?")
            stim_input.write(",No,")
        else:
            stim_input.write(",Is NAME from "+state+"?")
            stim_input.write(",Yes,")
        stim_input.write("male,"+row['lexeme']+','+row["male"]+',')
        stim_input.write("neutral_male"+',')
        stim_input.write(row["lexeme"]+"_neutral_male")
        stim_input.write('\n')
        # --- condition: congruent (male) form, male referent ------------
        stim_input.write("NAME,is,")
        stim_input.write(row["det"])
        stim_input.write("," + row["male"])
        stim_input.write(",from,")
        stim_input.write(state+".")
        stim_input.write(",He,")
        stim_input.write("likes,")
        stim_input.write(activity+".")
        entry.append(str(('male;'+str(status)+';John is '+row["det"]+" "+row["male"]+" from "+state+". He likes "+activity+".")))
        if activity_chance == 0:
            stim_input.write(",Does NAME like "+activity_2+"?")
            if activity == activity_2:
                stim_input.write(",Yes")
            else:
                stim_input.write(",No")
        else:
            stim_input.write(",Does NAME like "+activity+"?")
            stim_input.write(",Yes")
        if chance == 0:
            stim_input.write(",Is NAME from "+antistate+"?")
            stim_input.write(",No,")
        else:
            stim_input.write(",Is NAME from "+state+"?")
            stim_input.write(",Yes,")
        stim_input.write("male,"+row['lexeme']+','+row["male"]+',')
        stim_input.write("congruent_male"+',')
        stim_input.write(row['lexeme'])
        stim_input.write("_congruent_male")
        stim_input.write('\n')
        # Collect all forms of this lexeme for the word-list output.
        stim_list.append(row['lexeme'])
        stim_list.append(row['neutral'])
        stim_list.append(row['male'])
        stim_list.append(row['female'])
# Side outputs: the raw word list (for spell-checking) and one
# human-readable sentence per line (for manual item matching).
with open('list_file.txt', 'w') as stim_checker:
    stim_checker.write(str(stim_list))
with open('to-be-matched.txt', 'w') as match_list:
    for sentence in entry:
        match_list.write(str(sentence)+"\n")
| 42.71875 | 582 | 0.574689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,154 | 0.315143 |
7fb495b0933b679faafdbcf8c0bfaeffd39c71bb | 737 | py | Python | Choruslib/chorusio.py | kramundson/Chorus2 | 43be15e082f346dc45b94a7fadb141b3a6f8c37c | [
"MIT"
] | 11 | 2019-05-23T13:28:39.000Z | 2022-02-10T00:24:54.000Z | Choruslib/chorusio.py | kramundson/Chorus2 | 43be15e082f346dc45b94a7fadb141b3a6f8c37c | [
"MIT"
] | 3 | 2019-03-15T01:47:53.000Z | 2021-12-16T07:41:08.000Z | Choruslib/chorusio.py | kramundson/Chorus2 | 43be15e082f346dc45b94a7fadb141b3a6f8c37c | [
"MIT"
] | 11 | 2019-02-28T08:17:02.000Z | 2021-12-13T11:44:02.000Z | from pyfasta import Fasta
def writebed(probelist, outbedfile):
    """Write probe records to *outbedfile*, one per line.

    Each record is already a tab-joined string: ``chr\\tstart\\tend``.
    """
    with open(outbedfile, 'w') as handle:
        for record in probelist:
            print(record, file=handle)
def writefa(genomefile, bedfile, outfile):
    """Extract the sequence of every BED interval in *bedfile* from *genomefile*
    and write it to *outfile* in FASTA format (``>chr:start-end`` headers).

    Coordinates are taken verbatim from the BED file (0-based half-open, as
    produced by writebed).
    """
    fastafile = Fasta(genomefile)
    # "with" guarantees both handles are closed even if a malformed BED line
    # raises during parsing (the original left them open on error).
    with open(bedfile, 'r') as bedio, open(outfile, 'w') as outio:
        for lin in bedio:
            lin = lin.rstrip()
            chrnow, start, end = lin.split('\t')
            seqid = '>' + chrnow + ':' + start + '-' + end
            nowseq = fastafile[chrnow][int(start):int(end)]
            print(seqid, file=outio)
            print(nowseq, file=outio)
| 14.74 | 55 | 0.569878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.122117 |
7fb5da94eb1a0567d0a8b0b1f7117ea8c8244199 | 77 | py | Python | p001.py | scottwillmoore/project-euler | e8b24a72d88fc30e8ab701e2f56622c363600ea4 | [
"MIT"
] | 1 | 2018-07-09T09:19:58.000Z | 2018-07-09T09:19:58.000Z | p001.py | scottwillmoore/project-euler | e8b24a72d88fc30e8ab701e2f56622c363600ea4 | [
"MIT"
] | null | null | null | p001.py | scottwillmoore/project-euler | e8b24a72d88fc30e8ab701e2f56622c363600ea4 | [
"MIT"
] | null | null | null | c = lambda n: n % 3 == 0 or n % 5 == 0
print(sum(filter(c, range(0, 1000))))
| 25.666667 | 38 | 0.532468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7fb602d2e2a1eb7cec67da9528c261af68059799 | 770 | py | Python | source/Tutorials/Actions/client_0.py | dsauval/ros2_documentation | 19627272befdd4affa3bdb5b99bedb28c9975584 | [
"CC-BY-4.0"
] | 291 | 2018-10-05T17:35:45.000Z | 2022-03-29T02:46:57.000Z | source/Tutorials/Actions/client_0.py | dsauval/ros2_documentation | 19627272befdd4affa3bdb5b99bedb28c9975584 | [
"CC-BY-4.0"
] | 978 | 2018-10-18T17:00:46.000Z | 2022-03-29T12:25:21.000Z | source/Tutorials/Actions/client_0.py | dsauval/ros2_documentation | 19627272befdd4affa3bdb5b99bedb28c9975584 | [
"CC-BY-4.0"
] | 883 | 2018-10-01T19:18:13.000Z | 2022-03-28T03:03:09.000Z | import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from action_tutorials_interfaces.action import Fibonacci
class FibonacciActionClient(Node):
def __init__(self):
super().__init__('fibonacci_action_client')
self._action_client = ActionClient(self, Fibonacci, 'fibonacci')
def send_goal(self, order):
goal_msg = Fibonacci.Goal()
goal_msg.order = order
self._action_client.wait_for_server()
return self._action_client.send_goal_async(goal_msg)
def main(args=None):
    """Start the client, send a goal for 10 Fibonacci terms, and spin until
    the goal-submission future completes."""
    rclpy.init(args=args)
    client = FibonacciActionClient()
    goal_future = client.send_goal(10)
    rclpy.spin_until_future_complete(client, goal_future)


if __name__ == '__main__':
    main()
7fb60d8a8b3c137ea51b2200982c164df6a11a71 | 655 | py | Python | label_demo.py | encela95dus/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | 36 | 2019-01-12T04:17:49.000Z | 2022-03-31T05:33:29.000Z | label_demo.py | Backup-eric645/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | null | null | null | label_demo.py | Backup-eric645/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | 15 | 2018-12-30T21:18:05.000Z | 2022-01-30T13:17:07.000Z | import ui
def make_label(text, font, alignment, background_color, x, y):
    """Create a ui.Label sized exactly to *text* rendered in *font*,
    positioned with its top-left corner at (x, y)."""
    width, height = ui.measure_string(
        text, font=font, alignment=alignment, max_width=0)
    return ui.Label(
        text=text,
        font=font,
        alignment=alignment,
        background_color=background_color,
        frame=(x, y, width, height),
        border_width=1,
    )
v = ui.load_view()
# Fonts tried during development: ('Helvetica', 80), ('TimesNewRomanPSMT', 80).
label = make_label(
    'AajJpPyY iIlL', ('Avenir-Roman', 80), ui.ALIGN_LEFT,
    'lightgray', 30, 350)
v.add_subview(label)
v.present('sheet')
7fb6655e444707235655b6b04ce814593df93517 | 153 | py | Python | dev.py | wabscale/flasq | ea5cba81c3259441f64e96a7c63d2234728100a3 | [
"MIT"
] | 1 | 2022-02-25T02:12:10.000Z | 2022-02-25T02:12:10.000Z | dev.py | wabscale/flasq | ea5cba81c3259441f64e96a7c63d2234728100a3 | [
"MIT"
] | null | null | null | dev.py | wabscale/flasq | ea5cba81c3259441f64e96a7c63d2234728100a3 | [
"MIT"
] | null | null | null | from web import app
from glob import glob
# Development server: reload whenever any HTML template changes.
template_files = glob('./web/templates/**.html')
app.run(
    debug=True,
    host='0.0.0.0',
    port=5000,
    extra_files=template_files,
)
| 15.3 | 47 | 0.627451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.222222 |
7fb71d82abd96a4b2037456c5481862b49d126e5 | 6,010 | py | Python | src/django/addr/addr/settings.py | deshk04/addressparser | 03c33859bb585a6cf767e0f4d3dfd23cabd2d78b | [
"MIT"
] | null | null | null | src/django/addr/addr/settings.py | deshk04/addressparser | 03c33859bb585a6cf767e0f4d3dfd23cabd2d78b | [
"MIT"
] | null | null | null | src/django/addr/addr/settings.py | deshk04/addressparser | 03c33859bb585a6cf767e0f4d3dfd23cabd2d78b | [
"MIT"
] | null | null | null | """
Django settings for addr project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from datetime import timedelta
from dotenv import load_dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'txynano6xva_dfzjhiopg5wak5bqk^eecw^3+0xv)09j23k%++'
load_dotenv('/addr/.env')
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
PROJ_ENV = os.getenv("PROJ_ENV")
ALLOWED_HOSTS = ['*']
PROJECT_ROOT_PATH = '/addr/src/'
ADDR_TEMPLATE_FOLDER = PROJECT_ROOT_PATH + '/static/html'
ADDR_STATIC_ROOT = PROJECT_ROOT_PATH + '/static/'
ADDR_STATIC_FOLDER = PROJECT_ROOT_PATH + '/static/html'
DJANGO_LOGFILE = '/addr/ops/logs/uwsgi/django.log'
# Application definition
INSTALLED_APPS = []
if PROJ_ENV == 'dev':
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adr',
'rest_framework',
'rest_framework.authtoken',
'corsheaders'
]
else:
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adr',
'rest_framework',
'rest_framework.authtoken',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# ),
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=240),
'REFRESH_TOKEN_LIFETIME': timedelta(days=6),
'SIGNING_KEY': 's1d'
}
MIDDLEWARE = []
if PROJ_ENV == 'dev':
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
else:
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'addr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ADDR_TEMPLATE_FOLDER],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'addr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),
'NAME': os.environ.get('POSTGRES_DB', 'addrmain'),
'USER': os.environ.get('POSTGRES_USER', 'addruser'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
'HOST': os.environ.get('POSTGRES_SVCNM', os.environ.get('POSTGRES_HOST', 'addr_database')),
'PORT': os.environ.get('POSTGRES_PORT', '5432'),
},
# 'logs': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'logs',
# 'USER': os.environ.get('PGUSER', 'dartuser'),
# 'PASSWORD': os.environ.get('PGPASSWORD', ''),
# 'HOST': os.environ.get('PGHOST', 'localhost'),
# 'PORT': os.environ.get('PGPORT', '5432'),
# },
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Australia/Melbourne'
USE_I18N = True
USE_L10N = True
# USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
ADDR_STATIC_ROOT,
]
DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 5242880
| 29.033816 | 99 | 0.67604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,003 | 0.666057 |
7fb79206ce5753b6ec8f399db4b0f99941b1fce3 | 1,243 | py | Python | Hash PWs for Cisco/setupPW.py | NetworkNick-US/PythonScripts | b8441e4d433be59f4b3c4bd5c61543b2ef66ae4b | [
"MIT"
] | null | null | null | Hash PWs for Cisco/setupPW.py | NetworkNick-US/PythonScripts | b8441e4d433be59f4b3c4bd5c61543b2ef66ae4b | [
"MIT"
] | null | null | null | Hash PWs for Cisco/setupPW.py | NetworkNick-US/PythonScripts | b8441e4d433be59f4b3c4bd5c61543b2ef66ae4b | [
"MIT"
] | null | null | null | import getpass
import os
import platform
import subprocess
class Style:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
UNDERLINE = '\033[4m'
RESET = '\033[0m'
BLUEBACKGROUND = '\x1b[1;37;46m'
def clearConsole():
clear_con = 'cls' if platform.system().lower() == "windows" else 'clear'
os.system(clear_con)
def hashPass(salted, pwd):
return subprocess.getoutput("openssl passwd -salt " + salted + " -1 " + pwd)
def main():
os.system("")
print("This script will help you hash a password for use with your Ansible playbooks for IOS and IOS XE devices.\n",
Style.RED, "PLEASE NOTE: CURRENTLY NXOS_USER REQUIRES CLEAR-TEXT PASSWORDS", Style.RESET)
salt = getpass.getpass(prompt="Please enter a random string as your salt: ", stream=None)
userpasswd = getpass.getpass(prompt="Password: ", stream=None)
print("The value you should be using for your variable 'fallbackAdminPW' is: " + hashPass(salt, userpasswd))
print(Style.BLUE + "\nVisit NetworkNick.us for more Ansible and Python tools!\n" + Style.RESET)
if __name__ == '__main__':
main()
| 29.595238 | 120 | 0.65889 | 280 | 0.225261 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.432824 |
7fb8126383e7f0de2870b766de35128210bc222a | 2,197 | py | Python | web-scraping-challenge/scrape_mars.py | Felicia620/web-scraping-challenge | 6ad1087c940ad5b54ca5fcc889ee4a864b4ccead | [
"ADSL"
] | null | null | null | web-scraping-challenge/scrape_mars.py | Felicia620/web-scraping-challenge | 6ad1087c940ad5b54ca5fcc889ee4a864b4ccead | [
"ADSL"
] | null | null | null | web-scraping-challenge/scrape_mars.py | Felicia620/web-scraping-challenge | 6ad1087c940ad5b54ca5fcc889ee4a864b4ccead | [
"ADSL"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import pymongo
from splinter import Browser
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import pandas as pd
def init_browser():
    """Return a visible (non-headless) Chrome splinter browser driven by a
    chromedriver binary expected on the current path."""
    opts = {"executable_path": "chromedriver"}
    return Browser("chrome", headless=False, **opts)
def scrape():
    """Scrape several Mars websites and return a dict with the latest news,
    a featured image URL, a facts table (HTML), and hemisphere image links.

    Requires a local chromedriver and live network access; each section below
    drives the shared browser to a different site.
    """
    browser = init_browser()
    mars_dict = {}  # NOTE(review): never populated or returned — looks vestigial
    # --- Latest news headline and teaser ---
    url = "https://redplanetscience.com/"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    latest_news_title = soup.find("div", class_ = "content_title").text
    latest_news_paragraph = soup.find("div", class_ = "article_teaser_body").text
    # --- Featured image (URL is hard-coded; the page is visited but not parsed) ---
    url = "https://spaceimages-mars.com/"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    featured_image_url = "https://spaceimages-mars.com/image/featured/mars1.jpg"
    browser.visit(featured_image_url)
    # --- Mars facts table, rendered back to HTML via pandas ---
    url = "https://galaxyfacts-mars.com/"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    mars_facts_table = pd.read_html(url)[1]  # second table on the page
    df = mars_facts_table
    html_table = df.to_html()
    # --- Hemisphere names and full-resolution sample image links ---
    url = "https://marshemispheres.com/"
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")
    hemisphere_image_urls = []
    links = browser.find_by_css("a.product-item img")
    for i in range(len(links)):
        hemisphere = {}
        # Re-query each iteration: clicking navigates away, invalidating `links`.
        browser.find_by_css("a.product-item img")[i].click()
        sample = browser.links.find_by_text("Sample").first
        hemisphere["img_url"] = sample["href"]
        hemisphere["title"] = browser.find_by_css("h2.title").text
        hemisphere_image_urls.append(hemisphere)
        browser.back()
    mars_data = {
        "latest_news_title": latest_news_title,
        "latest_news_paragraph": latest_news_paragraph,
        "featured_image_url": featured_image_url,
        "html_table": html_table,
        "hemisphere_image_urls": hemisphere_image_urls}
    browser.quit()
    return mars_data
7fb8c4bd3537291e9cb99243f7a04c75639a3244 | 332 | py | Python | proyo/templates/create/python/{{package_name}}/__main__.py | MatthewScholefield/proyo | ca5e0c55b7f8c2ea33dd4eea7525d36af48ea58b | [
"MIT"
] | null | null | null | proyo/templates/create/python/{{package_name}}/__main__.py | MatthewScholefield/proyo | ca5e0c55b7f8c2ea33dd4eea7525d36af48ea58b | [
"MIT"
] | 3 | 2019-08-07T04:26:45.000Z | 2019-08-07T05:21:17.000Z | proyo/templates/create/python/{{package_name}}/__main__.py | MatthewScholefield/proyo | ca5e0c55b7f8c2ea33dd4eea7525d36af48ea58b | [
"MIT"
] | null | null | null | # ~ if project_type == 'script':
# ~ with proyo.config_as(var_regex=r'# ---(.*?)--- #'):
# ---gen_license_header('#')--- #
# ~ #
from argparse import ArgumentParser
def main():
    """Entry point of the generated script: parse command-line arguments."""
    # '{{...}}' is a proyo template placeholder substituted at generation time.
    parser = ArgumentParser(description='{{description or tagline or ""}}')
    args = parser.parse_args()  # unused for now; generated projects add options here
if __name__ == '__main__':
    main()
# ~ #
| 20.75 | 75 | 0.60241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.524096 |
7fb9490ea330e6c5777107c4f40c564b13d5c3b1 | 5,435 | py | Python | get_sd_ou/databaseUtil.py | ErfanPY/siencedirect-authors-data | 5ea248954efbdb4f84820870ed3e55a930c42732 | [
"MIT"
] | null | null | null | get_sd_ou/databaseUtil.py | ErfanPY/siencedirect-authors-data | 5ea248954efbdb4f84820870ed3e55a930c42732 | [
"MIT"
] | null | null | null | get_sd_ou/databaseUtil.py | ErfanPY/siencedirect-authors-data | 5ea248954efbdb4f84820870ed3e55a930c42732 | [
"MIT"
] | null | null | null | import logging
import mysql.connector
from get_sd_ou.config import Config
logger = logging.getLogger('mainLogger')
def init_db(host=None, user=None, password=None, port=None):
    """Open and return a MySQL connection.

    Any argument left as None falls back to the corresponding Config default.
    """
    logger.debug('[databaseUtil][init_db][IN]')
    connection = mysql.connector.connect(
        host=host or Config.DATABASE_HOST,
        user=user or Config.DATABASE_USER,
        password=password or Config.DATABASE_PASSWORD,
        port=port or Config.DATABASE_PORT,
    )
    logger.debug('[databaseUtil][init_db][OUT] | db_connection : %s', connection)
    return connection
# INSERT
def insert_article(pii, title='', bibtex='', keywords='', cnx=None, **kwargs):
    """Insert an article row (INSERT IGNORE) and return its id.

    When the row already exists, INSERT IGNORE yields no lastrowid and the
    existing article's id is looked up instead.  Extra **kwargs are ignored.
    """
    query = 'INSERT IGNORE INTO sciencedirect.articles (pii, title, bibtex, keywords) VALUES (%s, %s, %s, %s);'
    params = (pii, title, bibtex, keywords)
    logger.debug('[databaseUtil][insert_article][IN] | pii: %s, sql: %s, val: %s', pii, query, params)
    cursor = cnx.cursor(buffered=True)
    cursor.execute(query, params)
    cnx.commit()
    article_id = cursor.lastrowid or get_article(pii, cnx=cnx)['article_id']
    logger.debug(
        '[databaseUtil][insert_article][OUT] | pii: %s id: %s', pii, article_id)
    return article_id
def insert_author(first_name, last_name, email='', affiliation='', is_coresponde=False, id=None, cnx=None):
    """Insert an author row (INSERT IGNORE) and return its id.

    Authors are keyed as "last|first"; on a duplicate the existing row's id is
    fetched via get_author().  `is_coresponde` and `id` are accepted for caller
    compatibility; only `id` is used (in the debug log).
    """
    name = last_name + '|' + first_name
    query = ("INSERT IGNORE INTO sciencedirect.authors (name, email, affiliation) "
             "VALUES (%s, %s, %s)")
    params = (name, email, affiliation)
    logger.debug('[databaseUtil][insert_author][IN] | name : %s , email: %s, aff: %s, scopus: %s', name, email,
                 affiliation, id)
    cursor = cnx.cursor(buffered=True)
    cursor.execute(query, params)
    cnx.commit()
    author_id = cursor.lastrowid
    if not author_id:
        author_id = get_author(first_name, last_name, email=email, cnx=cnx)['author_id']
    return author_id
def get_article_author_id(article_id, author_id, cnx=None):
    """Return 1 if an (article_id, author_id) link row exists, else 0.

    Bug fix: the previous implementation indexed ``cursor.fetchall()[-1]``,
    which raises IndexError when no matching row exists instead of returning 0.
    ``fetchone()`` returns None in that case, giving the intended 0.
    """
    cursor = cnx.cursor(buffered=True)
    sql = 'SELECT * FROM sciencedirect.article_authors WHERE article_id = %s AND author_id = %s'
    cursor.execute(sql, [article_id, author_id])
    row = cursor.fetchone()
    return 1 if row else 0
def get_status(cnx):
    """Return row counts: total articles, total authors, authors with an email."""
    cursor = cnx.cursor(buffered=True)
    sql_queries = {'articles': 'SELECT count(*) FROM sciencedirect.articles',
                   'authors': 'SELECT count(*) FROM sciencedirect.authors',
                   'emails': 'SELECT count(*) FROM sciencedirect.authors where email is NOT NULL'}
    counts = {}
    for name, query in sql_queries.items():
        cursor.execute(query)
        counts[name] = cursor.fetchone()[0]
    logger.info(f'{counts}')
    return counts
def connect_article_author(article_id, author_id, is_corresponde=0, cnx=None):
    """Link an article to an author (INSERT IGNORE) and return the link-row id.

    On a duplicate link INSERT IGNORE yields no lastrowid, so the existing
    row's presence flag is fetched via get_article_author_id().
    """
    query = ("INSERT IGNORE INTO sciencedirect.article_authors "
             "(article_id, author_id, is_corresponde) VALUES (%s, %s, %s);")
    params = (article_id, author_id, is_corresponde)
    cursor = cnx.cursor(buffered=True)
    cursor.execute(query, params)
    cnx.commit()
    connection_id = cursor.lastrowid
    if not connection_id:
        connection_id = get_article_author_id(article_id, author_id, cnx=cnx)
    logger.debug(
        '[databaseUtil][connect_article_author][OUT] | article_id: %s author_id: %s, connection_id: %s', article_id,
        author_id, connection_id)
    return connection_id
def insert_multi_author(authors_list, cnx=None):
    """Insert every author dict in *authors_list*; return their ids in order."""
    return [insert_author(**author, cnx=cnx) for author in authors_list]
def connect_multi_article_authors(article_id, authors_id_list, cnx=None):
    """Create one article/author link row per id in *authors_id_list*."""
    for one_author_id in authors_id_list:
        connect_article_author(article_id, one_author_id, cnx=cnx)
def insert_article_data(pii, authors, cnx=None, **kwargs):
    """Insert (or look up) an article, insert its authors, and link them.

    Returns the article id.  Extra **kwargs are forwarded to insert_article.
    """
    existing = get_article(pii, cnx)
    if existing:
        article_id = existing.get('article_id')
    else:
        article_id = insert_article(pii=pii, cnx=cnx, **kwargs)
    author_ids = insert_multi_author(authors, cnx=cnx)
    connect_multi_article_authors(article_id, author_ids, cnx=cnx)
    return article_id
# SELECT
def get_author(first_name, last_name, email='', cnx=None):
    """Fetch one author row (as a dict) matching the "last|first" name or the
    email address; returns None when no row matches."""
    name = last_name + '|' + first_name
    logger.debug('[db_util][get_author][IN] | name: %s, email: %s', name, email)
    cursor = cnx.cursor(buffered=True, dictionary=True)
    query = "SELECT * FROM sciencedirect.authors WHERE name = %s OR email = %s LIMIT 1"
    cursor.execute(query, (name, email))
    row = cursor.fetchone()
    cursor.reset()
    return row
def get_article(pii, cnx=None):
    """Fetch one article row (as a dict) by its PII; None when not found."""
    logger.debug('[databaseUtil][get_article][IN] | pii : %s', pii)
    cursor = cnx.cursor(buffered=True, dictionary=True)
    query = "SELECT * FROM sciencedirect.articles WHERE pii = %s LIMIT 1"
    cursor.execute(query, (pii,))
    row = cursor.fetchone()
    cursor.reset()
    logger.debug('[databaseUtil][get_article][OUT] | fetch_res : %s', row)
    return row
def get_articles(cnx=None):
    """Fetch every article row as a list of dicts."""
    logger.debug('[databaseUtil][get_articles][IN]')
    cursor = cnx.cursor(buffered=True, dictionary=True)
    query = "SELECT * FROM sciencedirect.articles"
    cursor.execute(query)
    rows = cursor.fetchall()
    cursor.reset()
    logger.debug('[databaseUtil][get_articles][OUT]')
    return rows
| 35.522876 | 117 | 0.673965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,444 | 0.265685 |
7fba6b53744ed7ffa8875853e834cde754de773e | 5,106 | py | Python | xxhh/rank.py | jannson/Similar | 975527fae5fe63ab63913fa1f24619d616331687 | [
"MIT"
] | 2 | 2015-12-17T17:25:44.000Z | 2019-01-24T16:46:55.000Z | xxhh/rank.py | jannson/Similar | 975527fae5fe63ab63913fa1f24619d616331687 | [
"MIT"
] | null | null | null | xxhh/rank.py | jannson/Similar | 975527fae5fe63ab63913fa1f24619d616331687 | [
"MIT"
] | null | null | null | import sys, os, os.path, re
import codecs
import numpy as np
from scipy.sparse import *
from scipy import *
from sklearn.externals import joblib
import networkx as nx
# Make this script's directory importable and point Django at the project
# settings before the Django model imports below run.
django_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(13, django_path)  # NOTE(review): insertion index 13 looks arbitrary — confirm intent
os.environ['DJANGO_SETTINGS_MODULE'] = 'xxhh.settings'
from django.db.models import Count
from django.db.models import Q
#from xxhh.models import TestLog as XhLogUd
from xxhh.models import XhLogUd
# Numeric weight per user action: 'u' (up-vote) counts +1, 'd' (down-vote) -1.
RATE = {'u': 1, 'd': -1}
def input_test():
    """Parse 'trainsmall.txt' (guid, post_id, score per line) into a list of
    unsaved XhLogUd records; score 1 maps to action 'd', anything else to 'u'."""
    records = []
    with codecs.open('trainsmall.txt', 'r', 'utf-8') as f:
        for line in f:
            items = line.split()
            if len(items) <= 2:
                continue  # skip malformed/short lines
            log = XhLogUd()
            log.guid = items[0]
            log.post_id = int(items[1])
            log.uaction = 'd' if int(items[2]) == 1 else 'u'
            log.pos = 'z'
            log.shiduan = 9
            log.ctime = 9
            # log.save()  # deliberately not persisted: test data only
            records.append(log)
    return records
class Ratings(object):
    """Build user/item index maps from rating logs, score items with PageRank
    over co-rating graphs, and dump ranked item lists to rank*.out files.

    Python 2 code (print statements, xrange).
    """
    def __init__(self):
        #all_objs = list(XhLogUd.objects.exclude(guid=''))
        all_objs = input_test()
        # Assign a dense integer id to each distinct user guid, in first-seen order.
        guid2id = {}
        id2guid = []
        i = 0
        for obj in all_objs:
            if obj.guid not in guid2id:
                guid = obj.guid
                guid2id[guid] = i
                id2guid.append(guid)
                i += 1
        self.guid2id = guid2id
        self.id2guid = id2guid
        # Same dense-id assignment for posts (items).
        max_item = 0  # NOTE(review): never used afterwards
        post2id = {}
        id2post = []
        i = 0
        for obj in all_objs:
            if obj.post_id not in post2id:
                post_id = obj.post_id
                post2id[post_id] = i
                id2post.append(post_id)
                i += 1
        self.post2id = post2id
        self.id2post = id2post
        # Per-user list of (rating, user_id, post_id) triples.
        guids = [[] for _ in id2guid]
        for obj in all_objs:
            guids[guid2id[obj.guid]].append((RATE[obj.uaction.strip()], self.guid2id[obj.guid], self.post2id[obj.post_id]))
        self.guids = guids
        # Per-post sets of users: posts1 = up-voters, posts2 = down-voters.
        posts1 = [set() for _ in id2post]
        posts2 = [set() for _ in id2post]
        for obj in all_objs:
            if obj.uaction == 'u':
                posts1[post2id[obj.post_id]].add(guid2id[obj.guid])
            else:
                posts2[post2id[obj.post_id]].add(guid2id[obj.guid])
        '''
        post_max = 0
        for guid_set in self.posts:
            if post_max < len(guid_set):
                post_max = len(guid_set)
        print "max guids in post", post_max
        '''
        print "all users", len(self.id2guid)
        print "all items", len(self.id2post)
        #sorting users
        #sorted_guids = [(g,len(self.guids[self.guid2id[g]]) ) for i,g in enumerate(self.id2guid)]
        #self.sorted_guids = [g for g,c in sorted(sorted_guids, key = lambda item: -item[1]) if c >= 3]
        #print self.sorted_guids[0:10]
        #print 'len and max rated guid', len(self.sorted_guids), self.sorted_guids[0], len(self.guids[self.guid2id[self.sorted_guids[0]]])
        # Symmetric co-rating matrices: entry (x, y) counts users who rated both
        # posts the same way (weights1 = both up, weights2 = both down).
        weights1 = np.zeros((len(id2post),len(id2post)), dtype=float)
        weights2 = np.zeros((len(id2post),len(id2post)), dtype=float)
        #weights = lil_matrix( (len(id2post), len(id2post)), dtype=float)
        for x in xrange(len(id2post)):
            for y in xrange(len(id2post)):
                if x < y:
                    weights1[x,y] = len(posts1[x] & posts1[y])
                    weights2[x,y] = len(posts2[x] & posts2[y])
                else:
                    # Mirror the upper triangle (diagonal stays zero).
                    weights1[x,y] = weights1[y,x]
                    weights2[x,y] = weights2[y,x]
        #weights1 = weights1/len(self.id2post)
        #weights2 = weights2/len(self.id2post)
        # PageRank over each co-rating graph; final score is the up/down ratio.
        nx_graph = nx.from_numpy_matrix(weights1)
        scores1 = nx.pagerank_numpy(nx_graph)
        print 'score1 complete'
        nx_graph = nx.from_numpy_matrix(weights2)
        scores2 = nx.pagerank_numpy(nx_graph)
        print 'score2 complete'
        scores = [scores1[i]/scores2[i] for i in xrange(len(id2post))]
        #nx_graph = nx.from_scipy_sparse_matrix(self.weights)
        #scores = nx.pagerank_scipy(nx_graph)
        # Rank posts by each score variant, highest first, and dump to disk.
        res = sorted( [(scores[i],id2post[i]) for i in xrange(len(id2post))] , reverse=True)
        res1 = sorted( [(scores1[i],id2post[i]) for i in xrange(len(id2post))] , reverse=True)
        res2 = sorted( [(scores2[i],id2post[i]) for i in xrange(len(id2post))] , reverse=True)
        with open('rank.out', 'w') as f:
            for r in res:
                f.write('%d %f\n' % (r[1], r[0]) )
        with open('rank_1.out', 'w') as f:
            for r in res1:
                f.write('%d %f\n' % (r[1], r[0]) )
        with open('rank_2.out', 'w') as f:
            for r in res2:
                f.write('%d %f\n' % (r[1], r[0]) )
    def by_user(self, id):
        """Return the (rating, user_id, post_id) triples for internal user *id*."""
        return self.guids[id]
    def all_users(self):
        """Yield internal ids of users sorted by rating count.

        NOTE(review): self.sorted_guids is only assigned in the commented-out
        block of __init__ above, so calling this raises AttributeError as-is.
        """
        for guid in self.sorted_guids:
            yield self.guid2id[guid]
ratings = Ratings()
| 33.372549 | 138 | 0.53584 | 3,958 | 0.775166 | 96 | 0.018801 | 0 | 0 | 0 | 0 | 1,101 | 0.215629 |
7fba717dbec6526dc8e6412f2a8a1599a712edeb | 6,248 | py | Python | qcfractal/tests/test_server.py | MolSSI/dqm_server | ceff64fe032590095e0f865bc1d0c2da4684404e | [
"BSD-3-Clause"
] | 113 | 2018-08-04T20:33:41.000Z | 2022-02-08T21:17:52.000Z | qcfractal/tests/test_server.py | MolSSI/dqm_server | ceff64fe032590095e0f865bc1d0c2da4684404e | [
"BSD-3-Clause"
] | 665 | 2018-08-04T14:16:53.000Z | 2022-03-25T15:37:41.000Z | qcfractal/tests/test_server.py | MolSSI/dqm_server | ceff64fe032590095e0f865bc1d0c2da4684404e | [
"BSD-3-Clause"
] | 40 | 2018-08-16T21:41:02.000Z | 2022-01-26T15:07:06.000Z | """
Tests the DQM Server class
"""
import json
import os
import threading
import pytest
import requests
import qcfractal.interface as ptl
from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler
from qcfractal.testing import (
await_true,
find_open_port,
pristine_loop,
test_server,
using_geometric,
using_rdkit,
using_torsiondrive,
)
meta_set = {"errors", "n_inserted", "success", "duplicates", "error_description", "validation_errors"}
def test_server_information(test_server):
    """The server-information endpoint reports its name, heartbeat, and counts
    for the core storage tables."""
    client = ptl.FractalClient(test_server)
    info = client.server_information()
    assert {"name", "heartbeat_frequency", "counts"} <= info.keys()
    assert info["counts"].keys() >= {"molecule", "kvstore", "result", "collection"}
def test_storage_socket(test_server):
    """Round-trip a collection document through the raw /collection REST API:
    POST it, GET it back by name, and query the /collection/<id> sub-resource."""
    storage_api_addr = test_server.get_address() + "collection"  # targets the /collection endpoint of the FractalServer
    storage = {
        "collection": "TorsionDriveRecord",
        "name": "Torsion123",
        "something": "else",
        "array": ["54321"],
        "visibility": True,
        "view_available": False,
        "group": "default",
    }
    # Cast collection type to lower since the server-side does it anyways
    storage["collection"] = storage["collection"].lower()
    # Insert should succeed with exactly one new document.
    r = requests.post(storage_api_addr, json={"meta": {}, "data": storage})
    assert r.status_code == 200, r.reason
    pdata = r.json()
    assert pdata["meta"].keys() == meta_set
    assert pdata["meta"]["n_inserted"] == 1
    # Query it back by (collection, name).
    r = requests.get(
        storage_api_addr, json={"meta": {}, "data": {"collection": storage["collection"], "name": storage["name"]}}
    )
    print(r.content)
    assert r.status_code == 200, r.reason
    pdata = r.json()
    col_id = pdata["data"][0].pop("id")
    # Strip server-generated default fields before comparing to what was posted.
    pdata["data"][0].pop("tags", None)
    pdata["data"][0].pop("tagline", None)
    pdata["data"][0].pop("provenance", None)
    pdata["data"][0].pop("view_url_hdf5", None)
    pdata["data"][0].pop("view_url_plaintext", None)
    pdata["data"][0].pop("view_metadata", None)
    pdata["data"][0].pop("description", None)
    assert pdata["data"][0] == storage
    # Test collection id sub-resource
    r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {}}).json()
    assert r["meta"]["success"] is True
    assert len(r["data"]) == 1
    assert r["data"][0]["id"] == col_id
    # A name filter that does not match returns an empty (but successful) result.
    r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {"name": "wrong name"}}).json()
    assert r["meta"]["success"] is True
    assert len(r["data"]) == 0
def test_bad_collection_get(test_server):
    """GET on sub-resources of a nonexistent collection id returns HTTP 200
    with meta.success == False."""
    base = test_server.get_address() + "collection/1234/"
    for endpoint in ("entry", "value", "list", "molecule"):
        url = base + endpoint
        resp = requests.get(url, json={"meta": {}, "data": {}})
        assert resp.status_code == 200, f"{resp.reason} {url}"
        assert resp.json()["meta"]["success"] is False, url
def test_bad_collection_post(test_server):
    """POST to a nonexistent collection id (and its sub-resources) is rejected
    with meta.success == False."""
    payload = {
        "collection": "TorsionDriveRecord",
        "name": "Torsion123",
        "something": "else",
        "array": ["54321"],
        "visibility": True,
        "view_available": False,
    }
    # Cast collection type to lower since the server-side does it anyways
    payload["collection"] = payload["collection"].lower()
    base = test_server.get_address() + "collection/1234"
    for suffix in ("", "/value", "/entry", "/list", "/molecule"):
        resp = requests.post(base + suffix, json={"meta": {}, "data": payload})
        assert resp.status_code == 200, resp.reason
        assert resp.json()["meta"]["success"] is False
def test_bad_view_endpoints(test_server):
    """Tests that certain misspellings of the view endpoints result in 404s"""
    addr = test_server.get_address()
    bad_paths = (
        "collection//value",
        "collection/234/values",
        "collections/234/value",
        "collection/234/view/value",
        "collection/value",
        "collection/S22",
    )
    for path in bad_paths:
        assert requests.get(addr + path).status_code == 404
@pytest.mark.slow
def test_snowflakehandler_restart():
    """restart() spawns a fresh server process, and both processes have
    terminated once the handler's context exits."""
    with FractalSnowflakeHandler() as server:
        server.client()
        first_proc = server._qcfractal_proc
        server.restart()
        server.client()
        second_proc = server._qcfractal_proc
    assert first_proc != second_proc
    assert first_proc.poll() is not None
    assert second_proc.poll() is not None
def test_snowflakehandler_log():
    """The server log shows the no-SSL notice, contains no task chatter, and
    the process has exited after the context closes."""
    with FractalSnowflakeHandler() as server:
        server_proc = server._qcfractal_proc
        assert "No SSL files passed in" in server.show_log(show=False, nlines=100)
        assert "0 task" not in server.show_log(show=False, nlines=100)
    assert server_proc.poll() is not None
@pytest.mark.slow
@using_geometric
@using_torsiondrive
@using_rdkit
def test_snowflake_service():
    """End-to-end: submit a torsiondrive service for HOOH on a throwaway
    snowflake server and poll until the procedure completes."""
    with FractalSnowflakeHandler() as server:
        client = server.client()
        hooh = ptl.data.get_molecule("hooh.json")
        # Geometric options
        tdinput = {
            "initial_molecule": [hooh],
            "keywords": {"dihedrals": [[0, 1, 2, 3]], "grid_spacing": [90]},
            "optimization_spec": {"program": "geometric", "keywords": {"coordsys": "tric"}},
            "qc_spec": {"driver": "gradient", "method": "UFF", "basis": None, "keywords": None, "program": "rdkit"},
        }
        ret = client.add_service([tdinput])
        def geometric_await():
            # Completion predicate polled by await_true below.
            td = client.query_procedures(id=ret.ids)[0]
            return td.status == "COMPLETE"
        # On failure the assert message shows the procedure's final state.
        assert await_true(60, geometric_await, period=2), client.query_procedures(id=ret.ids)[0]
| 32.884211 | 116 | 0.645327 | 0 | 0 | 0 | 0 | 1,241 | 0.198624 | 0 | 0 | 1,878 | 0.300576 |
7fbaf525baf4d7f1485c7c18c534796d329fc79c | 2,278 | py | Python | WLED_WiFi/WLED_WiFi.py | nirkons/Prismatik-WLED-WiFi | 7716d3c8e045c968a5c614b7847b51f0387ec548 | [
"MIT"
] | null | null | null | WLED_WiFi/WLED_WiFi.py | nirkons/Prismatik-WLED-WiFi | 7716d3c8e045c968a5c614b7847b51f0387ec548 | [
"MIT"
] | null | null | null | WLED_WiFi/WLED_WiFi.py | nirkons/Prismatik-WLED-WiFi | 7716d3c8e045c968a5c614b7847b51f0387ec548 | [
"MIT"
] | null | null | null | import lightpack, socket, configparser, os
from time import sleep
import sys
class WLED_WiFi:
    """Bridge Prismatik (Lightpack) colours to a WLED strip over UDP (WARLS)."""

    def __init__(self):
        # Read the INI config first, then connect to the local Prismatik API;
        # exit with status 1 if Prismatik is not reachable.
        self.loadConfig()
        self.lp = lightpack.Lightpack()
        self.status = False
        try:
            self.lp.connect()
        except lightpack.CannotConnectError as e:
            print(repr(e))
            sys.exit(1)

    def loadConfig(self):
        """Load WLED_WiFi.ini (next to this script) into instance attributes."""
        self.scriptDir = os.path.dirname(os.path.realpath(__file__))
        self.config = configparser.ConfigParser()
        self.config.read(self.scriptDir + '/WLED_WiFi.ini')
        self.fps = self.config.getint('WLED', 'FPS')
        self.udpBroadcastIp = self.config.get('WLED', 'UDP_IP_ADDRESS')
        self.udpPort = self.config.getint('WLED', 'UDP_PORT_NO')
        self.originnumled = self.config.getint('WLED', 'ORIGINNUMLED')  # LEDs Prismatik reports
        self.numled = self.config.getint('WLED', 'NUMLED')              # LEDs on the WLED strip

    def run(self):
        """Forever: grab colours from Prismatik, duplicate some to stretch the
        source LED count toward the strip's count, and send a WARLS UDP packet.
        """
        counter = 0
        leddiff=abs(self.numled-self.originnumled)  # only used by the dead code below
        half = int(round(self.originnumled/2))      # only used by the dead code below
        while(True):
            d = self.lp.getColoursFromAll()
            # [2, 2] is the WARLS packet header: protocol 2, 2 s timeout.
            v = [2, 2]
            for i in d:
                v.append(d[i][0])
                v.append(d[i][1])
                v.append(d[i][2])
                # Duplicate every other colour once, and every tenth colour a
                # second time, to pad the output toward self.numled LEDs.
                if (counter % 2) == 0:
                    v.append(d[i][0])
                    v.append(d[i][1])
                    v.append(d[i][2])
                if (counter % 10) == 0:
                    v.append(d[i][0])
                    v.append(d[i][1])
                    v.append(d[i][2])
                counter = counter + 1
                # NOTE(review): the triple-quoted text below is a bare string
                # statement (dead code kept for reference), evaluated and
                # discarded every iteration.
                """if counter == (half):
                    for x in range(int(leddiff/2)):
                        v.append(d[i][0])
                        v.append(d[i][1])
                        v.append(d[i][2])
                    counter = counter + 1
                else:
                    counter = counter + 1
                """
                if counter == (self.numled-1):
                    counter = 0
            Message = bytes(v)
            # A fresh UDP socket per frame; datagram goes to the WLED device.
            clientSock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
            clientSock.sendto (Message, (self.udpBroadcastIp, self.udpPort))
            sleep(1/self.fps)  # throttle to the configured frame rate
if __name__ == "__main__":
    # Start streaming only when run as a script, not when imported.
    warls = WLED_WiFi()
    warls.run()
| 35.59375 | 76 | 0.47849 | 2,166 | 0.950834 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.190518 |
7fbb3e0a262f49733f933fde19767b6609edf780 | 16,513 | py | Python | learning_experiments/src3/eval_trained_model.py | TommasoBendinelli/spatial_relations_experiments | cd165437835a37c947ccf13a77531a5a42d4c925 | [
"MIT"
] | null | null | null | learning_experiments/src3/eval_trained_model.py | TommasoBendinelli/spatial_relations_experiments | cd165437835a37c947ccf13a77531a5a42d4c925 | [
"MIT"
] | null | null | null | learning_experiments/src3/eval_trained_model.py | TommasoBendinelli/spatial_relations_experiments | cd165437835a37c947ccf13a77531a5a42d4c925 | [
"MIT"
] | null | null | null | import argparse
import os
import os.path as osp
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from scipy.stats import norm
import matplotlib
# matplotlib.use('agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import subprocess
import shutil
import chainer
from chainer import training
from chainer.training import extensions
from chainer.dataset import concat_examples
from chainer.backends.cuda import to_cpu
import chainer.functions as F
from chainer import serializers
import net_200x200 as net
import data_generator
from config_parser import ConfigParser
from utils import *
def save_reconstruction_arrays(data, model, folder_name="."):
    """Save ground-truth and reconstructed arrays for a handful of samples.

    data        -- tuple (branch0, branch1) of NCHW-ordered arrays
    model       -- trained siamese model; model(b0, b1) returns per-branch
                   reconstructions exposing a ``.data`` array
    folder_name -- directory whose stale output files are removed first

    Results are written to result/reconstruction_arrays/train.npz with keys
    gt_b0/gt_b1 (inputs) and rec_b0/rec_b1 (reconstructions), in NHWC order.
    """
    print("Clear Images from Last Reconstructions\n")
    # Remove only plain files (names containing a dot) from the output folder.
    # Join paths properly: the original concatenated strings, which broke
    # whenever folder_name lacked a trailing slash.
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(osp.join(folder_name, filename))

    print("Saving Array RECONSTRUCTIONS\n")
    (train_b0, train_b1) = data

    # Pick 10 samples spread evenly across the dataset.
    no_images = 10
    train_ind = np.linspace(0, len(train_b0) - 1, no_images, dtype=int)
    result = model(train_b0[train_ind], train_b1[train_ind])

    # NCHW -> NHWC so the arrays are directly plottable.
    gt_b0 = np.swapaxes(train_b0[train_ind], 1, 3)
    gt_b1 = np.swapaxes(train_b1[train_ind], 1, 3)
    rec_b0 = np.swapaxes(result[0].data, 1, 3)
    rec_b1 = np.swapaxes(result[1].data, 1, 3)

    output = {"gt_b0": gt_b0, "gt_b1": gt_b1, 'rec_b0': rec_b0, 'rec_b1': rec_b1}
    np.savez(os.path.join("result", "reconstruction_arrays/train" + ".npz"), **output)
def eval_seen_data(data, model, groups, folder_name=".", pairs=None, labels=None):
    """Scatter-plot latent coordinates of seen samples, one figure per
    (group, label, latent-pair) combination, saved into folder_name.

    data    -- tuple (branch0, branch1) of NCHW arrays
    model   -- trained model exposing get_latent(b0, b1)
    groups  -- mapping group_key -> iterable of labels to visualise
    pairs   -- latent-dimension index pairs to plot; defaults to the first
               three pairwise combinations (the original hard-coded this and
               silently ignored the parameter)
    labels  -- per-sample labels matching data; defaults to the module-level
               ``train_labels`` for backward compatibility
    """
    print("Clear Images from Last Seen Scatter\n")
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(osp.join(folder_name, filename))

    print("Evaluating on SEEN data\n")
    (data_b0, data_b1) = data

    # Subsample to roughly n items. The step must be an int: the original
    # used '/', whose float result is an invalid slice step on Python 3.
    n = 100
    every_nth = max(1, len(data_b0) // n)

    axis_ranges = [-5, 5]
    if labels is None:
        labels = train_labels  # original behaviour: read the module global
    if pairs is None:
        pairs = [(0, 1), (0, 2), (1, 2)]

    for group_key in groups:
        for label in groups[group_key]:
            print(("Visualising label:\t{0}, Group:\t{1}".format(label, group_key)))
            indecies = [i for i, x in enumerate(labels) if x == label]
            filtered_data_b0 = data_b0.take(indecies, axis=0)[::every_nth]
            filtered_data_b1 = data_b1.take(indecies, axis=0)[::every_nth]

            latent_mu = model.get_latent(filtered_data_b0, filtered_data_b1).data

            for pair in pairs:
                plt.scatter(latent_mu[:, pair[0]], latent_mu[:, pair[1]], c='red', label=label, alpha=0.75)
                plt.grid()
                # major axes through the origin
                plt.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
                plt.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
                plt.xlim(axis_ranges[0], axis_ranges[1])
                plt.ylim(axis_ranges[0], axis_ranges[1])
                plt.xlabel("Z_" + str(pair[0]))
                plt.ylabel("Z_" + str(pair[1]))
                plt.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
                plt.savefig(osp.join(folder_name, "group_" + str(group_key) + "_" + label + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
                plt.close()
def eval_seen_data_single(data, model, labels=None, folder_name=".", pairs=None):
    """Show individual seen samples: 3D point clouds of both branches next to
    scatter plots of their latent coordinates (one window per sample).

    labels -- per-sample labels; default changed from a mutable ``[]`` to
              None (same effective behaviour, no shared-state pitfall).
    """
    print("Clear Images from Last Seen Scatter Single\n")
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(osp.join(folder_name, filename))

    print("Evaluating on SEEN SINGLE data\n")
    (data_b0, data_b1) = data
    if labels is None:
        labels = []

    axis_ranges = [-15, 15]

    # Subsample to roughly n items; the slice step must be an int (the
    # original used float division, invalid as a step on Python 3).
    n = 100
    every_nth = max(1, len(data_b0) // n)

    filtered_data_b0 = data_b0[::every_nth]
    filtered_data_b1 = data_b1[::every_nth]
    labels = labels[::every_nth]

    latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))

    # NCHW -> NHWC: each sample becomes a (200, 200, 3) coordinate map.
    filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
    filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)

    def _nonzero_xyz(cloud):
        # Keep non-origin points of a (200, 200, 3) map (all-zero rows mark
        # "no point"), subsampled by 3 for faster plotting.
        pts = cloud.reshape(200 * 200, 3)
        pts = pts[np.any(pts != 0, axis=1)]
        return pts[::3, 0], pts[::3, 1], pts[::3, 2]

    # Every 33rd subsampled item gets its own figure.
    for i in range(0, len(latent[0]), 33):
        fig = plt.figure()
        fig.canvas.set_window_title(labels[i])

        ax = fig.add_subplot(1, len(pairs) + 1, 1, projection='3d')
        xs_0, ys_0, zs_0 = _nonzero_xyz(filtered_data_b0[i])
        ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
        xs_1, ys_1, zs_1 = _nonzero_xyz(filtered_data_b1[i])
        ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")

        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(1, len(pairs) + 1, j + 2)
            ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes through the origin
            ax.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
            ax.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))

        plt.show()
def eval_unseen_data(data, model, folder_name=".", pairs=None):
    """Inspect unseen samples one by one: top row shows the point clouds and
    the latent code for (b0, b1); bottom row shows the branch-swapped view.

    The repeated, quadratic pure-Python point filtering of the original is
    replaced by one vectorized helper.
    """
    print("Clear Images from Last Unseen Scatter\n")
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(osp.join(folder_name, filename))

    print("Evaluating on UNSEEN data\n")
    (data_b0, data_b1) = data

    axis_ranges = [-5, 5]

    # Visualise every 2nd sample.
    every_nth = 2
    filtered_data_b0 = data_b0[::every_nth]
    filtered_data_b1 = data_b1[::every_nth]

    # Latent codes for both branch orders, to inspect (a)symmetry.
    latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
    latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))

    # NCHW -> NHWC: each sample becomes a (200, 200, 3) coordinate map.
    filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
    filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)

    def _nonzero_xyz(cloud):
        # Keep non-origin points (all-zero rows mark "no point"),
        # subsampled by 3 for faster plotting.
        pts = cloud.reshape(200 * 200, 3)
        pts = pts[np.any(pts != 0, axis=1)]
        return pts[::3, 0], pts[::3, 1], pts[::3, 2]

    for i in range(len(filtered_data_b0)):
        print(("{0}/{1}".format(i, len(latent[0]))))
        fig = plt.figure()

        # Top row: clouds in (b0 red, b1 cyan) plus the latent code.
        ax = fig.add_subplot(2, 4, 1, projection='3d')
        xs_0, ys_0, zs_0 = _nonzero_xyz(filtered_data_b0[i])
        ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
        xs_1, ys_1, zs_1 = _nonzero_xyz(filtered_data_b1[i])
        ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")

        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, 4, j + 2)
            ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes through the origin
            ax.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
            ax.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))

        # Bottom row: colours swapped, latent code of the flipped order.
        ax = fig.add_subplot(2, 4, 5, projection='3d')
        ax.scatter(xs_1, ys_1, zs_1, c='r', alpha=0.5)
        ax.scatter(xs_0, ys_0, zs_0, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")

        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, 4, j + 6)
            ax.scatter(latent_flipped[pair[0], i], latent_flipped[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes through the origin
            ax.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
            ax.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))

        plt.show()
def eval_unseen_time(data, model, folder_name=".", pairs=None):
    """Visualise unseen data through time: for each stored chunk ("npz"),
    plot the first and last frame's point clouds plus the latent trajectory
    coloured by time (cool colormap), for both branch orders.
    """
    print("Clear Images from Last Unseen Scatter\n")
    # NOTE(review): folder_name + x relies on folder_name ending with '/'
    # (all call sites in this file pass a trailing slash) — confirm.
    all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
    list(map(lambda x : os.remove(folder_name + x), all_files))
    print("Evaluating on UNSEEN data through time\n")
    # Colour encodes time within a chunk (early = one end of 'cool').
    cmap = plt.cm.get_cmap('cool')
    (data_b0, data_b1) = data
    axis_ranges = [-20, 20]
    # pairs = [(0,1), (0,2), (1,2)]
    # NOTE(review): the 'pairs' parameter is overridden here — intentional?
    pairs = [(0,1), (2,3)]
    # Data is assumed to consist of npz_files consecutive chunks of
    # npz_size frames each — TODO confirm against the data generator.
    npz_size = 50
    npz_files = 4
    for k in range(npz_files):
        # NOTE(review): the slice end '(k+1) * npz_size - 1' drops the last
        # frame of every chunk — confirm whether that is intended.
        filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
        filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
        # Latent codes for both branch orders, to inspect (a)symmetry.
        latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
        latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))
        # NCHW -> NHWC so each frame is a (200, 200, 3) coordinate map.
        filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
        filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
        print(("{0}/{1}".format(k, npz_files)))
        fig = plt.figure()
        ###################
        #### FIRST ROW ####
        ###################
        # First frame of the chunk: b0 in red, b1 in cyan.
        ax = fig.add_subplot(2, len(pairs) + 2, 1, projection='3d')
        points = filtered_data_b0[1].reshape(200*200,3)
        # Keep rows with at least one non-zero coordinate (all-zero = no point).
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_0_first = filtered_points[...,0][::3]
        ys_0_first = filtered_points[...,1][::3]
        zs_0_first = filtered_points[...,2][::3]
        ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='r', alpha=0.5)
        points = filtered_data_b1[1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_1_first = filtered_points[...,0][::3]
        ys_1_first = filtered_points[...,1][::3]
        zs_1_first = filtered_points[...,2][::3]
        ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        # Last frame of the chunk.
        ax = fig.add_subplot(2, len(pairs) + 2, 2, projection='3d')
        points = filtered_data_b0[-1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_0_last = filtered_points[...,0][::3]
        ys_0_last = filtered_points[...,1][::3]
        zs_0_last = filtered_points[...,2][::3]
        ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='r', alpha=0.5)
        points = filtered_data_b1[-1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_1_last = filtered_points[...,0][::3]
        ys_1_last = filtered_points[...,1][::3]
        zs_1_last = filtered_points[...,2][::3]
        ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        # Latent trajectory over the chunk, coloured by time index.
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, len(pairs) + 2, j + 3)
            for i in range(len(latent[0])):
                x = (latent[pair[0], i], latent[pair[1], i])
                rgba = cmap(i/float(npz_size))
                ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
        ##################
        ### SECOND ROW ###
        ##################
        # Same clouds with branch colours swapped, plus flipped-order latents.
        ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 3, projection='3d')
        ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='r', alpha=0.5)
        ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 4, projection='3d')
        ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='r', alpha=0.5)
        ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, len(pairs) + 2, j + len(pairs) + 5)
            for i in range(len(latent_flipped[0])):
                x = (latent_flipped[pair[0], i], latent_flipped[pair[1], i])
                rgba = cmap(i/float(npz_size))
                ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
        # plt.savefig(osp.join(folder_name, "npz_" + str(k) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
        # plt.close()
        plt.show()
if __name__ == "__main__":
    # 'itertools' is used for the latent-pair combinations below but was
    # missing from the module imports in the original script (NameError).
    import itertools

    ignore = ["unlabelled", "train"]

    generator = data_generator.DataGenerator()
    train_b0, train_b1, train_labels, train_concat, train_vectors, test_b0, test_b1, test_labels, test_concat, test_vectors, unseen_b0, unseen_b1,\
    unseen_labels, groups = generator.generate_dataset(ignore=ignore, args=None)

    # Summary of the loaded splits.
    print('\n###############################################')
    print("DATA_LOADED")
    print(("# Training Branch 0: \t\t{0}".format(train_b0.shape)))
    print(("# Training Branch 1: \t\t{0}".format(train_b1.shape)))
    print(("# Training labels: \t{0}".format(set(train_labels))))
    print(("# Training labels: \t{0}".format(train_labels.shape)))
    print(("# Training concat: \t{0}".format(len(train_concat))))
    print(("# Training vectors: \t{0}".format(train_vectors.shape)))
    print(("# Testing Branch 0: \t\t{0}".format(test_b0.shape)))
    print(("# Testing Branch 1: \t\t{0}".format(test_b1.shape)))
    print(("# Testing labels: \t{0}".format(set(test_labels))))
    print(("# Testing concat: \t{0}".format(len(test_concat))))
    print(("# Testing labels: \t{0}".format(test_labels.shape)))
    print(("# Testing vectors: \t{0}".format(test_vectors.shape)))
    print(("# Unseen Branch 0: \t\t{0}".format(unseen_b0.shape)))
    print(("# Unseen Branch 1: \t\t{0}".format(unseen_b1.shape)))
    print(("# Unseen labels: \t{0}".format(set(unseen_labels))))
    print(("\n# Groups: \t{0}".format(groups)))
    print('###############################################\n')

    # Rebuild the network and load the trained weights for CPU evaluation.
    model = net.Conv_Siam_VAE(train_b0.shape[1], train_b1.shape[1], n_latent=8, groups=groups, alpha=1, beta=1, gamma=1)
    serializers.load_npz("result/models/final.model", model)
    model.to_cpu()

    # All unordered pairs of latent groups to plot against each other.
    pairs = list(itertools.combinations(list(range(len(groups))), 2))

    # Other evaluation entry points (enable as needed):
    # save_reconstruction_arrays((train_b0, train_b0), model, folder_name="result/reconstruction_arrays/")
    # eval_seen_data((train_b0, train_b1), model, groups, folder_name="eval/scatter/seen/", pairs=pairs)
    # eval_seen_data_single((test_b0, test_b1), model, labels=test_labels, folder_name="eval/scatter/seen_single/", pairs=pairs)
    # eval_unseen_data((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen/", pairs=pairs)

    # Evaluate the unseen data through time.
    eval_unseen_time((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen_time/", pairs=pairs)
7fbb8f0f4fee8e352ae2772b0862fa10d84c521b | 1,290 | py | Python | temp/pattern.py | BlueBeret/ComputerGraphics-Assignment | 5df905adfe8bcc958493337a80bd6eb75806bf6b | [
"MIT"
] | 1 | 2022-03-11T14:03:37.000Z | 2022-03-11T14:03:37.000Z | temp/pattern.py | BlueBeret/ComputerGraphics-Assignment | 5df905adfe8bcc958493337a80bd6eb75806bf6b | [
"MIT"
] | null | null | null | temp/pattern.py | BlueBeret/ComputerGraphics-Assignment | 5df905adfe8bcc958493337a80bd6eb75806bf6b | [
"MIT"
] | null | null | null | from tkinter import *
# Main window with a white 1000x1000 drawing canvas.
root = Tk()
canvas = Canvas(root,
            width=1000,
            height=1000,
            background="#FFFFFF")
# Fill the window and grow with it.
canvas.pack(expand=YES, fill=BOTH)
def dot(x, y, size=1, color="#FF0000", outline=""):
    """Draw a dot of diameter ``size`` at (x, y) on the global canvas.

    A size-1 dot is drawn as a one-pixel line; larger dots are filled ovals.
    """
    if size == 1:
        canvas.create_line(x, y, x + 1, y, fill=color, width=size)
        return
    half = size / 2
    canvas.create_oval(x - half, y - half, x + half, y + half,
                       fill=color, width=0, outline=outline)
def onClick(event):
    """Collect four control points from mouse clicks; draw on the fourth.

    Each click appends (x, y) to the global BERZIER_COORD buffer. Once
    8 coordinates (4 points) are collected, the Bezier curve is drawn and
    the buffer is reset for the next curve.
    """
    global BERZIER_COORD
    if len(BERZIER_COORD) < 8:
        BERZIER_COORD.append(event.x)
        BERZIER_COORD.append(event.y)
    if len(BERZIER_COORD) == 8:
        # Unpack as x1, y1, x2, y2, x3, y3, x4, y4.
        kurvaBezier(*BERZIER_COORD)
        BERZIER_COORD = []
def kurvaBezier(x1, y1, x2, y2, x3, y3, x4, y4, warna="#FF0000"):
    """Plot a cubic Bezier curve dot by dot.

    (x1, y1) and (x4, y4) are the end points; (x2, y2) and (x3, y3) are the
    control handles. The sample count is a rough estimate of the control
    polygon length, so the curve appears continuous.
    """
    panjang = max(abs(x1 - x2), abs(y1 - y2)) + max(abs(x2 - x3), abs(y2 - y3)) + max(abs(x3 - x4), abs(y3 - y4))
    if panjang == 0:
        # All control points coincide: avoid the division by zero below.
        dot(round(x1), round(y1), color=warna)
        return
    for i in range(panjang + 1):
        # Compute u directly from i. The original accumulated u += 1/panjang,
        # which drifts with float error and never reaches u == 1, so the end
        # point (x4, y4) was never drawn.
        u = i / panjang
        x = x1 * (1 - u) ** 3 + 3 * x2 * u * (1 - u) ** 2 + 3 * x3 * (u ** 2) * (1 - u) + x4 * (u ** 3)
        y = y1 * (1 - u) ** 3 + 3 * y2 * u * (1 - u) ** 2 + 3 * y3 * (u ** 2) * (1 - u) + y4 * (u ** 3)
        dot(round(x), round(y), color=warna)
# Buffer of clicked control-point coordinates (filled by onClick).
BERZIER_COORD = []
# Left-click collects control points; the curve is drawn on the 4th click.
canvas.bind("<Button-1>", onClick)
root.mainloop()
7fbc6027f656cd1c5bb097bcdf8644aa472c2b01 | 7,034 | py | Python | AKextensions.py | bmilosh/Common-Information-and-Matroid-Ports | 653908c2d1a4b9c1774573b1f95b870e7f99f875 | [
"BSD-3-Clause"
] | 1 | 2020-10-19T05:20:53.000Z | 2020-10-19T05:20:53.000Z | AKextensions.py | bmilosh/Common-Information-and-Matroid-Ports | 653908c2d1a4b9c1774573b1f95b870e7f99f875 | [
"BSD-3-Clause"
] | null | null | null | AKextensions.py | bmilosh/Common-Information-and-Matroid-Ports | 653908c2d1a4b9c1774573b1f95b870e7f99f875 | [
"BSD-3-Clause"
] | null | null | null | from gurobipy import *
from itertools import combinations
from time import localtime, strftime, time
import config
from fibonew2 import (
AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder,
ib, sb)
from timing import endlog, log
def CheckOneAK(mbases,gset,rnk):
    '''
    We check if the matroid is 1-AK.
    We also implement some optimizations that help reduce the number of flats to check.
    mbases (dictionary) containing matroids to be checked and their bases
    gset (list) is the ground set of the matroids
    rnk (int) is the rank of the matroids
    Note: observe that the matroids in a particular run need to be of the same size and rank.
    '''
    start = time()
    log("Start Program")
    # Running totals across all matroids: checker counts processed matroids,
    # nonAK / oneAK count the verdicts, noAKmats records non-AK matroid keys.
    checker = 1
    nonAK = 0
    oneAK = 0
    noAKmats = list()
    # NOTE(review): sfiles is never closed in this function — results are
    # appended to runresultAK.txt; consider a 'with open(...)' block.
    sfiles = open('runresultAK.txt','a+')
    nm = 'File listing checked files from polymatroid extension run using all sets (AK) with some optimizations.'
    nm1 = '%' * len(nm)
    sfiles.write('{}\n'.format(nm1))
    sfiles.write('{}\n'.format(nm))
    sfiles.write('{}\n'.format(nm1))
    for key in mbases:
        # counter flips to 1 as soon as a violating triple is found.
        counter = 0
        begin = time()
        log('Start polymatroid extension check (AK) for {} using all sets with some optimizations.'.format(key))
        # allranks alternates subsets and their ranks: [set0, rank0, set1, rank1, ...].
        rankd,allranks = rankfinder(mbases[key],gset)
        # Ar collects candidate subsets (as sets of string elements) of size >= 2.
        Ar = list()
        for i in range(0,len(allranks)-1,2):
            if len(allranks[i]) < 2: continue
            #if Ar1[i+1] == rnk: continue # not sure why this is here
            Ar.append(set([str(it3) for it3 in allranks[i]]))
        combs3 = combinations( [i for i in range(len(Ar))], 3)
        comb_hlder = list()
        ################################################
        ## We remove tuples (U,V,Z) where:
        ## (i) UV has full rank
        ## (ii) U and V are subsets of Z
        ## (iii) UV is a subset of Z
        ## (iv) Z is the intersection of U and V
        ## (v) Z is a subset of UV
        ## (vi) UV and Z are a modular pair
        ## (vii) UV and Z have zero mutual information
        ################################################
        for combo in combs3:
            pre_comb_hlder = list()
            # cmbs12 is U ∪ V; excld is its int-valued copy used for rank lookup.
            cmbs12 = Ar[combo[0]].union(Ar[combo[1]])
            excld = set([int(itm) for itm in cmbs12])
            ind = allranks.index(excld)
            rnk_excld = allranks[ind + 1]
            # Pruning rules (i)-(v) from the note above.
            if rnk_excld == rnk: continue
            if (Ar[combo[0]].issubset(Ar[combo[2]]) and Ar[combo[1]].issubset(Ar[combo[2]])) or cmbs12.issubset(Ar[combo[2]]): continue
            if Ar[combo[2]]==Ar[combo[0]].intersection(Ar[combo[1]]) or cmbs12.issuperset(Ar[combo[2]]): continue
            # Look up ranks of UV, Z, UV ∩ Z and UV ∪ Z via the allranks table.
            #int_combo01 = [int(item) for item in cmbs12]
            set_combo01 = set( [int(item) for item in cmbs12] )
            index_combo01 = allranks.index(set_combo01)
            rnk_combo01 = allranks[index_combo01+1]
            #int_combo2 = [int(item) for item in Ar[combo[2]]]
            set_combo2 = set( [int(item) for item in Ar[combo[2]]] )
            index_combo2 = allranks.index(set_combo2)
            rnk_combo2 = allranks[index_combo2+1]
            combo_inters = cmbs12.intersection(Ar[combo[2]])
            #int_combointers = [int(item) for item in combo_inters]
            set_combointers = set( [int(item) for item in combo_inters] )
            index_combointers = allranks.index(set_combointers)
            rnk_combointers = allranks[index_combointers+1]
            combo_union = cmbs12.union(Ar[combo[2]])
            #int_combounion = [int(item) for item in combo_union]
            set_combounion = set( [int(item) for item in combo_union] )
            index_combounion = allranks.index(set_combounion)
            rnk_combounion = allranks[index_combounion+1]
            # Pruning rules (vi) and (vii): keep only non-modular pairs with
            # non-zero mutual information.
            check_modularity = rnk_combo01 + rnk_combo2 - rnk_combounion - rnk_combointers
            mutual_info = rnk_combo01 + rnk_combo2 - rnk_combounion
            if check_modularity != 0 and mutual_info != 0:
                pre_comb_hlder.append(Ar[combo[0]])
                pre_comb_hlder.append(Ar[combo[1]])
                pre_comb_hlder.append(Ar[combo[2]])
                comb_hlder.append(pre_comb_hlder)
        print('{} has {} 3-member working combinations.'.format(key,len(comb_hlder)))
        # For every surviving triple (J, K, L), build and solve a fresh
        # Gurobi LP; infeasibility certifies an AK violation.
        for i in range(len(comb_hlder)):
            combo1 = comb_hlder[i]
            J = combo1[0]
            K = combo1[1]
            L = combo1[2]
            config.p = Model("gurotest")
            config.w = config.p.addVars(range(0,2**config.vrbls+1),name="w")
            InitMatNew()
            MatroidCompatible(mbases[key],gset)
            AK2exp(bi(sb(J)), bi(sb(K)), bi(sb(L)), 2**(config.Part))
            Resol2m()
            # Feasible model: this triple does not violate AK, try the next.
            if config.p.status == GRB.Status.OPTIMAL: continue
            print('{} is a non-AK matroid with violating sets {}, {} and {}.'.format(key,J,K,L))
            sfiles.write('{} is a non-AK matroid with violating sets {}, {} and {}.\n'.format(key,J,K,L))
            noAKmats.append(key)
            counter = 1
            break ###### To find ALL combinations that break AK, suppress this line #####
        if counter == 0:
            oneAK += 1
            sfiles.write('{} is an AK matroid.\n'.format(key))
        else:
            nonAK += 1
        endlog(begin)
        # Progress / summary reporting: intermediate message while matroids
        # remain, full tally after the last one.
        if checker < len(mbases):
            difference = len(mbases)-checker
            if difference > 1:
                print('{0}done. {1} matroids remaining. Moving to the next one... \n'.format(key,difference))
            else:
                print('{}done. One matroid left.'.format(key))
        else:
            print('*********************************************************')
            print('Last run made. Program concluded.')
            print('*********************************************************')
            sfiles.write('\n All {} matroids checked.\n'.format(len(mbases)))
            if nonAK == 0:
                sfiles.write('All {} matroids are AK.\n'.format(oneAK))
            else:
                sfiles.write('non_AK_mats = {}\n'.format(noAKmats))
            if nonAK == 1 and nonAK != len(mbases):
                if oneAK == 1:
                    sfiles.write('There is one non-AK and {} AK matroid here.\n'.format(oneAK))
                else:
                    sfiles.write('There is one non-AK and {} AK matroids here.\n'.format(oneAK))
            elif nonAK > 1 and nonAK < len(mbases):
                if oneAK == 1:
                    sfiles.write('There are {} non-AK matroids, and {} AK matroid here.\n'.format(nonAK,oneAK))
                else:
                    sfiles.write('There are {} non-AK matroids, and {} AK matroids here.\n'.format(nonAK,oneAK))
            elif nonAK == len(mbases):
                sfiles.write('All {} matroids are non-AK.\n'.format(nonAK))
        checker += 1
    endlog(start)
| 43.419753 | 136 | 0.540517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,104 | 0.299119 |
7fbcbad92e0529fe666dd21bc3e6c50e0d0a628f | 7,160 | py | Python | Multiobjective Optimization/Compass.py | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
] | 6 | 2020-07-15T06:33:05.000Z | 2022-03-22T14:02:09.000Z | Multiobjective Optimization/Compass.py | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
] | 1 | 2020-07-15T06:34:43.000Z | 2020-07-15T06:34:43.000Z | Multiobjective Optimization/Compass.py | joymallyac/Fairway | 6aa11e4a182e7271059a473cd6857f91d1668e7f | [
"BSD-2-Clause"
] | 1 | 2020-07-15T06:39:31.000Z | 2020-07-15T06:39:31.000Z | import pandas as pd
import numpy as np
import random,time
import math,copy
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from result.measure import calculate_recall,calculate_far,calculate_average_odds_difference, calculate_equal_opportunity_difference, get_counts, measure_final_score
from optimizer.flash import flash_fair_LSR
## Load dataset
dataset_orig = pd.read_csv('dataset/compas-scores-two-years.csv')

## Drop categorical / identifier / duplicate columns.
## ('decile_score' appeared twice in the original drop list; listed once here.)
dataset_orig = dataset_orig.drop(
    ['id', 'name', 'first', 'last', 'compas_screening_date', 'dob', 'age',
     'juv_fel_count', 'decile_score', 'juv_misd_count', 'juv_other_count',
     'days_b_screening_arrest', 'c_jail_in', 'c_jail_out', 'c_case_number',
     'c_offense_date', 'c_arrest_date', 'c_days_from_compas', 'c_charge_desc',
     'is_recid', 'r_case_number', 'r_charge_degree', 'r_days_from_arrest',
     'r_offense_date', 'r_charge_desc', 'r_jail_in', 'r_jail_out',
     'violent_recid', 'is_violent_recid', 'vr_case_number', 'vr_charge_degree',
     'vr_offense_date', 'vr_charge_desc', 'type_of_assessment', 'score_text',
     'screening_date', 'v_type_of_assessment', 'v_decile_score',
     'v_score_text', 'v_screening_date', 'in_custody', 'out_custody',
     'start', 'end', 'event'], axis=1)

## Drop NULL values
dataset_orig = dataset_orig.dropna()

## Change symbolics to numerics
# Protected attributes: sex (1 = Female) and race (1 = Caucasian).
dataset_orig['sex'] = np.where(dataset_orig['sex'] == 'Female', 1, 0)
dataset_orig['race'] = np.where(dataset_orig['race'] != 'Caucasian', 0, 1)
# Bucket prior counts: 1-3 priors -> 3, more than 3 -> 4.
dataset_orig['priors_count'] = np.where((dataset_orig['priors_count'] >= 1 ) & (dataset_orig['priors_count'] <= 3), 3, dataset_orig['priors_count'])
dataset_orig['priors_count'] = np.where(dataset_orig['priors_count'] > 3, 4, dataset_orig['priors_count'])
# Encode age categories with representative numeric values.
dataset_orig['age_cat'] = np.where(dataset_orig['age_cat'] == 'Greater than 45',45,dataset_orig['age_cat'])
dataset_orig['age_cat'] = np.where(dataset_orig['age_cat'] == '25 - 45', 25, dataset_orig['age_cat'])
dataset_orig['age_cat'] = np.where(dataset_orig['age_cat'] == 'Less than 25', 0, dataset_orig['age_cat'])
# Charge degree: 1 = felony, 0 = misdemeanor.
dataset_orig['c_charge_degree'] = np.where(dataset_orig['c_charge_degree'] == 'F', 1, 0)

## Rename class column
dataset_orig.rename(index=str, columns={"two_year_recid": "Probability"}, inplace=True)

## Divide into train,validation,test (fixed seed for reproducibility)
dataset_orig_train, dataset_orig_vt = train_test_split(dataset_orig, test_size=0.3, random_state=0)
dataset_orig_valid, dataset_orig_test = train_test_split(dataset_orig_vt, test_size=0.5, random_state=0)

X_train, y_train = dataset_orig_train.loc[:, dataset_orig_train.columns != 'Probability'], dataset_orig_train['Probability']
X_valid , y_valid = dataset_orig_valid.loc[:, dataset_orig_valid.columns != 'Probability'], dataset_orig_valid['Probability']
X_test , y_test = dataset_orig_test.loc[:, dataset_orig_test.columns != 'Probability'], dataset_orig_test['Probability']
def run_ten_times_default():
    """Train and evaluate a default-configured LSR classifier on 3 random splits,
    printing accuracy (recall/far) and fairness (aod/eod) metrics per run.
    """
    print(" ---------- Default Results --------")
    for run_no in range(3):
        print("----Run No----", run_no)
        start = time.time()

        # Fresh random 70/15/15 split each run (no fixed seed here).
        split_train, split_vt = train_test_split(dataset_orig, test_size=0.3)
        split_valid, split_test = train_test_split(split_vt, test_size=0.5)

        X_tr, y_tr = split_train.loc[:, split_train.columns != 'Probability'], split_train['Probability']
        X_va, y_va = split_valid.loc[:, split_valid.columns != 'Probability'], split_valid['Probability']
        X_te, y_te = split_test.loc[:, split_test.columns != 'Probability'], split_test['Probability']

        #### DEFAULT Learners ####
        # --- LSR (default configuration)
        clf = LogisticRegression(C=1.0, penalty='l2', solver='liblinear', max_iter=100)
        # --- CART
        # clf = tree.DecisionTreeClassifier(criterion="gini",splitter="best",min_samples_leaf=1,min_samples_split=2) # CART Default Config
        clf.fit(X_tr, y_tr)
        preds = clf.predict(X_te)

        cnf = confusion_matrix(y_te, preds)
        print(cnf)
        TN, FP, FN, TP = cnf.ravel()

        print("recall:", 1 - calculate_recall(TP, FP, FN, TN))
        print("far:", calculate_far(TP, FP, FN, TN))
        # Fairness metrics for both protected attributes.
        for attr in ('sex', 'race'):
            for metric in ('aod', 'eod'):
                print("{0} for {1}:".format(metric, attr),
                      measure_final_score(split_test, clf, X_tr, y_tr, X_te, y_te, attr, metric))
        print(time.time() - start)
def run_ten_times_FLASH():
    """Benchmark the FLASH-tuned LSR learner over 3 random splits.

    Each run re-splits the data 70/15/15, asks FLASH for a fair LSR
    configuration, decodes it into LogisticRegression arguments, and prints
    recall/far/aod/eod for the 'sex' attribute.
    """
    print(" ---------- FLASH Results --------")
    for run_no in range(3):
        print("----Run No----", run_no)
        started = time.time()
        ## Divide into train,validation,test
        train_df, holdout_df = train_test_split(dataset_orig, test_size=0.3)
        valid_df, test_df = train_test_split(holdout_df, test_size=0.5)
        X_train = train_df.loc[:, train_df.columns != 'Probability']
        y_train = train_df['Probability']
        X_valid = valid_df.loc[:, valid_df.columns != 'Probability']
        y_valid = valid_df['Probability']
        X_test = test_df.loc[:, test_df.columns != 'Probability']
        y_test = test_df['Probability']
        # tuner = LR_TUNER()
        # best_config = tune_with_flash(tuner, X_train, y_train, X_valid, y_valid, 'adult', dataset_orig_valid, 'sex')
        best_config = flash_fair_LSR(dataset_orig, "sex", "ABCD")
        print("best_config", best_config)
        # Decode the configuration vector into LogisticRegression arguments.
        C_value = best_config[0]
        penalty = 'l1' if best_config[1] == 1 else 'l2'
        solver = 'liblinear' if best_config[2] == 1 else 'saga'
        max_iter = best_config[3]
        clf = LogisticRegression(C=C_value, penalty=penalty, solver=solver, max_iter=max_iter)
        # clf = tuner.get_clf(best_config)
        print("recall :", 1 - measure_final_score(test_df, clf, X_train, y_train, X_test, y_test, 'sex', 'recall'))
        print("far :", measure_final_score(test_df, clf, X_train, y_train, X_test, y_test, 'sex', 'far'))
        print("aod :", measure_final_score(test_df, clf, X_train, y_train, X_test, y_test, 'sex', 'aod'))
        print("eod :", measure_final_score(test_df, clf, X_train, y_train, X_test, y_test, 'sex', 'eod'))
        print(time.time() - started)
# Entry point: execute both experiment suites when the script is run.
run_ten_times_default()
run_ten_times_FLASH()
| 53.834586 | 727 | 0.714246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,241 | 0.312989 |
7fbccc8974c67902bc5ad1e661356673f890179b | 2,814 | py | Python | test_module.py | devsysenv/tests | 6aa7635b281f2e186daa6375abb8050e44c02d6a | [
"MIT"
] | null | null | null | test_module.py | devsysenv/tests | 6aa7635b281f2e186daa6375abb8050e44c02d6a | [
"MIT"
] | null | null | null | test_module.py | devsysenv/tests | 6aa7635b281f2e186daa6375abb8050e44c02d6a | [
"MIT"
] | 1 | 2022-03-09T12:23:26.000Z | 2022-03-09T12:23:26.000Z | #!/usr/bin/env python
import pytest
import os
import sys
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from pathlib import Path
from dselib.thread import initTLS
initTLS()
from constants import CONST
from dselib.path import normalizePath
from dselib.dir import GetDSEExtDirectory
from dselib.module import LoadModule, LoadModulePath
from dselib.module import LoadModulePath2, LoadModuleFromPackage
def test_1():
    """LoadModule: stdlib modules load, bogus names return None, and a
    successful load registers the module in sys.modules."""
    loaded = LoadModule('argparse')
    assert loaded is not None
    assert hasattr(loaded, 'ArgumentParser')

    assert LoadModule('madeup_module_name') is None

    target = 'dselib.env'
    # Make sure the module is not cached before loading it.
    sys.modules.pop(target, None)
    assert target not in sys.modules
    loaded = LoadModule(target)
    assert loaded is not None
    assert target in sys.modules
    assert hasattr(loaded, 'GetDSEDebug')
def test_2():
    """LoadModulePath2 loads a module from a directory path, with and
    without the remove-if-loaded flag."""
    module_dir = GetDSEExtDirectory() / 'dseutil/dsehelp'

    handle = LoadModulePath2(module_dir)
    assert handle is not None
    assert hasattr(handle, 'GenerateDocs')

    handle = LoadModulePath2(module_dir, removeIfLoaded=True)
    assert handle is not None
    assert hasattr(handle, 'GenerateDocs')
def test_3():
    """LoadModuleFromPackage loads `package.module` and caches it in
    sys.modules under its fully qualified name."""
    package, stem = 'root.dselib', 'env'
    fq_name = f'{package}.{stem}'
    # Drop any cached copy first.
    sys.modules.pop(fq_name, None)
    assert fq_name not in sys.modules

    handle = LoadModuleFromPackage(package, stem)
    assert handle is not None
    assert fq_name in sys.modules
    assert hasattr(handle, 'GetDSEDebug')
# def _create_python_module(path, retVal=True):
# logger.debug('writing python module to %s', path)
# with open(path, 'w', encoding='utf-8') as fp:
# fp.write(f'''def test{retVal}():
# \treturn {retVal}
# ''')
def test_4():
    """LoadModulePath loads a module from a file path; the parent directory
    stays on sys.path unless remove=True is passed."""
    module_dir = normalizePath(Path('./tmod').resolve())
    good_path = module_dir / 'p42.py'
    logger.debug('tempModule=%s', good_path)
    assert good_path.is_file()

    handle = LoadModulePath(str(good_path))
    assert handle is not None
    assert hasattr(handle, 'testTrue')
    assert handle.testTrue() is True
    # Loading leaves the module's directory on sys.path; clean it up here.
    assert str(module_dir) in sys.path
    sys.path.remove(str(module_dir))
    sys.modules.pop(str(good_path.stem))  # not necessary, but so I'll remember this method

    # With remove=True the directory is taken off sys.path automatically.
    other_path = Path(module_dir) / 'p42false.py'
    assert other_path.is_file()
    handle = LoadModulePath(other_path, remove=True)
    assert handle is not None
    assert str(module_dir) not in sys.path
    assert hasattr(handle, 'testFalse')
    assert handle.testFalse() is False
if __name__ == '__main__':
    # Run this file's tests directly.  The previous invocation passed the
    # filename to ``-k`` (pytest's test-*name* keyword filter), which does
    # not match filenames and so selected nothing; pass the path as a
    # positional argument instead.
    pytest.main([__file__])
| 29.3125 | 93 | 0.702914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 705 | 0.250533 |
7fbcf17f7708af8db907793b903a38dee32b4986 | 4,823 | py | Python | pilemma/env/pilemma_env.py | cmackeen/pilemma | 614542081c963063d0255af7b24395fae37b5934 | [
"MIT"
] | null | null | null | pilemma/env/pilemma_env.py | cmackeen/pilemma | 614542081c963063d0255af7b24395fae37b5934 | [
"MIT"
] | 6 | 2020-04-17T20:05:04.000Z | 2022-03-12T00:24:39.000Z | pilemma/env/pilemma_env.py | cmackeen/pilemma | 614542081c963063d0255af7b24395fae37b5934 | [
"MIT"
] | null | null | null |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import pandas as pd
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from pilemma.env.dai_auct import Urn
import glob
import os
import random
# Starting balances for each episode (DAI stablecoin / ETH).
init_dai=1000
init_eth=0

# Discrete action ids understood by DaiLemmaEnv.step().
ACTION_SKIP = 0
ACTION_MINT = 1
ACTION_CLOSE = 2
ACTION_BUY=3
ACTION_SELL=4

# Tick-data source and windowing parameters.
csv_file='./pilemma/data/ETHUSDT_long_simpdt.csv'
# Number of selectable starting rows (last 100 rows + header excluded).
# NOTE(review): the file handle from open() is never closed -- confirm acceptable.
nr=sum(1 for line in open(csv_file))-101
window=1000     # rows loaded per episode
random_row = 0  # default csv offset
class SystemState:
    """A window of OHLCV tick data consumed one row at a time.

    Loads ``window`` rows of the CSV starting ``skiprows`` lines in, and
    exposes a cursor (``index``) advanced by :meth:`next`.
    """

    def __init__(self, equity_path=csv_file, sep=',', skiprows=random_row):
        """Load tick data from *equity_path*.

        Bug fix: the ``skiprows`` argument was previously ignored (the module
        global ``random_row`` was always used), so callers requesting a random
        offset -- e.g. ``DaiLemmaEnv.reset`` -- always got the same slice.
        """
        df = self.read_csv(equity_path, sep=sep, skiprows=skiprows)
        # Fill gaps forward first, then backward for any leading NaNs.
        df = df.fillna(method='ffill')
        df = df.fillna(method='bfill')
        self.df = df
        self.index = 0  # cursor into the loaded window
        print("Imported tick data from {}".format(equity_path))

    def read_csv(self, path, sep, skiprows):
        """Read ``window`` rows of tick data and index them by timestamp."""
        dtypes = {'Date': str, 'Time': str}
        df = pd.read_csv(path, sep=sep, header=0, skiprows=skiprows, nrows=window,
                         names=['Date', 'Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Deltat'],
                         dtype=dtypes)
        dtime = df.Date + ' ' + df.Time
        df.index = pd.to_datetime(dtime)
        df.drop(['Date', 'Time'], axis=1, inplace=True)
        return df

    def reset(self):
        """Rewind the cursor to the start of the loaded window."""
        self.index = 0

    def next(self):
        """Return ``(row_values, done)`` and advance the cursor."""
        if self.index >= len(self.df) - 1:
            return None, True
        values = self.df.iloc[self.index].values
        self.index += 1
        return values, False

    def shape(self):
        return self.df.shape

    def current_price(self):
        # ``DataFrame.ix`` was removed in pandas 1.0; use positional access.
        return self.df['Close'].iloc[self.index]

    def current_time(self):
        return self.df['Deltat'].iloc[self.index]
class DaiLemmaEnv(gym.Env):
    """Gym environment simulating ETH/DAI trading plus MakerDAO-style CDPs.

    The state steps through a window of historical ETH/USDT ticks; the agent
    can skip, buy/sell ETH, or mint/close a collateralized debt position
    (Urn).  Reward is the tick-over-tick change in portfolio value.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, datadir='./pilemma/data'):
        """Collect tick-data files from *datadir* and set up spaces/balances."""
        self.bound = 100000           # upper bound for observation values
        self.num = 0.10               # trade size in ETH per action
        self.eth = init_eth
        self.dai = init_dai
        self.rowpick = 0              # csv row offset for the next episode
        self.meta_counter = 0         # total episodes started
        self.equity = 0
        self.states = []              # candidate tick-data csv paths
        self.urns = 0                 # 1 while a CDP is open
        self.state = None
        for path in glob.glob(datadir + '/ETHUSDT_long_simpdt.csv'):
            if not os.path.isfile(path):
                continue
            self.states.append(path)
        self.observation_space = spaces.Box(low=0, high=self.bound, shape=(6,))
        self.action_space = spaces.Discrete(5)
        self.episode_total_reward = 0
        self.counter = 0
        if len(self.states) == 0:
            # Bug fix: this previously formatted with the undefined name
            # ``dirname``, so the NameError masked the intended message.
            # (Exception type kept for backward compatibility with callers.)
            raise NameError('Invalid empty directory {}'.format(datadir))

    def step(self, action):
        """Apply *action*, advance one tick, return (obs, reward, done, info)."""
        assert self.action_space.contains(action)
        prev_portfolio = 1.01*self.dai + 0.99*self.eth * self.state.current_price()

        price = self.state.current_price()
        # Random gas-like transaction fee, denominated in ETH.
        fee = np.random.normal(4, .8)*(1./price)*0.50
        # NOTE(review): a fresh Urn is created every step, so ACTION_CLOSE
        # always operates on a brand-new CDP rather than the one opened by a
        # previous ACTION_MINT -- confirm whether the Urn should persist on
        # ``self`` instead.
        cdp = Urn()

        if action == ACTION_SELL:
            if self.eth >= (self.num+fee):
                self.eth -= (self.num+0*fee)
                self.dai += price*self.num
            else:
                self.eth -= fee  # insufficient ETH: still pay the fee

        if action == ACTION_BUY:
            if self.dai >= price*self.num:
                self.eth += (self.num-0*fee)
                self.dai -= price*self.num
            else:
                self.eth -= fee

        if action == ACTION_MINT:
            # NOTE(review): ETH is deducted here *and* again inside the
            # branch below, i.e. minting costs 2*(num+fee).  This looks like
            # a double deduction -- confirm the intended collateral amount.
            self.eth -= self.num+fee
            if self.urns == 0 and self.eth > self.num+fee:
                self.eth -= self.num+fee
                cdp.mint(1.52*self.num*price, self.num, self.state.current_time())
                self.dai += cdp.art
                self.urns = 1
            else:
                self.eth -= fee

        if action == ACTION_CLOSE:
            if self.urns == 1:
                self.dai -= cdp.art
                haul = (np.random.normal(.5, .1))*cdp.close(cdp.art, self.state.current_time())
                self.eth += haul-fee
                self.urns = 0
            else:
                self.eth -= fee

        state, done = self.state.next()
        # Reward is the change in (slightly fee-discounted) portfolio value.
        # (Unused locals ``cost`` and ``new_price`` from the original removed.)
        portfolio = 1.01*self.dai + 0.99*self.eth * self.state.current_price()
        reward = portfolio - prev_portfolio
        return state, reward, done, {}

    def reset(self):
        """Start a new episode; every 1000 episodes pick a new csv offset."""
        if self.meta_counter % 1000 == 0:
            self.rowpick = np.random.choice(range(1, nr))
            print(self.meta_counter)
            print("new time series")
        self.state = SystemState(random.choice(self.states), skiprows=self.rowpick)
        self.eth = init_eth
        self.dai = init_dai
        self.equity = 0
        self.urns = 0
        self.counter = 0
        self.meta_counter += 1
        state, done = self.state.next()
        return state

    def render(self, mode='human', close=False):
        """Rendering is not implemented for this environment."""
        pass
| 28.538462 | 173 | 0.571014 | 4,328 | 0.897367 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.060958 |
7fbdd22fcfc2573215684df2e76cac9a67962312 | 154 | py | Python | String/P151 - reverseWordsInString.py | HarshOza36/LeetCode_Problems | 6d7035e0d681213ac602b9e0382dbfa87f8d4745 | [
"MIT"
] | null | null | null | String/P151 - reverseWordsInString.py | HarshOza36/LeetCode_Problems | 6d7035e0d681213ac602b9e0382dbfa87f8d4745 | [
"MIT"
] | null | null | null | String/P151 - reverseWordsInString.py | HarshOza36/LeetCode_Problems | 6d7035e0d681213ac602b9e0382dbfa87f8d4745 | [
"MIT"
] | null | null | null | class Solution:
def reverseWords(self, s: str) -> str:
s = s.split(" ")
s = [i for i in s if i != ""]
return " ".join(s[::-1]) | 30.8 | 42 | 0.454545 | 154 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.051948 |
7fbe88d42805d4e48e13a5db9e5dcc9dd3f4444c | 2,700 | py | Python | kaggle-santa-2015/submission.py | gabormakrai/kaggle-santa-2015 | fa383c7130b30e501a7533efd5d372ccde0e6f29 | [
"Apache-2.0"
] | null | null | null | kaggle-santa-2015/submission.py | gabormakrai/kaggle-santa-2015 | fa383c7130b30e501a7533efd5d372ccde0e6f29 | [
"Apache-2.0"
] | null | null | null | kaggle-santa-2015/submission.py | gabormakrai/kaggle-santa-2015 | fa383c7130b30e501a7533efd5d372ccde0e6f29 | [
"Apache-2.0"
] | null | null | null | from haversine import haversine
def loadOrder(fileName):
    """Read one integer gift id per line from *fileName*; return them in order."""
    print("Loading order from " + fileName + "...")
    order = []
    with open(fileName) as handle:
        for raw_line in handle:
            # Trailing newline (and whitespace) is stripped before parsing.
            order.append(int(raw_line.rstrip()))
    print("Done... #order: " + str(len(order)))
    return order
def doSubmissionFromOrder(order, gifts):
    """Greedily pack gifts (taken in *order*) into sleigh trips of <= 1000 weight.

    *gifts* maps gift id -> gift object with a ``weight`` attribute.
    Returns a list of trips, each a list of gift objects; an empty order
    yields a single empty trip.
    """
    weight_limit = 1000.0
    trips = []
    trip = []
    trip_weight = 0.0
    for gift_id in order:
        gift = gifts[gift_id]
        if trip_weight + gift.weight > weight_limit:
            # Sleigh full: close the current trip and start a fresh one.
            trips.append(trip)
            trip = [gift]
            trip_weight = gift.weight
        else:
            trip.append(gift)
            trip_weight += gift.weight
    trips.append(trip)
    return trips
def calculateWRW(trips, gifts):
    """Weighted-reindeer-weariness objective: sum over legs of distance x load.

    Each trip starts and ends at the North Pole; the empty sleigh weighs 10.
    The load decreases by each gift's weight as it is delivered.
    (*gifts* is unused; kept for interface compatibility.)
    """
    north_pole = (90.0, 0.0)
    sleigh_weight = 10.0
    total = 0.0
    for trip in trips:
        carried = sum(gift.weight for gift in trip)
        first, last = trip[0], trip[-1]
        # Pole -> first stop with everything aboard; last stop -> pole empty.
        total += haversine(north_pole, (first.latitude, first.longitude)) * (carried + sleigh_weight)
        total += haversine((last.latitude, last.longitude), north_pole) * sleigh_weight
        # Deliveries: after each stop the delivered gift's weight is dropped.
        carried -= first.weight
        for prev, cur in zip(trip, trip[1:]):
            total += haversine((cur.latitude, cur.longitude),
                               (prev.latitude, prev.longitude)) * (carried + sleigh_weight)
            carried -= cur.weight
    return total
def createSubmissionFromTrips(trips, fileName):
    """Write the Kaggle submission CSV: one ``GiftId,TripId`` row per gift.

    Trip ids are 1-based, in the order the trips appear.
    """
    with open(fileName, 'w') as out:
        out.write("GiftId,TripId\n")
        for trip_number, trip in enumerate(trips, start=1):
            for gift in trip:
                out.write(str(gift.ID) + "," + str(trip_number) + "\n")
| 31.395349 | 154 | 0.534444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.191481 |
7fbfa1a77953c005769371575397429508029495 | 4,425 | py | Python | healthkit_to_sqlite/utils.py | cwkendall/healthkit-to-sqlite | f0bb31079370b7e25b17ddd057e68023763a0938 | [
"Apache-2.0"
] | null | null | null | healthkit_to_sqlite/utils.py | cwkendall/healthkit-to-sqlite | f0bb31079370b7e25b17ddd057e68023763a0938 | [
"Apache-2.0"
] | null | null | null | healthkit_to_sqlite/utils.py | cwkendall/healthkit-to-sqlite | f0bb31079370b7e25b17ddd057e68023763a0938 | [
"Apache-2.0"
] | null | null | null | from xml.etree import ElementTree as ET
import io
import os.path
import sys
import gpxpy
import builtins
def find_all_tags(fp, tags, progress_callback=None):
    """Stream the XML file object *fp* and yield ``(tag, element)`` pairs.

    Only elements whose tag is in *tags* are yielded.  After every yield the
    document root is cleared so memory stays bounded on huge exports.  When
    given, *progress_callback* is invoked with the byte count of each chunk.
    """
    CHUNK_SIZE = 1024 * 1024  # 1 MiB reads keep memory use flat
    pull_parser = ET.XMLPullParser(("start", "end"))
    root = None
    while chunk := fp.read(CHUNK_SIZE):
        pull_parser.feed(chunk)
        for event, element in pull_parser.read_events():
            if event == "start" and root is None:
                root = element  # remember the document root for clearing
            if event == "end" and element.tag in tags:
                yield element.tag, element
                root.clear()
        if progress_callback is not None:
            progress_callback(len(chunk))
def convert_xml_to_sqlite(fp, db, progress_callback=None, zipfile=None):
    """Parse an Apple Health ``export.xml`` stream and load it into *db*.

    Three element types are handled: ActivitySummary rows (batched 100 at a
    time into ``activity_summary``), Workout elements (written immediately,
    with GPX routes resolved via *zipfile* when given), and Record elements
    (batched 200 at a time into per-type tables).  A final
    ``progress_callback(sys.maxsize)`` signals completion.
    """
    summary_batch = []
    record_batch = []
    next_workout_seq = 1
    for tag, el in find_all_tags(
        fp, {"Record", "Workout", "ActivitySummary"}, progress_callback
    ):
        if tag == "ActivitySummary":
            summary_batch.append(dict(el.attrib))
            if len(summary_batch) >= 100:
                db["activity_summary"].insert_all(summary_batch)
                summary_batch = []
        elif tag == "Workout":
            el.set("seq", next_workout_seq)
            workout_to_db(el, db, zipfile)
            next_workout_seq += 1
        elif tag == "Record":
            row = dict(el.attrib)
            # Flatten metadata children into prefixed columns.
            for meta in el.findall("MetadataEntry"):
                row["metadata_" + meta.attrib["key"]] = meta.attrib["value"]
            record_batch.append(row)
            if len(record_batch) >= 200:
                write_records(record_batch, db)
                record_batch = []
        el.clear()  # free memory for each processed element
    # Flush whatever is left in the batches.
    if record_batch:
        write_records(record_batch, db)
    if summary_batch:
        db["activity_summary"].insert_all(summary_batch)
    if progress_callback is not None:
        progress_callback(sys.maxsize)
def workout_to_db(workout, db, zf):
    """Insert one Workout element into ``workouts`` plus its route points.

    Route locations embedded in the XML are used when present; otherwise
    sidecar GPX files referenced by WorkoutRoute/FileReference are loaded
    (from the zip archive *zf* when given, or from disk).
    """
    record = dict(workout.attrib)
    # add metadata entry items as extra keys
    for el in workout.findall("MetadataEntry"):
        record["metadata_" + el.attrib["key"]] = el.attrib["value"]
    # Dump any WorkoutEvent in a nested list for the moment
    record["workout_events"] = [el.attrib for el in workout.findall("WorkoutEvent")]
    # hash_id="id" derives the primary key from the record contents.
    pk = db["workouts"].insert(record, alter=True, hash_id="id").last_pk
    points = [
        dict(el.attrib, workout_id=pk)
        for el in workout.findall("WorkoutRoute/Location")
    ]
    if len(points) == 0:
        # Location not embedded, sidecar gpx files used instead
        gpx_files = [os.path.join("apple_health_export", *(item.get("path").split("/")))
                     for item in workout.findall("WorkoutRoute/FileReference")]
        # support zip or flat files
        for path in gpx_files:
            with open_file_or_zip(zf, path) as xml_file:
                gpx = parse_gpx(xml_file)
                # NOTE(review): walk(only_points=False) items are indexed with
                # [0] -- presumably (point, track, segment, ...) tuples whose
                # first element is the point; confirm against gpxpy docs.
                for point in gpx.walk(only_points=False):
                    # Convert lxml extension elements to plain dicts so they
                    # can be serialized into the row.
                    point[0].extensions = [etree_to_dict(e) for e in point[0].extensions]
                    # Copy every slot attribute of the point into the row.
                    points.append(dict({key: getattr(point[0], key) for key in point[0].__slots__}, workout_id=pk))
    if len(points):
        db["workout_points"].insert_all(
            points, foreign_keys=[("workout_id", "workouts")], batch_size=50
        )
def etree_to_dict(t):
    """Recursively convert an Element into ``{tag: children-or-text}``.

    Leaf elements map to their text; any XML attributes are attached under
    the ``"@attr"`` key.
    """
    children = [etree_to_dict(child) for child in t]
    result = {t.tag: children or t.text}
    if t.attrib:
        result["@attr"] = t.attrib
    return result
def open_file_or_zip(zf, file):
    """Open *file* inside zip archive *zf*, or from disk (binary) if *zf* is None."""
    if zf is None:
        return builtins.open(file, 'rb')
    return zf.open(file)
def parse_gpx(xml_file):
    """Decode a binary GPX stream as UTF-8 and parse it with gpxpy.

    NOTE(review): the first line is skipped unconditionally on the assumption
    it is the ``<?xml ...?>`` declaration -- confirm exports always include one.
    """
    text_stream = io.TextIOWrapper(xml_file, encoding='UTF-8', newline=None)
    text_stream.readline()  # skip xml header
    return gpxpy.parse(text_stream.read())
def write_records(records, db):
    """Group *records* by their HealthKit type and bulk-insert each group.

    The ``type`` key is popped from every record (mutating it) and mapped to
    a table named ``r<ShortType>`` with the HK identifier prefixes stripped.
    """
    grouped = {}
    for rec in records:
        hk_type = rec.pop("type")
        short = hk_type.replace("HKQuantityTypeIdentifier", "")
        short = short.replace("HKCategoryTypeIdentifier", "")
        table_name = "r" + short
        if table_name not in grouped:
            grouped[table_name] = []
        grouped[table_name].append(rec)
    # One bulk insert per table keeps the sqlite round-trips down.
    for table_name, rows in grouped.items():
        db[table_name].insert_all(
            rows,
            alter=True,
            column_order=["startDate", "endDate", "value", "unit"],
            batch_size=50,
        )
| 34.570313 | 115 | 0.598644 | 0 | 0 | 551 | 0.12452 | 0 | 0 | 0 | 0 | 764 | 0.172655 |
7fc0a9a618c9ad1b7397ee8f2734d93ece534de6 | 9,628 | py | Python | tests/utils/test_kronecker.py | gtpash/rom-operator-inference-Python3 | a5e05a4442121f7c536060194fec0166976b04b1 | [
"MIT"
] | null | null | null | tests/utils/test_kronecker.py | gtpash/rom-operator-inference-Python3 | a5e05a4442121f7c536060194fec0166976b04b1 | [
"MIT"
] | null | null | null | tests/utils/test_kronecker.py | gtpash/rom-operator-inference-Python3 | a5e05a4442121f7c536060194fec0166976b04b1 | [
"MIT"
] | null | null | null | # utils/test_kronecker.py
"""Tests for rom_operator_inference.utils._kronecker."""
import pytest
import numpy as np
import rom_operator_inference as opinf
# Index generation for fast self-product kronecker evaluation =================
def test_kron2c_indices(n_tests=100):
    """Test utils._kronecker.kron2c_indices()."""
    # Hand-checked index mask for r = 4: block i pairs index i with 0..i.
    mask = opinf.utils.kron2c_indices(4)
    assert np.all(mask == np.array([[0, 0],
                                    [1, 0], [1, 1],
                                    [2, 0], [2, 1], [2, 2],
                                    [3, 0], [3, 1], [3, 2], [3, 3]],
                                   dtype=int))
    # Smaller masks are prefixes of larger ones.
    submask = opinf.utils.kron2c_indices(3)
    assert np.allclose(submask, mask[:6])

    # Structural checks for r = 10: shape r(r+1)/2 x 2, first row all zeros,
    # last row all r-1, and column 0 sums to sum_i i*(i+1).
    r = 10
    _r2 = r * (r + 1) // 2
    mask = opinf.utils.kron2c_indices(r)
    assert mask.shape == (_r2, 2)
    assert np.all(mask[0] == 0)
    assert np.all(mask[-1] == r - 1)
    assert mask.sum(axis=0)[0] == sum(i*(i+1) for i in range(r))

    # Ensure consistency with utils.kron2c().
    for _ in range(n_tests):
        x = np.random.random(r)
        assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron2c(x))
def test_kron3c_indices(n_tests=100):
    """Test utils._kronecker.kron3c_indices()."""
    # Hand-checked index mask for r = 2: nonincreasing triples (i >= j >= k).
    mask = opinf.utils.kron3c_indices(2)
    assert np.all(mask == np.array([[0, 0, 0],
                                    [1, 0, 0], [1, 1, 0], [1, 1, 1]],
                                   dtype=int))

    # Structural checks for r = 10: shape r(r+1)(r+2)/6 x 3, first row all
    # zeros, last row all r-1.  (The duplicate kron3c_indices(r) call is kept
    # as-is; it is redundant but harmless.)
    r = 10
    mask = opinf.utils.kron3c_indices(r)
    _r3 = r * (r + 1) * (r + 2) // 6
    mask = opinf.utils.kron3c_indices(r)
    assert mask.shape == (_r3, 3)
    assert np.all(mask[0] == 0)
    assert np.all(mask[-1] == r - 1)

    # Ensure consistency with utils.kron3c().
    for _ in range(n_tests):
        x = np.random.random(r)
        assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron3c(x))
# Kronecker (Khatri-Rao) products =============================================
# utils.kron2c() --------------------------------------------------------------
def _test_kron2c_single_vector(n):
    """One randomized vector check of utils._kronecker.kron2c()."""
    vec = np.random.random(n)
    compact = opinf.utils.kron2c(vec)
    assert compact.ndim == 1
    assert compact.shape[0] == n * (n + 1) // 2
    # Block i of the compact product is x[i] * x[:i+1].
    for i in range(n):
        lo, hi = i * (i + 1) // 2, (i + 1) * (i + 2) // 2
        assert np.allclose(compact[lo:hi], vec[i] * vec[:i + 1])
def _test_kron2c_single_matrix(n):
    """One randomized matrix check of utils._kronecker.kron2c()."""
    mat = np.random.random((n, n))
    compact = opinf.utils.kron2c(mat)
    assert compact.ndim == 2
    assert compact.shape == (n * (n + 1) // 2, n)
    # Row block i is row i of the matrix times rows 0..i (elementwise).
    for i in range(n):
        lo, hi = i * (i + 1) // 2, (i + 1) * (i + 2) // 2
        assert np.allclose(compact[lo:hi], mat[i] * mat[:i + 1])
def test_kron2c(n_tests=100):
    """Test utils._kronecker.kron2c()."""
    # Dimension check: 3-D input must be rejected.
    with pytest.raises(ValueError) as exc:
        opinf.utils.kron2c(np.random.random((3, 3, 3)), checkdim=True)
    assert exc.value.args[0] == "x must be one- or two-dimensional"

    # Randomized vector and matrix checks over assorted sizes.
    for size in np.random.randint(2, 100, n_tests):
        _test_kron2c_single_vector(size)
        _test_kron2c_single_matrix(size)
# utils.kron3c() --------------------------------------------------------------
def _test_kron3c_single_vector(n):
    """One randomized vector check of utils._kronecker.kron3c()."""
    vec = np.random.random(n)
    compact = opinf.utils.kron3c(vec)
    assert compact.ndim == 1
    assert compact.shape[0] == n * (n + 1) * (n + 2) // 6
    # Block i of the cubic product is x[i] times the quadratic product of x[:i+1].
    for i in range(n):
        lo = i * (i + 1) * (i + 2) // 6
        hi = (i + 1) * (i + 2) * (i + 3) // 6
        assert np.allclose(compact[lo:hi], vec[i] * opinf.utils.kron2c(vec[:i + 1]))
def _test_kron3c_single_matrix(n):
    """One randomized matrix check of utils._kronecker.kron3c()."""
    mat = np.random.random((n, n))
    compact = opinf.utils.kron3c(mat)
    assert compact.ndim == 2
    assert compact.shape == (n * (n + 1) * (n + 2) // 6, n)
    # Row block i is row i times the quadratic product of rows 0..i.
    for i in range(n):
        lo = i * (i + 1) * (i + 2) // 6
        hi = (i + 1) * (i + 2) * (i + 3) // 6
        assert np.allclose(compact[lo:hi], mat[i] * opinf.utils.kron2c(mat[:i + 1]))
def test_kron3c(n_tests=50):
    """Test utils._kronecker.kron3c()."""
    # Dimension check: 3-D input must be rejected.
    with pytest.raises(ValueError) as exc:
        opinf.utils.kron3c(np.random.random((2, 4, 3)), checkdim=True)
    assert exc.value.args[0] == "x must be one- or two-dimensional"

    # Randomized vector and matrix checks over assorted sizes.
    for size in np.random.randint(2, 30, n_tests):
        _test_kron3c_single_vector(size)
        _test_kron3c_single_matrix(size)
# Matricized tensor management ================================================
# utils.expand_quadratic() ----------------------------------------------------
def _test_expand_quadratic_single(r):
    """Do one test of utils._kronecker.expand_quadratic()."""
    x = np.random.random(r)

    # Expand a random compact quadratic operator and check dimensions.
    s = r*(r+1)//2
    Hc = np.random.random((r, s))
    H = opinf.utils.expand_quadratic(Hc)
    assert H.shape == (r, r**2)

    # Check that Hc(x^2) == H(x⊗x).
    Hxx = H @ np.kron(x, x)
    assert np.allclose(Hc @ opinf.utils.kron2c(x), Hxx)

    # Check properties of the tensor for H.
    Htensor = H.reshape((r, r, r))
    assert np.allclose(Htensor @ x @ x, Hxx)
    # Bug fix: iterate over the r x r slices of the *tensor*, not the rows of
    # the flat matrix H -- a 1-D row satisfies subH == subH.T vacuously, so
    # the old loop asserted nothing.  Each expanded slice must be symmetric.
    for subH in Htensor:
        assert np.allclose(subH, subH.T)
def test_expand_quadratic(n_tests=100):
    """Test utils._kronecker.expand_quadratic()."""
    # A second dimension that is not r(r+1)/2 must be rejected.
    r = 5
    bad_cols = r*(r+3)//2
    with pytest.raises(ValueError) as exc:
        opinf.utils.expand_quadratic(np.random.random((r, bad_cols)))
    assert exc.value.args[0] == \
        f"invalid shape (r,s) = {(r,bad_cols)} with s != r(r+1)/2"

    # Randomized checks over assorted dimensions.
    for dim in np.random.randint(2, 100, n_tests):
        _test_expand_quadratic_single(dim)
# utils.compress_quadratic() --------------------------------------------------
def _test_compress_quadratic_single(r):
    """One randomized check of utils._kronecker.compress_quadratic()."""
    x = np.random.random(r)

    # Compress a random full quadratic operator and check dimensions.
    full = np.random.random((r, r**2))
    compact = opinf.utils.compress_quadratic(full)
    assert compact.shape == (r, r * (r + 1) // 2)

    # Hc(x^2) must equal H(x ⊗ x).
    assert np.allclose(full @ np.kron(x, x), compact @ opinf.utils.kron2c(x))

    # Expanding back recovers the slice-wise symmetrization of the original.
    expanded = opinf.utils.expand_quadratic(compact)
    tensor = full.reshape((r, r, r))
    symmetrized = np.array([(slab + slab.T) / 2 for slab in tensor])
    assert np.allclose(expanded, symmetrized.reshape(full.shape))
def test_compress_quadratic(n_tests=100):
    """Test utils._kronecker.compress_quadratic()."""
    # A second dimension that is not r**2 must be rejected.
    r = 5
    bad_cols = r**2 + 1
    with pytest.raises(ValueError) as exc:
        opinf.utils.compress_quadratic(np.random.random((r, bad_cols)))
    assert exc.value.args[0] == \
        f"invalid shape (r,a) = {(r,bad_cols)} with a != r**2"

    # Randomized checks over assorted dimensions.
    for dim in np.random.randint(2, 100, n_tests):
        _test_compress_quadratic_single(dim)
# utils.expand_cubic() --------------------------------------------------------
def _test_expand_cubic_single(r):
    """Do one test of utils._kronecker.expand_cubic()."""
    x = np.random.random(r)

    # Expand a random compact cubic operator and check dimensions.
    s = r*(r+1)*(r+2)//6
    Gc = np.random.random((r, s))
    G = opinf.utils.expand_cubic(Gc)
    assert G.shape == (r, r**3)

    # Check that Gc(x^3) == G(x⊗x⊗x).
    Gxxx = G @ np.kron(x, np.kron(x, x))
    assert np.allclose(Gc @ opinf.utils.kron3c(x), Gxxx)

    # Check properties of the tensor for G.
    Gtensor = G.reshape((r, r, r, r))
    assert np.allclose(Gtensor @ x @ x @ x, Gxxx)
    # Bug fix: iterate over the r x r x r tensor slices, not the rows of the
    # flat matrix G -- 1-D rows made the old subG == subG.T check vacuous.
    # Symmetric expanded slices are invariant under axis reversal (.T).
    for subG in Gtensor:
        assert np.allclose(subG, subG.T)
def test_expand_cubic(n_tests=50):
    """Test utils._kronecker.expand_cubic()."""
    # A second dimension that is not r(r+1)(r+2)/6 must be rejected.
    r = 5
    bad_cols = r*(r+1)*(r+3)//6
    with pytest.raises(ValueError) as exc:
        opinf.utils.expand_cubic(np.random.random((r, bad_cols)))
    assert exc.value.args[0] == \
        f"invalid shape (r,s) = {(r,bad_cols)} with s != r(r+1)(r+2)/6"

    # Randomized checks over assorted dimensions.
    for dim in np.random.randint(2, 30, n_tests):
        _test_expand_cubic_single(dim)
# utils.compress_cubic() ------------------------------------------------------
def _test_compress_cubic_single(r):
    """One randomized check of utils._kronecker.compress_cubic()."""
    x = np.random.random(r)

    # Compress a random full cubic operator and check dimensions.
    full = np.random.random((r, r**3))
    compact = opinf.utils.compress_cubic(full)
    assert compact.shape == (r, r*(r+1)*(r+2)//6)

    # Gc(x^3) must equal G(x ⊗ x ⊗ x).
    cube = np.kron(x, np.kron(x, x))
    assert np.allclose(full @ cube, compact @ opinf.utils.kron3c(x))

    # expand_cubic() and compress_cubic() are inverses on compact operators.
    assert np.allclose(
        compact, opinf.utils.compress_cubic(opinf.utils.expand_cubic(compact)))
def test_compress_cubic(n_tests=50):
    """Test utils._kronecker.compress_cubic()."""
    # A second dimension that is not r**3 must be rejected.
    r = 5
    bad_cols = r**3 + 1
    with pytest.raises(ValueError) as exc:
        opinf.utils.compress_cubic(np.random.random((r, bad_cols)))
    assert exc.value.args[0] == \
        f"invalid shape (r,a) = {(r,bad_cols)} with a != r**3"

    # Randomized checks over assorted dimensions.
    for dim in np.random.randint(2, 30, n_tests):
        _test_compress_cubic_single(dim)
| 34.141844 | 79 | 0.575301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,047 | 0.316079 |
f68072e40e9326d2208563fb1dc9bb70cc1a0ff4 | 3,420 | py | Python | gadann/updater.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | gadann/updater.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | gadann/updater.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | #
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 Daniel Pineo (daniel@pineo.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import logging
from . import kernels
logger = logging.getLogger(__name__)
class Updater(object):
    """Base class for parameter-update rules.

    Subclasses implement ``update(params, grads)``, applying one optimization
    step to ``params`` in place, and ``status()`` returning a short progress
    string.
    """

    def __init__(self):
        pass

    def update(self, params, grads):
        """Apply one update step in place.  Must be overridden.

        Bug fix: the previous body referenced undefined names
        (``params``/``grads``/``learning_rate`` were never bound, and the
        single ``args`` parameter was unused), so any call raised NameError.
        Raise NotImplementedError to make the abstract intent explicit, with
        a signature consistent with the subclasses.
        """
        raise NotImplementedError("subclasses must implement update()")
class SgdUpdater(Updater):
    """Plain stochastic gradient descent with L2 weight decay."""

    def __init__(self, learning_rate=0.1, weight_cost=0.01):
        self.weight_cost = weight_cost      # L2 decay coefficient
        self.learning_rate = learning_rate  # step size

    def update(self, params, grads):
        """Step each named parameter against its gradient, then shrink it."""
        lr = self.learning_rate
        decay = self.weight_cost
        for name, grad in grads.items():
            params[name] = params[name] - lr * grad - params[name] * decay

    def status(self):
        """SGD keeps no evolving internal state worth reporting."""
        return ''
class MomentumUpdater(Updater):
    """SGD with an exponential moving average of gradients (momentum)."""

    def __init__(self, learning_rate=0.1, inertia=0.9, weight_cost=0.00):
        self.inertia = inertia              # EMA coefficient, annealed toward 1
        self.weight_cost = weight_cost      # L2 decay coefficient
        self.learning_rate = learning_rate  # step size

    def update(self, params, grads):
        """Blend grads into the velocity EMA, then step params in place."""
        blend = self.inertia
        if hasattr(self, 'velocities'):
            self.velocities = [blend * vel + (1 - blend) * grad
                               for vel, grad in zip(self.velocities, grads)]
        else:
            # First call: no velocity history yet.
            self.velocities = [(1 - blend) * grad for grad in grads]
        for idx in range(len(params)):
            params[idx] = (params[idx]
                           - self.learning_rate * self.velocities[idx]
                           - params[idx] * self.weight_cost)
        # Anneal inertia toward 1 so momentum strengthens over time.
        self.inertia += .001 * (1 - self.inertia)

    def status(self):
        return 'inertia:' + str(self.inertia)
class RmspropUpdater(Updater):
    """RMSProp: scale each step by a running RMS of recent gradients."""

    def __init__(self, learning_rate=0.1, inertia=0.0, weight_cost=0.00):
        self.epsilon = 0.000001             # guards against division by zero
        self.inertia = inertia              # EMA coefficient, annealed toward 1
        self.weight_cost = weight_cost      # L2 decay coefficient
        self.learning_rate = learning_rate  # step size

    def update(self, params, grads):
        """Accumulate squared gradients, then take a normalized step in place."""
        blend = self.inertia
        if hasattr(self, 'accum'):
            self.accum = [blend * acc + (1 - blend) * (grad ** 2)
                          for acc, grad in zip(self.accum, grads)]
        else:
            # First call: seed the accumulator from the raw squared gradients.
            self.accum = [(1 - blend) * (grad ** 2) for grad in grads]
        for idx in range(len(params)):
            params[idx] = (params[idx]
                           - self.learning_rate * grads[idx]
                           / (kernels.sqrt(self.accum[idx]) + self.epsilon)
                           - params[idx] * self.weight_cost)
        # Anneal inertia toward 1 so the average lengthens over time.
        self.inertia += .001 * (1 - self.inertia)

    def status(self):
        return 'inertia:' + str(self.inertia)
| 37.173913 | 142 | 0.65614 | 2,108 | 0.616374 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.352339 |
f682fe676c7e1a1a9637f751482cd8ab906a78f3 | 880 | py | Python | Aula37/View/testesquad.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula37/View/testesquad.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula37/View/testesquad.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | import sys
sys.path.append( r"C:\Users\900157\Documents\Github\TrabalhosPython\Aula37" )
from Controller.squad_controller import SquadController
from Model.squad import Squad
squad = Squad()
squad.nome = 'JooJ'
squad.descricao = 'Intermediário'
squad.npessoas = 20
squad.backend.nome = 'Lua' #Adicionar
squad.backend.idbackend = 1 #Alterar
squad.frontend.nome = 'Lua' #Adicionar
squad.frontend.idfrontend = 1 #Alterar
squad.sgbd.nome = 'Lua' #Adicionar
squad.sgbd.idsgbd = 1 #Alterar
squad.id = 6
controller = SquadController()
#controller.deletar(5) #Deletando por id
#controller.salvar(squad) #Adicionando
controller.alterar(squad) #Alterando
print(controller.listar_todos()) #Buscando todos
#print(controller.buscar_por_id(squad)) #Buscando por id | 38.26087 | 77 | 0.671591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.398411 |
f684bccdded45f6ae63003186c9bee87eac9d795 | 5,427 | py | Python | restapi/resources/login.py | fossabot/http-api | 57a646884e62fe024f6dd7edcc572c85f2955f16 | [
"MIT"
] | null | null | null | restapi/resources/login.py | fossabot/http-api | 57a646884e62fe024f6dd7edcc572c85f2955f16 | [
"MIT"
] | null | null | null | restapi/resources/login.py | fossabot/http-api | 57a646884e62fe024f6dd7edcc572c85f2955f16 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pytz
from restapi.rest.definition import EndpointResource
from restapi.exceptions import RestApiException
from restapi.connectors.authentication import HandleSecurity
from restapi import decorators
from restapi.confs import WRAP_RESPONSE
from restapi.utilities.htmlcodes import hcodes
class Login(EndpointResource):
    """ Let a user login by using the configured method """
    baseuri = "/auth"
    depends_on = ["MAIN_LOGIN_ENABLE"]
    labels = ["authentication"]
    # Swagger/OpenAPI description of the POST /auth/login endpoint.
    POST = {
        "/login": {
            "summary": "Login with basic credentials",
            "description": "Normal credentials (username and password) login endpoint",
            "parameters": [
                {
                    "name": "credentials",
                    "in": "body",
                    "schema": {"$ref": "#/definitions/Credentials"},
                }
            ],
            "responses": {
                "200": {"description": "Credentials are valid"},
                "401": {"description": "Invalid username or password"},
            },
        }
    }
    def verify_information(self, user, security, totp_auth, totp_code, now=None):
        """Check post-authentication requirements (2FA, forced/expired password).

        Returns None when nothing is pending, otherwise a FORBIDDEN response
        whose payload lists required 'actions' and human-readable 'errors'.
        `now` is injectable for testability; defaults to the current UTC time.
        """
        message = {
            'actions': [],
            'errors': []
        }
        # Second factor configured but not supplied with this request.
        # NOTE(review): grammar — message should read "You did not provide ...".
        if totp_auth and totp_code is None:
            message['actions'].append(self.auth.SECOND_FACTOR_AUTHENTICATION)
            message['errors'].append("You do not provided a valid second factor")
        # A missing/zero last_password_change is normalized to the Unix epoch,
        # which the checks below treat as "password never changed".
        epoch = datetime.fromtimestamp(0, pytz.utc)
        last_pwd_change = user.last_password_change
        if last_pwd_change is None or last_pwd_change == 0:
            last_pwd_change = epoch
        if self.auth.FORCE_FIRST_PASSWORD_CHANGE and last_pwd_change == epoch:
            # First login with a temporary password: force a change and, when
            # TOTP is enabled, include the QR code for authenticator setup.
            message['actions'].append('FIRST LOGIN')
            message['errors'].append("Please change your temporary password")
            if totp_auth:
                qr_code = security.get_qrcode(user)
                message["qr_code"] = qr_code
        elif self.auth.MAX_PASSWORD_VALIDITY > 0:
            # Password ages out after MAX_PASSWORD_VALIDITY days; a password
            # never changed (epoch) is always considered expired.
            if last_pwd_change == epoch:
                expired = True
            else:
                valid_until = last_pwd_change + timedelta(
                    days=self.auth.MAX_PASSWORD_VALIDITY
                )
                if now is None:
                    now = datetime.now(pytz.utc)
                expired = valid_until < now
            if expired:
                message['actions'].append('PASSWORD EXPIRED')
                message['errors'].append("Your password is expired, please change it")
        if not message['errors']:
            return None
        return self.response(
            errors=message, code=hcodes.HTTP_BAD_FORBIDDEN
        )
    @decorators.catch_errors()
    def post(self):
        """Authenticate with username/password (plus optional TOTP and
        in-band password change) and return a session token."""
        jargs = self.get_input()
        # Accept 'email' as an alias for 'username' and 'pwd' for 'password'.
        username = jargs.get('username')
        if username is None:
            username = jargs.get('email')
        password = jargs.get('password')
        if password is None:
            password = jargs.get('pwd')
        # Now credentials are checked at every request
        if username is None or password is None:
            msg = "Missing username or password"
            raise RestApiException(msg, status_code=hcodes.HTTP_BAD_UNAUTHORIZED)
        username = username.lower()
        now = datetime.now(pytz.utc)
        new_password = jargs.get('new_password')
        password_confirm = jargs.get('password_confirm')
        # TOTP is active only when the configured second factor is TOTP.
        totp_authentication = (
            self.auth.SECOND_FACTOR_AUTHENTICATION is not None
            and self.auth.SECOND_FACTOR_AUTHENTICATION == self.auth.TOTP
        )
        if totp_authentication:
            totp_code = jargs.get('totp_code')
        else:
            totp_code = None
        security = HandleSecurity(self.auth)
        # ##################################################
        # Authentication control
        security.verify_blocked_username(username)
        token, jti = self.auth.make_login(username, password)
        security.verify_token(username, token)
        user = self.auth.get_user()
        security.verify_blocked_user(user)
        security.verify_active_user(user)
        if totp_authentication and totp_code is not None:
            security.verify_totp(user, totp_code)
        # ##################################################
        # If requested, change the password
        if new_password is not None and password_confirm is not None:
            pwd_changed = security.change_password(
                user, password, new_password, password_confirm
            )
            # Re-login with the new password so the returned token is valid.
            if pwd_changed:
                password = new_password
                token, jti = self.auth.make_login(username, password)
        # ##################################################
        # Something is missing in the authentication, asking action to user
        ret = self.verify_information(
            user, security, totp_authentication, totp_code, now
        )
        if ret is not None:
            return ret
        # Everything is ok, let's save authentication information
        if user.first_login is None:
            user.first_login = now
        user.last_login = now
        self.auth.save_token(user, token, jti)
        if WRAP_RESPONSE:
            return self.response({'token': token})
        return self.response(token)
| 32.112426 | 87 | 0.575456 | 5,064 | 0.933112 | 0 | 0 | 2,631 | 0.484798 | 0 | 0 | 1,160 | 0.213746 |
f684dba1f0e8b0d64fb281ab8830a5710f866b0a | 13,196 | py | Python | bot.py | egor5q/casino | 732c35e2f87074be86329ad4706098d23f52fc4b | [
"MIT"
] | null | null | null | bot.py | egor5q/casino | 732c35e2f87074be86329ad4706098d23f52fc4b | [
"MIT"
] | null | null | null | bot.py | egor5q/casino | 732c35e2f87074be86329ad4706098d23f52fc4b | [
"MIT"
] | 1 | 2019-05-18T16:19:20.000Z | 2019-05-18T16:19:20.000Z | # -*- coding: utf-8 -*-
import redis
import os
import telebot
import math
import random
import threading
from telebot import types
from emoji import emojize
from pymongo import MongoClient
# --- Global configuration / shared state ---------------------------------
token = os.environ['TELEGRAM_TOKEN']  # Telegram bot token from the environment
bot = telebot.TeleBot(token)
admins=[441399484]  # Telegram user ids allowed to run admin commands
games={}  # active lobbies: chat_id -> lobby dict (see createlobby)
client1=os.environ['database']  # MongoDB connection string from the environment
client=MongoClient(client1)
db=client.chlenomer
users=db.ids_people  # user balances collection: {'id': ..., 'chlenocoins': ...}
@bot.message_handler(commands=['begin'])
def begin(m):
    """/begin (admin only): open the betting phase for this chat's lobby
    and prompt every joined player in private chat for a bet amount."""
    if m.from_user.id in admins:
        if m.chat.id in games:
            bot.send_message(m.chat.id, 'Начинаем делать ставки (в лс бота)! Про коэффициенты выигрышей вы можете узнать с помощью команды /help')
            games[m.chat.id]['began']=1
            for ids in games[m.chat.id]['players']:
                bot.send_message(ids, 'Напишите, сколько членокоинов вы хотите поставить.')
@bot.message_handler(commands=['help'])
def help(m):
    """/help: show the fixed payout multipliers for every bet option."""
    payout_lines = [
        'Коэффициенты ставок:',
        '1-15, 16-30: *1.5*',
        '1-5, 6-10, 11-15, 16-20, 21-25, 26-30: *5*',
        '0: *25*',
    ]
    bot.send_message(m.chat.id, '\n'.join(payout_lines), parse_mode='markdown')
@bot.message_handler(commands=['join'])
def join(m):
    """/join: add the sender to this chat's lobby (before betting starts)."""
    if m.chat.id in games:
        if m.from_user.id not in games[m.chat.id]['players']:
            if games[m.chat.id]['began']!=1:
                # NOTE(review): bare except — intended to catch the Telegram
                # error raised when the bot cannot PM a user who never started
                # it, but it will also hide any other failure here.
                try:
                    bot.send_message(m.from_user.id, 'Вы присоединились к казино!')
                    bot.send_message(m.chat.id, m.from_user.first_name+' Вошел в казино!')
                    games[m.chat.id]['players'].update(createuser(m.from_user.id, m.from_user.first_name))
                except:
                    bot.send_message(m.chat.id, m.from_user.first_name+', сначала напишите боту в личку!')
@bot.message_handler(commands=['roll'])
def roll(m):
    """/roll (admin only): start the spin animation for an open betting round.

    The winning number is drawn later, in rollend(); the previous
    `x = random.randint(0, 30)` here was never used and has been removed.
    """
    if m.from_user.id in admins:
        if m.chat.id in games:
            if games[m.chat.id]['began']==1:
                msg = bot.send_message(m.chat.id, 'Крутим барабан...\n' + '🕐')
                # Kick off the clock-face animation chain (roll2 .. roll12 -> rollend).
                t = threading.Timer(0.1, roll2, args=[m.chat.id, msg.message_id])
                t.start()
# roll2..roll12 were eleven copy-pasted functions differing only in the clock
# emoji and the next function to schedule. They are kept as named functions
# (roll() schedules roll2 by name, and each step schedules the next) but now
# share a single helper. Behaviour is unchanged: each step edits the message
# to the next clock face, then schedules the following step 0.1 s later;
# roll12 hands off to rollend() for the actual draw.
def _roll_step(face, next_step, id, id2):
    """Show one animation frame on message `id2` in chat `id`, then
    schedule `next_step(id, id2)` to run 0.1 seconds later."""
    medit('Крутим барабан...\n' + face, id, id2)
    threading.Timer(0.1, next_step, args=[id, id2]).start()
def roll2(id, id2):
    _roll_step('🕑', roll3, id, id2)
def roll3(id, id2):
    _roll_step('🕒', roll4, id, id2)
def roll4(id, id2):
    _roll_step('🕓', roll5, id, id2)
def roll5(id, id2):
    _roll_step('🕔', roll6, id, id2)
def roll6(id, id2):
    _roll_step('🕕', roll7, id, id2)
def roll7(id, id2):
    _roll_step('🕖', roll8, id, id2)
def roll8(id, id2):
    _roll_step('🕗', roll9, id, id2)
def roll9(id, id2):
    _roll_step('🕘', roll10, id, id2)
def roll10(id, id2):
    _roll_step('🕙', roll11, id, id2)
def roll11(id, id2):
    _roll_step('🕚', roll12, id, id2)
def roll12(id, id2):
    _roll_step('🕛', rollend, id, id2)
# Payout table: bet option -> (low, high, multiplier), inclusive bounds.
# Multiplier literal types match the original branches (1.5 float, 5/25 int)
# so str(win) renders exactly the same (e.g. '30.0' for x1.5, '100' for x5).
BET_RANGES = {
    '1-15': (1, 15, 1.5),
    '16-30': (16, 30, 1.5),
    '1-5': (1, 5, 5),
    '6-10': (6, 10, 5),
    '11-15': (11, 15, 5),
    '16-20': (16, 20, 5),
    '21-25': (21, 25, 5),
    '26-30': (26, 30, 5),
    '0': (0, 0, 25),
}
def rollend(id, id2):
    """Draw the winning number, settle every player's bet, post the results
    and close the lobby for chat `id` (message `id2` shows the result).

    Refactor of nine copy-pasted range branches into one table lookup;
    messages and balance updates are unchanged. Leftover debug print()
    calls were removed, and a latent NameError (stale `win` reused for an
    unrecognised bet key) is fixed by treating that case as "no bet".
    """
    x = random.randint(0, 30)
    medit('Выпавшее число: *' + str(x) + '*.', id, id2)
    text = 'Результаты:\n\n'
    for ids in games[id]['players']:
        player = games[id]['players'][ids]
        betto = player['betto']
        if betto is not None and betto in BET_RANGES:
            low, high, mult = BET_RANGES[betto]
            if low <= x <= high:
                win = round(player['bet'] * mult, 0)
                text += '*' + player['name'] + '*' + ' Выиграл ' + str(win) + ' членокоин(ов)!\n'
                # The stake is withdrawn here and the prize credited below,
                # so the net gain is win - bet (same as the original code).
                users.update_one({'id': ids}, {'$inc': {'chlenocoins': -player['bet']}})
            else:
                win = -player['bet']
                # str(-win) equals the original str(win + (-win * 2)):
                # the lost amount shown as a positive number.
                text += '*' + player['name'] + '*' + ' проиграл ' + str(-win) + ' членокоин(ов)!\n'
        else:
            # No bet placed (or unrecognised option): nothing won or lost.
            text += '*' + player['name'] + '*' + ' Не поставил ничего!\n'
            win = 0
        users.update_one({'id': ids}, {'$inc': {'chlenocoins': win}})
    bot.send_message(id, text, parse_mode='markdown')
    del games[id]
@bot.message_handler(commands=['stavki'])
def stavki(m):
    """/stavki (admin only): open a new casino lobby in this chat.
    Non-admins get a refusal message instead."""
    if m.from_user.id in admins:
        if m.chat.id not in games:
            games.update(createlobby(m.chat.id))
            bot.send_message(m.chat.id, 'Казино открыто! Жмите /join, чтобы испытать удачу и выиграть членокоины!')
    else:
        bot.send_message(m.chat.id, 'Создать лобби может только администратор казино!')
@bot.message_handler(content_types=['text'])
def texttt(m):
    """Private-chat text handler: interpret the message as a bet amount.

    Finds the lobby the sender belongs to, validates the amount against the
    user's balance, stores it and shows the inline keyboard of bet options.
    """
    # Only react in private chats (chat id == sender id).
    if m.chat.id==m.from_user.id:
        i=0
        # Find the (last) lobby this user has joined.
        for ids in games:
            if m.from_user.id in games[ids]['players']:
                i=1
                y=games[ids]
        if i==1:
            # NOTE(review): bare except — meant to ignore non-numeric text
            # (int() raising ValueError), but it also silences DB errors from
            # users.find_one(); consider narrowing to ValueError.
            try:
                x=int(m.text)
                x=round(x, 0)
                player=users.find_one({'id':m.from_user.id})
                if player!=None:
                    if player['chlenocoins']>=x:
                        y['players'][m.from_user.id]['bet']=x
                        # Inline keyboard with every bet option; callback_data
                        # must match the keys used in rollend()'s payout logic.
                        kb=types.InlineKeyboardMarkup()
                        kb.add(types.InlineKeyboardButton(text='1-15', callback_data='1-15'),types.InlineKeyboardButton(text='16-30', callback_data='16-30'),types.InlineKeyboardButton(text='1-5', callback_data='1-5'))
                        kb.add(types.InlineKeyboardButton(text='6-10', callback_data='6-10'),types.InlineKeyboardButton(text='11-15', callback_data='11-15'),types.InlineKeyboardButton(text='16-20', callback_data='16-20'))
                        kb.add(types.InlineKeyboardButton(text='21-25', callback_data='21-25'),types.InlineKeyboardButton(text='26-30', callback_data='26-30'),types.InlineKeyboardButton(text='0', callback_data='0'))
                        bot.send_message(m.from_user.id, 'Вы поставили '+str(x)+' членокоинов! Теперь выберите, на что вы их ставите:', reply_markup=kb)
                    else:
                        bot.send_message(m.chat.id, 'Недостаточно членокоинов!')
            except:
                pass
@bot.callback_query_handler(func=lambda call:True)
def inline(call):
    """Inline-keyboard callback: record which range the player bet on
    (call.data) and announce the bet in the lobby's group chat."""
    i=0
    # Locate the (last) lobby containing this player.
    for ids in games:
        if call.from_user.id in games[ids]['players']:
            i=1
            y=games[ids]
    if i==1:
        y['players'][call.from_user.id]['betto']=call.data
        medit('Ставка принята. Вы поставили '+str(y['players'][call.from_user.id]['bet'])+' членокоинов на '+call.data+'! Ждите результатов в чате', call.from_user.id, call.message.message_id)
        bot.send_message(y['id'], y['players'][call.from_user.id]['name']+' поставил '+str(y['players'][call.from_user.id]['bet'])+' членокоинов на '+call.data+'!')
def createuser(id, name):
    """Build the per-player lobby record, keyed by telegram user id.

    'bet' (amount) and 'betto' (chosen range) start unset.
    """
    record = {
        'id': id,
        'bet': None,
        'betto': None,
        'name': name,
    }
    return {id: record}
def createlobby(id):
    """Build a fresh lobby record for chat `id`, keyed by the chat id.

    'began' flips to 1 when betting opens; 'coef' holds the fixed payout
    multipliers per bet option.
    """
    payouts = {
        '1-15': 1.5,
        '16-30': 1.5,
        '0': 25,
        '1-5': 5,
        '6-10': 5,
        '11-15': 5,
        '16-20': 5,
        '21-25': 5,
        '26-30': 5,
    }
    lobby = {
        'id': id,
        'began': 0,
        'players': {},
        'result': None,
        'coef': payouts,
    }
    return {id: lobby}
def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode='Markdown'):
    """Edit an existing message in place; thin wrapper over bot.edit_message_text."""
    return bot.edit_message_text(chat_id=chat_id,message_id=message_id,text=message_text,reply_markup=reply_markup,
                          parse_mode=parse_mode)
# Hoisted out of the loop body: re-importing on every retry was pointless.
# NOTE: `ConnectionError` here is requests' exception, shadowing the builtin.
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectionError
# Keep polling forever; transient network failures just restart the poll.
while True:
    try:
        bot.polling()
    except(ReadTimeout, ConnectionError):
        pass
| 39.508982 | 221 | 0.510533 | 0 | 0 | 0 | 0 | 4,674 | 0.328877 | 0 | 0 | 4,321 | 0.304039 |
f6852dfc604081fcd05261f22cc95fbff38847fc | 2,317 | py | Python | qrogue/game/logic/collectibles/collectible.py | 7Magic7Mike7/Qrogue | 70bd5671a77981c1d4b633246321ba44f13c21ff | [
"MIT"
] | 4 | 2021-12-14T19:13:43.000Z | 2022-02-16T13:25:38.000Z | qrogue/game/logic/collectibles/collectible.py | 7Magic7Mike7/Qrogue | 70bd5671a77981c1d4b633246321ba44f13c21ff | [
"MIT"
] | null | null | null | qrogue/game/logic/collectibles/collectible.py | 7Magic7Mike7/Qrogue | 70bd5671a77981c1d4b633246321ba44f13c21ff | [
"MIT"
] | 1 | 2022-01-04T18:35:51.000Z | 2022-01-04T18:35:51.000Z | import math
from abc import ABC, abstractmethod
from enum import Enum
from typing import Iterator
class CollectibleType(Enum):
    # Broad category of an in-game collectible.
    Consumable = 1
    Gate = 2
    ActiveItem = 3
    PassiveItem = 4
    Pickup = 5
    Qubit = 6
    Multi = 0  # wraps multiple collectibles
def type_str(c_type: CollectibleType) -> str:
    """Display suffix for a collectible type: gates get " Gate", others none."""
    return " Gate" if c_type is CollectibleType.Gate else ""
class Collectible(ABC):
    """Abstract base for everything the player can pick up or buy.

    Subclasses must provide a display name, a description, a default shop
    price and a serialized string form.
    """
    def __init__(self, c_type: CollectibleType):
        self.__type = c_type
    @property
    def type(self):
        # The CollectibleType passed at construction; read-only.
        return self.__type
    @abstractmethod
    def name(self) -> str:
        """Human-readable display name."""
        pass
    @abstractmethod
    def description(self) -> str:
        """Longer description shown to the player."""
        pass
    @abstractmethod
    def default_price(self) -> int:
        """List price used when no explicit shop price is given."""
        pass
    @abstractmethod
    def to_string(self) -> str:
        """Compact serialized representation."""
        pass
class MultiCollectible(Collectible):
    """A bundle of collectibles sold together at a small discount."""
    # Bundle price = ceil(sum of member prices * PRICE_MULT).
    PRICE_MULT = 0.9
    def __init__(self, content: [Collectible]):
        super(MultiCollectible, self).__init__(CollectibleType.Multi)
        self.__items = content
    def name(self) -> str:
        return "Collectible Pack"
    def description(self) -> str:
        header = "Contains multiple Collectibles:"
        bullets = "".join("\n - " + item.name() for item in self.__items)
        return header + bullets
    def default_price(self) -> int:
        total = sum(item.default_price() for item in self.__items)
        return math.ceil(total * MultiCollectible.PRICE_MULT)
    def to_string(self) -> str:
        # Each member is followed by ", " (including the last one),
        # matching the established serialized format.
        body = "".join(item.to_string() + ", " for item in self.__items)
        return "Multi [" + body + "]"
    def iterator(self) -> Iterator[Collectible]:
        return iter(self.__items)
class ShopItem:
    """A collectible paired with its shop price.

    A negative `price` (the default) means "use the collectible's
    default_price()".
    """
    def __init__(self, collectible: Collectible, price: int = -1):
        self.__item = collectible
        self.__cost = collectible.default_price() if price < 0 else price
    @property
    def collectible(self) -> Collectible:
        return self.__item
    @property
    def price(self) -> int:
        return self.__cost
    def to_string(self) -> str:
        return f"{self.collectible}, {self.price}$"
    def __str__(self):
        return self.to_string()
| 22.715686 | 69 | 0.616314 | 2,069 | 0.892965 | 0 | 0 | 447 | 0.192922 | 0 | 0 | 149 | 0.064307 |
f685eb18698eec9ccc94c0ce1603ea2b34b10c5c | 2,952 | py | Python | flux/resources/workflow.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | flux/resources/workflow.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | flux/resources/workflow.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | from mesh.standard import *
from scheme import *
__all__ = ('Workflow',)
# Reusable schema fragment: an ordered list of layout sections, each with an
# optional title/view and a list of form elements bound to schema fields.
Layout = Sequence(Structure({
    'title': Text(),
    'view': Token(),
    'elements': Sequence(Structure({
        'type': Token(nonempty=True),
        'field': Token(nonempty=True),
        'label': Text(),
        'options': Field(),
    })),
}))
# Shape of the read-only 'form' attribute exposed by the Workflow resource.
FormStructure = {
    'schema': Definition(nonempty=True),
    'layout': Layout.clone(),
}
class Workflow(Resource):
    """A workflow."""
    name = 'workflow'
    version = 1
    # Standard mesh request set supported by this resource.
    requests = 'create delete get load put query update'
    class schema:
        # Declarative field schema; 'operators' lists supported query filters.
        id = UUID(nonnull=True, oncreate=True, operators='equal in')
        name = Text(nonempty=True, operators='equal icontains')
        designation = Token(operators='equal')
        is_service = Boolean(default=False, operators='equal')
        specification = Text(deferred=True)
        form = Structure(FormStructure, deferred=True, readonly=True)
        modified = DateTime(utc=True, readonly=True)
        type = Enumeration('yaml mule', default='yaml', operators='equal in')
        policies = Sequence(Text(), deferred=True, readonly=True)
        # Extra metadata only meaningful for type == 'mule' workflows.
        mule_extensions = Structure({
            'packageurl': Text(nonempty=True),
            'endpointurl': Text(nonempty=True),
            'readmeurl': Text(),
        })
    class create(Resource.create):
        # Extra writable fields accepted on creation.
        fields = {
            'specification': Text(),
            'filepath': Token(),
        }
        support_returning = True
    class generate:
        # Custom request: build a workflow specification from a high-level
        # description (name, schema, layout and a list of operations).
        endpoint = ('GENERATE', 'workflow')
        title = 'Generate a workflow specification'
        schema = {
            'name': Text(nonempty=True),
            'description': Text(),
            'schema': Definition(),
            'layout': Layout.clone(),
            'operations': Sequence(Structure({
                'description': Text(),
                'operation': Token(segments=2, nonempty=False),
                'run_params': Field(nonnull=True),
                'step_params': Field(),
            }), min_length=1, nonempty=True),
        }
        responses = {
            OK: Response({
                'name': Text(nonempty=True),
                'specification': Text(nonempty=True),
                'description': Text()
            }),
            INVALID: Response(Errors),
        }
    class update(Resource.update):
        # Updates may replace the specification; it must be non-empty.
        fields = {
            'specification': Text(nonnull=True, min_length=1)
        }
        support_returning = True
class WorkflowMule(Resource):
    """A workflow mule extensions."""
    name = 'workflow_mule'
    version = 1
    # Read-only resource: only queries are supported.
    requests = 'query'
    class schema:
        id = UUID(nonnull=True, oncreate=True, operators='equal')
        workflow_id = UUID(nonnull=True, operators='equal')
        packageurl = Text(nonempty=True, operators='equal icontains')
        endpointurl = Text(nonempty=True, operators='equal icontains')
        readmeurl = Text(operators='equal icontains')
f6862c3a762adb42778672f66157ed1731e5cdfe | 1,079 | py | Python | Most Asked DSA By Companies/Meta/3-973.py | neelaadityakumar/leetcode | e78e0b8dc0113bdc1721bf7d025a463bea04847f | [
"MIT"
] | null | null | null | Most Asked DSA By Companies/Meta/3-973.py | neelaadityakumar/leetcode | e78e0b8dc0113bdc1721bf7d025a463bea04847f | [
"MIT"
] | null | null | null | Most Asked DSA By Companies/Meta/3-973.py | neelaadityakumar/leetcode | e78e0b8dc0113bdc1721bf7d025a463bea04847f | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/k-closest-points-to-origin/
# 973. K Closest Points to Origin
# Medium
# Share
# Given an array of points where points[i] = [xi, yi] represents a point on the X-Y plane and an integer k, return the k closest points to the origin (0, 0).
# The distance between two points on the X-Y plane is the Euclidean distance (i.e., √((x1 - x2)² + (y1 - y2)²)).
# You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in).
# Example 1:
# Input: points = [[1,3],[-2,2]], k = 1
# Output: [[-2,2]]
# Explanation:
# The distance between (1, 3) and the origin is sqrt(10).
# The distance between (-2, 2) and the origin is sqrt(8).
# Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
# We only want the closest k = 1 points from the origin, so the answer is just [[-2,2]].
# Example 2:
# Input: points = [[3,3],[5,-1],[-2,4]], k = 2
# Output: [[3,3],[-2,4]]
# Explanation: The answer [[-2,4],[3,3]] would also be accepted.
# Constraints:
# 1 <= k <= points.length <= 104
# -104 < xi, yi < 104
| 34.806452 | 157 | 0.644115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,051 | 0.972248 |
f6866a52596fe4972475bf119793b740f2d4ea78 | 1,232 | py | Python | src/huaytools/pytorch/modules/loss/cosine_similarity.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | 100 | 2021-10-13T01:22:27.000Z | 2022-03-31T09:52:49.000Z | src/huaytools/pytorch/modules/loss/cosine_similarity.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | null | null | null | src/huaytools/pytorch/modules/loss/cosine_similarity.py | imhuay/studies-gitbook | 69a31c20c91d131d0fafce0622f4035b9b95e93a | [
"MIT"
] | 27 | 2021-11-01T01:05:09.000Z | 2022-03-31T03:32:01.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time: 2021-10-13 8:30 下午
Author: huayang
Subject:
"""
import os
import sys
import json
import doctest
from typing import *
from collections import defaultdict
from torch.nn import functional as F # noqa
from huaytools.pytorch.modules.loss.mean_squared_error import mean_squared_error_loss
def cosine_similarity_loss(x1, x2, labels):
    """Cosine-similarity regression loss.

    Computes the cosine similarity between corresponding rows of ``x1`` and
    ``x2`` along the last dimension, then scores those similarities against
    ``labels`` with :func:`mean_squared_error_loss`.

    (The previous docstring contained a pasted doctest for a different loss,
    negative_log_likelihood_loss; it has been removed as misleading.)

    Args:
        x1: [B, N]
        x2: same shape as x1
        labels: [B] or scalar target similarity values

    Returns:
        [B] vector or scalar — whichever :func:`mean_squared_error_loss`
        produces for these inputs (see
        huaytools.pytorch.modules.loss.mean_squared_error).
    """
    cosine_scores = F.cosine_similarity(x1, x2, dim=-1)  # [B]
    return mean_squared_error_loss(cosine_scores, labels)  # [B]
def _test():
    """Run this module's doctests (none are currently active)."""
    doctest.testmod()
if __name__ == '__main__':
    """"""
    # Script entry point: just run the doctest suite.
    _test()
f688e307748f33124d94de4e0957467d5780593f | 866 | py | Python | Tree/98. 验证二叉搜索树.py | graveszhang/LeetCode-Practice | 72be5ce43048461e0b702b84eee1f2f86f356d11 | [
"MIT"
] | null | null | null | Tree/98. 验证二叉搜索树.py | graveszhang/LeetCode-Practice | 72be5ce43048461e0b702b84eee1f2f86f356d11 | [
"MIT"
] | null | null | null | Tree/98. 验证二叉搜索树.py | graveszhang/LeetCode-Practice | 72be5ce43048461e0b702b84eee1f2f86f356d11 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# if root.left and root.right:
# if v <= root.left.val or v >= root.right.val: return False
# elif root.left:
# if v <= root.left.val: return False
# elif root.right:
# if v >= root.right.val: return False
class Solution:
    def isValidBST(self, root: "TreeNode") -> bool:
        """Return True iff the tree rooted at `root` is a valid BST.

        In-order traversal must visit strictly increasing values;
        `self.prev` tracks the previously visited node.
        """
        self.prev = None
        return self.recur(root)
    def recur(self, root):
        """In-order check; now fails fast instead of always traversing the
        right subtree after the left side has already been proven invalid."""
        if not root:
            return True
        if not self.recur(root.left):
            return False
        # Equal values are invalid too (strict BST ordering).
        if self.prev is not None and self.prev.val >= root.val:
            return False
        self.prev = root
        return self.recur(root.right)
| 30.928571 | 72 | 0.553118 | 396 | 0.457275 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.460739 |
f689b3c7d9297254645c801a0aa3f131ae851790 | 11,197 | py | Python | scNLP_spaCy_GeneExpression_word2vec.py | alexcwsmith/scNLP | 1691316f1dc13774fa05b0e6ea075941667fa6b0 | [
"MIT"
] | 1 | 2021-06-23T13:07:29.000Z | 2021-06-23T13:07:29.000Z | scNLP_spaCy_GeneExpression_word2vec.py | alexcwsmith/scNLP | 1691316f1dc13774fa05b0e6ea075941667fa6b0 | [
"MIT"
] | null | null | null | scNLP_spaCy_GeneExpression_word2vec.py | alexcwsmith/scNLP | 1691316f1dc13774fa05b0e6ea075941667fa6b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 12:48:08 2020
@author: smith
"""
import spacy
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import os
import multiprocessing
import csv
import re
import pandas as pd
from time import time
from datetime import datetime
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
import logging
import gensim
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
# --- Analysis configuration (hard-coded local paths; adjust per machine) ---
w2v_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/w2v_model/model071520/'
w2v_model = Word2Vec.load(os.path.join(w2v_dir, 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model'))
modelName = '_w2v071520_'  # suffix embedded in all output file names
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/'
clusters = ['Cluster' + str(x) for x in range(20)]  # Cluster0 .. Cluster19
category = 'CellTypes'
comparison = 'MarkerGenes'
# Term frequency table, sorted by combined occurrence count (descending).
termIndex = pd.read_excel(os.path.join(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison + '_Frequency.xlsx'), index_col=0)
termIndex = termIndex.sort_values(by='Combined Occurances', ascending=False)
enrichIndex = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx', index_col=0)
# Every 4th column holds the per-cluster term column — presumably the sheet
# interleaves term/statistic columns; TODO confirm layout.
enrIndex = enrichIndex.iloc[:,::4]
def calcTopSimilarities(cluster, category, min_freq=5, topn=2000, save=False):
    """For each marker gene of `cluster`, find word2vec-similar terms that are
    in the enriched/term vocabulary for `category`.

    Returns (cat, catX): long-format rows stacked on axis 0, and the same
    data laid out gene-by-gene on axis 1. Optionally writes both to Excel.
    NOTE(review): `min_freq` is currently unused in this implementation.
    """
    resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')
    clusterNum=cluster.replace('Cluster', '')
    # Differential-expression table; column '<cluster>_n' holds gene names.
    genesDf = pd.read_excel('/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx')
    genesList = genesDf[str(clusterNum) + '_n'].tolist()
    genes = genesList
    # Lower-case the gene names to match the word2vec vocabulary.
    genes = []
    for gene in genesList:
        genes.append(gene.lower())
   # words = pd.read_excel(os.path.join(resultDirectory, str(cluster) + '_' + comparison + '_Results/' + category + '_' + cluster + '_Frequency.xlsx'), index_col=0)
   # words = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster0_EnrichedFunctions_onlyTest.xlsx', index_col=0)
   # wordsRedacted = words.loc[words['Occurances'] > min_freq]['word'].tolist()
    words = enrIndex
    wordsRedacted = words[cluster + ' term'].tolist()[:-1]
    if category == 'CellTypes':
        # For cell types use the global top-150 most frequent terms instead.
        wordsRedacted = termIndex['word'].tolist()[:150]
    # Multi-word terms are stored with underscores in the w2v vocabulary.
    newWords = []
    for item in wordsRedacted:
        try:
            item = item.replace(' ', '_')
            newWords.append(item)
        except AttributeError:
            pass
    cat = pd.DataFrame()
    catX = pd.DataFrame()
    for gene in genes:
        gene = gene.lower()
        # KeyError means the gene is absent from the w2v vocabulary; skip it.
        try:
            df = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity', 'similarity'])
            df['gene'] = gene
            df2 = df.loc[df['entity'].isin(newWords)]
            df2 = df2.reset_index(drop=True)
            dfX = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity ' + gene, 'similarity ' + gene])
            dfX2 = dfX.loc[dfX['entity ' + gene].isin(newWords)]
            dfX2 = dfX2.reset_index(drop=True)
            cat = pd.concat([cat, df2], axis=0)
            cat = cat.reset_index(drop=True)
            catX = pd.concat([catX, dfX2], axis=1)
            catX = catX.reset_index(drop=True)
        except KeyError:
            pass
    if save:
       # cat.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
       # catX.to_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))
        cat.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
        catX.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))
    return(cat, catX)
def averageSimilarities(cluster, category):
    """Aggregate the per-gene similarity rows written by calcTopSimilarities.

    For each unique term ('entity') computes the mean/stdev similarity, the
    count-weighted mean, the number of contributing genes and their names,
    writes the sorted table to Excel and returns it.

    Bug fix: the FileNotFoundError message previously assembled a *different*
    file name (modelName inserted in the wrong position) than the one that
    was actually checked; both now use the same path.
    """
    clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    simPath = os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')
    if not os.path.exists(simPath):
        raise FileNotFoundError("Similarities file doesn't exist at " + simPath)
    df = pd.read_excel(simPath)
    itemList = []
    aveList = []
    stdList = []
    weightList = []
    countList = []
    geneList = []
    for item in df['entity'].unique().tolist():
        rows = df.loc[df['entity'] == item]  # filter once per term
        ave = np.mean(rows['similarity'])
        std = np.std(rows['similarity'])
        gene = rows['gene'].tolist()
        count = len(gene)
        # Weight the mean by how many genes contributed the term.
        weightedAve = rows.shape[0] * ave
        itemList.append(item)
        aveList.append(ave)
        stdList.append(std)
        weightList.append(weightedAve)
        countList.append(count)
        geneList.append(gene)
    result = pd.DataFrame(data=[itemList, aveList, stdList, weightList, countList, geneList]).T
    result.columns = ['entity', 'ave_similarity', 'stdev', 'weighted_ave', 'count', 'similar_genes']
    result = result.sort_values(by='weighted_ave', ascending=False)
    result = result.drop_duplicates(subset='entity', keep='first')
    result.to_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'))
    return result
def combineAverageSims(clusters, category, save=True):
    """Concatenate every cluster's averageSimilarities table side-by-side
    (columns prefixed with the cluster name) and optionally save to Excel."""
    clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    bigDf = pd.DataFrame()
    for cluster in clusters:
        df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'), index_col=0)
        df.columns=[cluster + '_entity', cluster + '_average_sim', cluster + '_stdev', cluster + '_weightedAve', cluster + '_count', cluster + '_similarGenes']
        bigDf = pd.concat([bigDf, df], axis=1)
    if save:
        bigDf.to_excel(os.path.join(clusterDirectory, 'Combined_AverageSimilarities' + modelName + category + '.xlsx'))
    return(bigDf)
# --- Driver section: runs the full pipeline at import time (writes Excel files) ---
cat, catX = calcTopSimilarities('Cluster0', 'Functions', save=True)
df = averageSimilarities('Cluster0', 'Functions')
for cluster in clusters:
    calcTopSimilarities(cluster, 'CellTypes', min_freq=5, topn=10000, save=True)
for cluster in clusters:
    averageSimilarities(cluster, 'CellTypes')
df = combineAverageSims(clusters, 'CellTypes', save=True)
df = averageSimilarities('Cluster5', 'Functions')
###FREQUENCY DISTRIBUTION:
cat = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx')
def tsnescatterplot(model, setName, word, list_names, n_components=42, perplexity=10):
    """Plot a 2-D t-SNE projection of word vectors with seaborn.

    Projects the query ``word`` (red), its most-similar words (blue) and the
    caller-supplied ``list_names`` words (green) from 300-d vectors down to
    ``n_components`` PCA dimensions and then to 2-D with t-SNE, then saves the
    annotated scatter plot as a PNG.

    Parameters
    ----------
    model : gensim word2vec model exposing ``model.wv``.
    setName : str, prefix used in the saved figure's file name.
    word : str, the query word (must be in the model vocabulary).
    list_names : iterable of str, extra words to highlight in green.
    n_components : int, PCA dimensionality before t-SNE (default 42, matching
        the previous hard-coded value).
    perplexity : int, t-SNE perplexity (default 10, as before).

    Side effects: saves a PNG under the module-level ``resultDirectory`` using
    the module-level ``modelName``.
    """
    arrays = np.empty((0, 300), dtype='f')
    word_labels = [word]
    color_list = ['red']
    # adds the vector of the query word (wv[...] replaces the direct
    # __getitem__ dunder call of the original)
    arrays = np.append(arrays, model.wv[[word]], axis=0)
    # gets list of most similar words
    close_words = model.wv.most_similar([word])
    try:
        # adds the vector for each of the closest words to the array
        for wrd_score in close_words:
            wrd_vector = model.wv[[wrd_score[0]]]
            word_labels.append(wrd_score[0])
            color_list.append('blue')
            arrays = np.append(arrays, wrd_vector, axis=0)
        # adds the vector for each of the words from list_names to the array
        for wrd in list_names:
            wrd_vector = model.wv[[wrd]]
            word_labels.append(wrd)
            color_list.append('green')
            arrays = np.append(arrays, wrd_vector, axis=0)
    except KeyError:
        # An out-of-vocabulary word aborts the remaining lookups; the words
        # gathered so far are still plotted (labels/colors stay consistent
        # because appends happen only after a successful lookup).
        pass
    # Reduce dimensionality with PCA before running t-SNE
    reduc = PCA(n_components=n_components).fit_transform(arrays)
    # Finds t-SNE coordinates for 2 dimensions
    np.set_printoptions(suppress=True)
    Y = TSNE(n_components=2, random_state=0, perplexity=perplexity).fit_transform(reduc)
    # Sets everything up to plot
    df = pd.DataFrame({'x': [x for x in Y[:, 0]],
                       'y': [y for y in Y[:, 1]],
                       'words': word_labels,
                       'color': color_list})
    fig, _ = plt.subplots()
    fig.set_size_inches(9, 9)
    # Basic plot
    p1 = sns.regplot(data=df,
                     x="x",
                     y="y",
                     fit_reg=False,
                     marker="o",
                     scatter_kws={'s': 40,
                                  'facecolors': df['color']
                                  }
                     )
    # Adds annotations one by one with a loop
    for line in range(0, df.shape[0]):
        p1.text(df["x"][line],
                df['y'][line],
                '  ' + df["words"][line].title(),
                horizontalalignment='left',
                verticalalignment='bottom', size='medium',
                color=df['color'][line],
                weight='normal'
                ).set_size(15)
    plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
    plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
    plt.title('t-SNE visualization for {}'.format(word.title()))
    # File name reflects the PCA dimensionality (default reproduces the old
    # '_tSNE_42PCs.png' name exactly).
    plt.savefig(os.path.join(resultDirectory, setName + modelName + word + '_tSNE_{}PCs.png'.format(n_components)))
# Render the t-SNE plot for the current query word.
tsnescatterplot(w2v_model, setName, word, newWords)
# Exploratory nearest-neighbour queries (return values are not stored).
w2v_model.wv.most_similar(positive=["drug_addiction"], topn=20)
w2v_model.wv.most_similar(positive=["nucleus_accumbens"], topn=20)
w2v_model.wv.most_similar(positive=["vta"], topn=20)
w2v_model.wv.most_similar(positive=["dbi"], topn=20)
# Vector-arithmetic analogies: positive terms added, negative subtracted.
w2v_model.wv.most_similar(positive=["enkephalin", "cacng4"], negative=["opioid"], topn=20)
w2v_model.wv.most_similar(positive=["slc17a7", "cacng4"], negative=["glutamatergic_neuron"], topn=20)
###RUN PCA:
# fit a 2d PCA model to the vectors
# NOTE(review): `w2v_model[...]` and `wv.vocab` are gensim <4.0 access
# patterns -- confirm the installed gensim version still supports them.
X = w2v_model[w2v_model.wv.vocab]
pca = PCA(n_components=50)
result = pca.fit_transform(X)
#Plot the result (first two principal components only)
fig, ax = plt.subplots()
ax.plot(result[:, 0], result[:, 1], 'o')
ax.set_title('Entities')
plt.show()
# Full vocabulary list for later use.
words = list(w2v_model.wv.vocab.keys())
| 42.093985 | 183 | 0.662945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,691 | 0.329642 |
f689cb30eecdf29de4c6be77cd697849835314bd | 180 | py | Python | cvp/setup.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | 1 | 2017-11-03T01:06:51.000Z | 2017-11-03T01:06:51.000Z | cvp/setup.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | null | null | null | cvp/setup.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | 2 | 2017-09-05T13:38:20.000Z | 2017-10-12T21:56:11.000Z | import setuptools
__author__ = 'serena'
# NOTE(review): presumably the common setup.py workaround of importing
# multiprocessing before setup() to avoid an atexit error -- confirm intent.
try:
    import multiprocessing  # noqa
except ImportError:
    pass
# All remaining packaging metadata is delegated to pbr (reads setup.cfg).
setuptools.setup(
    setup_requires=['pbr==2.0.0'],
    pbr=True)
f68a9ad9cd1070d7dee249f1f3d7af1fd606f8ce | 3,406 | py | Python | data_utils/preprocess.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | [
"BSD-3-Clause"
] | null | null | null | data_utils/preprocess.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | [
"BSD-3-Clause"
] | null | null | null | data_utils/preprocess.py | AnastasiiaNovikova/sentiment-discovery | eaae55921038d674e2f16fbd0bfd2e63194a9545 | [
"BSD-3-Clause"
] | 1 | 2019-03-13T11:43:13.000Z | 2019-03-13T11:43:13.000Z | import os
import re
import html
import unidecode
import torch
# Matches any single HTML/XML tag; non-greedy so adjacent tags match separately.
HTML_CLEANER_REGEX = re.compile('<.*?>')

def clean_html(text):
    """Replace every HTML/XML tag in *text* with a single space."""
    return HTML_CLEANER_REGEX.sub(' ', text)
def binarize_labels(labels, hard=True):
    """Min-max rescale *labels* into [0, 1]; optionally threshold to hard 0/1.

    With ``hard=True`` values above .5 become 1 and the rest 0. If all labels
    are equal (zero range) the un-normalized array is returned unchanged.

    Args:
        labels: sequence of numeric labels.
        hard: if True return int 0/1 labels, else floats in [0, 1].

    Returns:
        numpy array of normalized (or binarized) labels.
    """
    # NOTE(review): numpy is not imported at the top of this module in the
    # version under review (only os/re/html/unidecode/torch are), so `np` was
    # an unbound name here; import it locally to fix the NameError.
    import numpy as np
    labels = np.array(labels)
    min_label = min(labels)
    label_range = max(labels) - min_label
    if label_range == 0:
        # degenerate case: every label identical -- nothing to scale
        return labels
    labels = (labels - min_label) / label_range
    if hard:
        labels = (labels > .5).astype(int)
    return labels
def process_str(text, front_pad='\n ', end_pad=' ', maxlen=None, clean_markup=True,
                clean_unicode=True, encode='utf-8', limit_repeats=3):
    """Clean and normalize *text* per section 4 of Radford et al.
    (https://arxiv.org/pdf/1704.01444.pdf).

    Steps: optionally strip HTML markup, optionally transliterate unicode to
    ascii with unidecode, unescape HTML entities, truncate, collapse runs of
    more than ``limit_repeats`` identical whitespace-separated tokens, pad
    with ``front_pad``/``end_pad``, and optionally encode.

    Args:
        text: input string.
        front_pad/end_pad: strings prepended/appended to the result.
        maxlen: if given, budget for the result; the pads' lengths are
            subtracted from it.  NOTE(review): the remainder truncates the
            number of *tokens*, not characters -- confirm that is the intent.
        clean_markup: strip HTML tags first (uses module-level clean_html).
        clean_unicode: transliterate to ascii via unidecode.
        encode: encoding name, or None to return a str.
        limit_repeats: maximum allowed run of identical tokens (0 disables).

    Returns:
        bytes (when ``encode`` is set) or str.
    """
    if clean_markup:
        text = clean_html(text)
    if clean_unicode:
        text = unidecode.unidecode(text)
    text = html.unescape(text)
    text = text.split()
    if maxlen is not None:
        len2use = maxlen - len(front_pad) - len(end_pad)
        text = text[:len2use]
    if limit_repeats > 0:
        # BUG FIX: the original discarded the return value of remove_repeats
        # (it does not mutate its argument), so repeated tokens were never
        # actually removed.
        text = remove_repeats(text, limit_repeats, join=False)
    text = front_pad + (" ".join(text)) + end_pad
    if encode is not None:
        text = text.encode(encoding=encode)
    return text

def remove_repeats(string, n, join=True):
    """Drop elements so no run of equal consecutive items exceeds length *n*.

    Works on any sequence (characters of a str or items of a list); the input
    is not modified.

    Args:
        string: sequence to filter.
        n: maximum allowed run length.
        join: if True return ''.join of the kept items, else the list itself.

    Returns:
        str (when ``join``) or list of kept items.
    """
    count = 0
    output = []
    last = ''
    for c in string:
        if c == last:
            count = count + 1
        else:
            count = 0
            last = c
        if count < n:
            output.append(c)
    if join:
        return "".join(output)
    return output
def tokenize_str_batch(strings, rtn_maxlen=True, process=True, maxlen=None):
    """
    Tokenizes a list of strings into a ByteTensor
    Args:
        strings: List of utf-8 encoded strings to tokenize into ByteTensor form
        rtn_maxlen: Tri-state flag controlling the second return value --
            True -> (batch_tensor, maxlen), False -> (batch_tensor, lens),
            None -> batch_tensor only
        process: if True run each string through `process_str`; otherwise just
            ascii-encode it (non-ascii characters are dropped)
        maxlen: forwarded to `process_str` as its `maxlen` argument (ignored
            when `process` is False)
    Returns:
        batch_tensor: ByteTensor of shape `[len(strings),maxlen_of_strings]`
        lens: Length of each string in strings after being preprocessed with `preprocess` (useful for
            dynamic length rnns). If `rtn_maxlen` is `True` then max(lens) is returned instead.
    """
    if process:
        processed_strings = [process_str(x, maxlen=maxlen) for x in strings]
    else:
        processed_strings = [x.encode('ascii', 'ignore') for x in strings]
    lens = list(map(len, processed_strings))
    # NOTE: `maxlen` is re-bound here to the longest processed string; the
    # tensor is allocated uninitialized, so row bytes past a string's own
    # length contain arbitrary memory -- use `lens` to mask if that matters.
    maxlen = max(lens)
    batch_tensor = torch.ByteTensor(len(lens), maxlen)
    for i, string in enumerate(processed_strings):
        # fill row i in place with the string's bytes
        _tokenize_str(string, batch_tensor[i])
    if not rtn_maxlen and rtn_maxlen is not None:
        return batch_tensor, lens
    if rtn_maxlen is None:
        return batch_tensor
    return batch_tensor, maxlen
def _tokenize_str(string, char_tensor=None):
    """
    Writes the utf-8 bytes of *string* (str or bytes) into ByteTensor
    char_tensor. If no char_tensor is provided one is created.
    Typically used internally by `tokenize_str_batch`.

    Args:
        string: str or bytes to tokenize.
        char_tensor: optional pre-allocated ByteTensor of at least
            len(bytes-of-string) elements; written in place.

    Returns:
        The filled ByteTensor.
    """
    # BUG FIX: normalize to bytes first. The original called string.encode()
    # unconditionally (AttributeError for the bytes input that
    # tokenize_str_batch passes) and, for str input, iterated characters that
    # cannot be assigned into a ByteTensor.
    if isinstance(string, str):
        string = string.encode()
    if char_tensor is None:
        char_tensor = torch.ByteTensor(len(string))
    for i, byte in enumerate(string):
        char_tensor[i] = byte
    return char_tensor
| 31.537037 | 139 | 0.654433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,085 | 0.318555 |
f68c3bc365dd845146ca474b6dad80a6bde410a7 | 659 | py | Python | Python/_10_09/2.3.19.py | MBkkt/Homework | db92bdb9262f1737d637f01d5cf2d3680e379a7b | [
"MIT"
] | 1 | 2019-04-07T18:27:29.000Z | 2019-04-07T18:27:29.000Z | Python/_10_09/2.3.19.py | MBkkt/Homework | db92bdb9262f1737d637f01d5cf2d3680e379a7b | [
"MIT"
] | 1 | 2019-10-02T23:01:01.000Z | 2019-10-02T23:01:01.000Z | Python/_10_09/2.3.19.py | MBkkt/Homework | db92bdb9262f1737d637f01d5cf2d3680e379a7b | [
"MIT"
] | 2 | 2018-10-19T22:42:54.000Z | 2019-03-10T14:57:59.000Z | """ Комбинации. Составьте программу combinations. ру, получающую из командной
строки один аргумент n и выводящую все 2" комбинаций любого
размера. Комбинация - это подмножество из п элементов, независимо
от порядка. Например, когда п = 3, вы должны получить следующий вывод:
а аЬ аЬс ас Ь ьс с
Обратите внимание, что программа должна выводить и пустую строку
(подмножество размером О). """
from itertools import combinations
def sum_comb(n):
    """Return all 2**n subsets of the digits 0..n-1 as one space-separated string.

    Subsets are emitted in increasing size (empty subset first) and each is
    rendered by concatenating its member digits, e.g. n=3 ->
    ' 0 1 2 01 02 12 012' (the leading space comes from the empty subset).
    """
    groups = []
    for size in range(n + 1):
        rendered = (''.join(str(d) for d in combo) for combo in combinations(range(n), size))
        groups.append(' '.join(rendered))
    return ' '.join(groups)
if __name__ == '__main__':
    # Read n from stdin and print every subset of {0..n-1}.
    n = int(input())
    print(sum_comb(n))
| 34.684211 | 123 | 0.70258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.741597 |
f68c40097ffb41e51b2152c6fd56a2d8749d4869 | 2,445 | py | Python | KerasTest_XOR.py | RaduGrig/DeepLearning | 96bc451ea2c77f184df9c015a57068ada4363727 | [
"Apache-2.0"
] | null | null | null | KerasTest_XOR.py | RaduGrig/DeepLearning | 96bc451ea2c77f184df9c015a57068ada4363727 | [
"Apache-2.0"
] | null | null | null | KerasTest_XOR.py | RaduGrig/DeepLearning | 96bc451ea2c77f184df9c015a57068ada4363727 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 22:19:57 2017
@author: RaduGrig
@inspiration: NikCleju
"""
# import stuff
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import matplotlib.pyplot as plot
#Params
DataPoints = 100  # samples generated per XOR corner
Noise = 0.1       # std-dev of the gaussian jitter around each corner
#create NN object (sequential feed-forward net)
XoRNN = Sequential()
#
#Other models:
#???To add
#
#stack NN layers(build NN): 2 inputs -> 4 softmax units -> 1 linear output
XoRNN.add(Dense(4, input_dim=2))
XoRNN.add(Activation('softmax'))
XoRNN.add(Dense(1))
XoRNN.add(Activation('linear'))
# mean-squared-error regression loss with the Adam optimizer
XoRNN.compile(optimizer=Adam(), loss="mse")
#
#Other activations:
#https://keras.io/activations/
#
#Create training data: noisy point clouds around the four XOR corners
Data00 = np.tile( np.array([0, 0]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Data01 = np.tile( np.array([0, 1]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Data10 = np.tile( np.array([1, 0]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Data11 = np.tile( np.array([1, 1]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
# Test data. BUG FIX: every Tests* cloud was previously centered on [0, 0]
# (copy-paste error); each cloud now matches the corner its name implies.
Tests00 = np.tile( np.array([0, 0]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Tests01 = np.tile( np.array([0, 1]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Tests10 = np.tile( np.array([1, 0]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
Tests11 = np.tile( np.array([1, 1]), (DataPoints, 1) ) + Noise * np.random.randn( DataPoints, 2 )
# XOR labels: 0 for (0,0)/(1,1), 1 for (0,1)/(1,0)
Labels00 = np.zeros(( DataPoints, 1 ))
Labels01 = np.ones(( DataPoints, 1 ))
Labels10 = np.ones(( DataPoints, 1 ))
Labels11 = np.zeros(( DataPoints, 1 ))
TrainingSet = np.array( np.vstack((Data00, Data01, Data10, Data11)), dtype=np.float32)
TestSet = np.array( np.vstack((Tests00, Tests01, Tests10, Tests11)), dtype=np.float32)
Labels = np.vstack((Labels00, Labels01, Labels10, Labels11))
#Plot training set, colored by XOR label
plot.scatter(TrainingSet[:,0], TrainingSet[:,1], c=Labels)
plot.show()
# Visualize the model's decision areas over the unit square (after NikCleju).
def plot_separating_curve(model):
    """Scatter-plot a 100x100 grid of the unit square, colored by whether
    ``model.predict`` exceeds 0.5 at each grid point."""
    axis_vals = np.linspace(0, 1, 100)
    grid = np.array([(u, v) for u in axis_vals for v in axis_vals])
    predictions = model.predict(grid)
    is_positive = predictions > 0.5
    plot.scatter(grid[:, 0], grid[:, 1], c=is_positive, alpha=0.5)
    plot.title('Decision areas')
    plot.show()
# Train on the noisy XOR data, then visualize the learned decision regions.
XoRNN.fit(TrainingSet, Labels, epochs=200, batch_size=50)
plot_separating_curve(XoRNN)
f68cede11c2ec066665c8fcba60338febd58bbff | 221 | py | Python | lib/opentok/__init__.py | Rudi9719/booksearch-web | c48300e04ebdf2f01a990c67d4ff63d282da8168 | [
"Apache-2.0"
] | null | null | null | lib/opentok/__init__.py | Rudi9719/booksearch-web | c48300e04ebdf2f01a990c67d4ff63d282da8168 | [
"Apache-2.0"
] | null | null | null | lib/opentok/__init__.py | Rudi9719/booksearch-web | c48300e04ebdf2f01a990c67d4ff63d282da8168 | [
"Apache-2.0"
] | null | null | null | from .opentok import OpenTok, Roles, MediaModes, ArchiveModes
from .session import Session
from .archives import Archive, ArchiveList, OutputModes
from .exceptions import OpenTokException
from .version import __version__
| 36.833333 | 61 | 0.841629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f68d61972a56264e1422d85b1127e77df23bf4c2 | 2,130 | py | Python | src/rubrik_config/rubrik_config_base.py | rubrikinc/rubrik-config-backup | 4c421f5090064372f6e89673a5d445de8248ddda | [
"MIT"
] | 2 | 2021-01-27T21:15:24.000Z | 2021-06-08T17:31:15.000Z | src/rubrik_config/rubrik_config_base.py | rubrikinc/rubrik-config-backup | 4c421f5090064372f6e89673a5d445de8248ddda | [
"MIT"
] | 1 | 2020-12-10T07:01:09.000Z | 2020-12-10T07:01:09.000Z | src/rubrik_config/rubrik_config_base.py | rubrikinc/rubrik-config-backup | 4c421f5090064372f6e89673a5d445de8248ddda | [
"MIT"
] | null | null | null | import abc
import json
import os
from rubrik_config import helpers
class RubrikConfigBase(abc.ABC):
    """Abstract base class for Rubrik cluster configuration handlers.

    A subclass manages one configuration type and implements backup,
    restore and job-status reporting for it.
    """
    def __init__(self, path, rubrik, logger):
        """
        Args:
            path (str): Root directory where configuration JSON files are written.
            rubrik: Rubrik API client; must expose ``cluster_version()`` and
                whatever the ``helpers`` functions below need.
            logger: Logger used for progress/info messages.
        """
        self.path = path
        self.rubrik = rubrik
        self.logger = logger
        self.cluster_version = self.rubrik.cluster_version()
        self.cluster_name = helpers.cluster_name(self.rubrik)
        self.config_name = helpers.config_name(self)
        # Config types this one depends on; presumably populated by
        # subclasses to order restores -- TODO confirm against callers.
        self.dependencies = set()
    @abc.abstractmethod
    def backup(self):
        """Backup all configuration items of this type.

        Returns:
            int: The number of items backed up.
        """
        pass
    @abc.abstractmethod
    def restore(self, items):
        """Restore the given configuration items.

        Args:
            items ([str]): The configuration items to restore.

        Returns:
            list: A list of jobs that have been initiated on the cluster as part
            of the recovery of the configuration.
        """
        pass
    @abc.abstractmethod
    def status(self, job):
        """Return the status of the given job.

        Args:
            job (str): Job ID

        Returns:
            list: A list containing the status details of the given job.
        """
        pass
    # Private Methods
    def _write(self, content, content_type, name_fn=lambda x: x['name']):
        """Serialize each item in *content* to its own JSON file.

        Files are created under ``<self.path>/<first dotted segment of
        content_type>/`` and wrap the item with cluster name/version metadata.
        Does nothing when *content* is empty.

        Args:
            content: Iterable of configuration item dicts.
            content_type (str): Dotted type tag recorded in each file; its
                first segment also names the output sub-directory.
            name_fn: Callable mapping an item to its file-name stem
                (defaults to the item's 'name' key).
        """
        if len(content) == 0:
            return
        content_dir_name = content_type.split('.')[0]
        path = f"{self.path}/{content_dir_name}"
        os.makedirs(path, exist_ok=True)
        for item in content:
            # sanitize the item name so it is safe to use as a file name
            filename = f"{path}/{helpers.secure_filename(name_fn(item))}.json"
            with open(filename, 'w') as f:
                file_content = {
                    'clusterName': self.cluster_name,
                    'clusterVersion': self.cluster_version,
                    'type': content_type,
                    'config': item
                }
                f.write(json.dumps(file_content, indent=4, sort_keys=True))
            self.logger.info("'%s' successfully saved to %s" % (name_fn(item), filename))
| 26.296296 | 89 | 0.567606 | 2,059 | 0.966667 | 0 | 0 | 799 | 0.375117 | 0 | 0 | 788 | 0.369953 |
f68d9b8db5553fd496f890e0ba612aaca3bab81b | 26,309 | py | Python | Gather_Data.py | batumoglu/Home_Credit | bf3f918bafdc0e9be1c24809068fac1242fff881 | [
"Apache-2.0"
] | 1 | 2019-11-04T08:49:34.000Z | 2019-11-04T08:49:34.000Z | Gather_Data.py | batumoglu/Home_Credit | bf3f918bafdc0e9be1c24809068fac1242fff881 | [
"Apache-2.0"
] | null | null | null | Gather_Data.py | batumoglu/Home_Credit | bf3f918bafdc0e9be1c24809068fac1242fff881 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 19:51:12 2018
@author: ozkan
"""
import pandas as pd
import numpy as np
#from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from scipy import stats
import gc
import GatherTables
def one_hot_encoder(df):
    """One-hot encode every object-dtype column of *df* (NaN gets its own dummy).

    Returns:
        (encoded_df, new_columns): the transformed frame and the list of
        dummy column names that were added.
    """
    before = list(df.columns)
    object_cols = [col for col in before if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=object_cols, dummy_na=True)
    added = [col for col in df.columns if col not in before]
    return df, added
def checkTrainTestConsistency(train, test):
    """Placeholder consistency check: currently returns the inputs untouched."""
    return train, test
def AllData_v2(reduce_mem=True):
    """Assemble the full modeling table (v1 aggregations) from all raw sources.

    Chains the GatherTables handlers over the application data, label-encodes
    every object column, drops SK_ID_CURR and splits back into train/test.

    Returns:
        (data, test, y): train features, test features and the train target.
    """
    app_data, len_train = GatherTables.getAppData()
    merged_df = GatherTables.generateAppFeatures(app_data)
    for add_table in (GatherTables.handlePrev,
                      GatherTables.handleCreditCard,
                      GatherTables.handleBuro,
                      GatherTables.handleBuroBalance,
                      GatherTables.handlePosCash,
                      GatherTables.handleInstallments):
        merged_df = add_table(merged_df)
    # Label-encode every object column in place.
    for col in [c for c in merged_df.columns if merged_df[c].dtype == 'object']:
        merged_df[col], _ = pd.factorize(merged_df[col])
    merged_df.drop('SK_ID_CURR', axis=1, inplace=True)
    data = merged_df[:len_train]
    test = merged_df[len_train:]
    y = data.pop('TARGET')
    test.drop(['TARGET'], axis=1, inplace=True)
    return data, test, y
def AllData_v3(reduce_mem=True):
    """Assemble the full modeling table using the v2 table handlers.

    Same pipeline as AllData_v2 but with the *_v2 aggregation functions.

    Returns:
        (data, test, y): train features, test features and the train target.
    """
    app_data, len_train = GatherTables.getAppData()
    merged_df = GatherTables.generateAppFeatures(app_data)
    for add_table in (GatherTables.handlePrev_v2,
                      GatherTables.handleCreditCard_v2,
                      GatherTables.handleBuro_v2,
                      GatherTables.handleBuroBalance_v2,
                      GatherTables.handlePosCash_v2,
                      GatherTables.handleInstallments_v2):
        merged_df = add_table(merged_df)
    # Label-encode every object column in place.
    for col in [c for c in merged_df.columns if merged_df[c].dtype == 'object']:
        merged_df[col], _ = pd.factorize(merged_df[col])
    merged_df.drop('SK_ID_CURR', axis=1, inplace=True)
    data = merged_df[:len_train]
    test = merged_df[len_train:]
    y = data.pop('TARGET')
    test.drop(['TARGET'], axis=1, inplace=True)
    return data, test, y
def AllData_v4(reduce_mem=True):
    """Assemble the full modeling table using the v4/v2 table handlers.

    Unlike the earlier versions, categorical columns are one-hot encoded
    (via one_hot_encoder) instead of label-encoded.

    Returns:
        (data, test, y): train features, test features and the train target.
    """
    app_data, len_train = GatherTables.getAppData()
    merged_df = GatherTables.generateAppFeatures_v4(app_data)
    for add_table in (GatherTables.handlePrev_v4,
                      GatherTables.handleCreditCard_v4,
                      GatherTables.handleBuro_v4,
                      GatherTables.handleBuroBalance_v2,
                      GatherTables.handlePosCash_v2,
                      GatherTables.handleInstallments_v2):
        merged_df = add_table(merged_df)
    merged_df, _ = one_hot_encoder(merged_df)
    merged_df.drop('SK_ID_CURR', axis=1, inplace=True)
    data = merged_df[:len_train]
    test = merged_df[len_train:]
    y = data.pop('TARGET')
    test.drop(['TARGET'], axis=1, inplace=True)
    return data, test, y
def ApplicationBuroBalance(reduce_mem=True):
    """Build train/test features from the application tables enriched with
    bureau and bureau-balance aggregates.

    Returns:
        (data, test, y): label-encoded train features, test features, and
        the train TARGET series.
    """
    data = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    buro = pd.read_csv('../input/bureau.csv')
    buro_balance = pd.read_csv('../input/bureau_balance.csv')
    # Handle Buro Balance
    # Fold the non-numeric statuses ('C', 'X') into '0' so STATUS can be
    # treated as a numeric delinquency level.
    buro_balance.loc[buro_balance['STATUS']=='C', 'STATUS'] = '0'
    buro_balance.loc[buro_balance['STATUS']=='X', 'STATUS'] = '0'
    buro_balance['STATUS'] = buro_balance['STATUS'].astype('int64')
    buro_balance_group = buro_balance.groupby('SK_ID_BUREAU').agg({'STATUS':['max','mean'], 'MONTHS_BALANCE':'max'})
    buro_balance_group.columns = [' '.join(col).strip() for col in buro_balance_group.columns.values]
    # Keep, per bureau credit, the row of the most recent month.
    idx = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].transform(max) == buro_balance['MONTHS_BALANCE']
    Buro_Balance_Last = buro_balance[idx][['SK_ID_BUREAU','STATUS']]
    Buro_Balance_Last.rename(columns={'STATUS': 'Buro_Balance_Last_Value'}, inplace=True)
    Buro_Balance_Last['Buro_Balance_Max'] = Buro_Balance_Last['SK_ID_BUREAU'].map(buro_balance_group['STATUS max'])
    Buro_Balance_Last['Buro_Balance_Mean'] = Buro_Balance_Last['SK_ID_BUREAU'].map(buro_balance_group['STATUS mean'])
    Buro_Balance_Last['Buro_Balance_Last_Month'] = Buro_Balance_Last['SK_ID_BUREAU'].map(buro_balance_group['MONTHS_BALANCE max'])
    # Handle Buro Data
    # Aggregation helpers for the groupby calls below.
    def nonUnique(x):
        return x.nunique()
    def modeValue(x):
        return stats.mode(x)[0][0]
    def totalBadCredit(x):
        # NOTE(review): assumes the factorize step below maps the "bad"
        # CREDIT_ACTIVE categories to codes 2/3 -- the mapping depends on
        # value order in the raw file; confirm.
        badCredit = 0
        for value in x:
            if(value==2 or value==3):
                badCredit+=1
        return badCredit
    def creditOverdue(x):
        # Count credits with any positive days-overdue value.
        overdue=0
        for value in x:
            if(value>0):
                overdue+=1
        return overdue
    # Label-encode object columns; train and test share one indexer so test
    # uses the train mapping (unseen categories become -1).
    categorical_feats = [f for f in buro.columns if buro[f].dtype == 'object']
    for f_ in categorical_feats:
        buro[f_], indexer = pd.factorize(buro[f_])
    categorical_feats = [f for f in data.columns if data[f].dtype == 'object']
    for f_ in categorical_feats:
        data[f_], indexer = pd.factorize(data[f_])
        test[f_] = indexer.get_indexer(test[f_])
    # Aggregate Values on All Credits
    buro_group = buro.groupby('SK_ID_CURR').agg({'SK_ID_BUREAU':'count',
                                                 'AMT_CREDIT_SUM':'sum',
                                                 'AMT_CREDIT_SUM_DEBT':'sum',
                                                 'CREDIT_CURRENCY': [nonUnique, modeValue],
                                                 'CREDIT_TYPE': [nonUnique, modeValue],
                                                 'CNT_CREDIT_PROLONG': 'sum',
                                                 'CREDIT_ACTIVE': totalBadCredit,
                                                 'CREDIT_DAY_OVERDUE': creditOverdue
                                                 })
    buro_group.columns = [' '.join(col).strip() for col in buro_group.columns.values]
    # Aggregate Values on Active Credits
    # NOTE(review): assumes factorize mapped CREDIT_ACTIVE=='Active' to 1 --
    # depends on value order in the raw file; confirm.
    buro_active = buro.loc[buro['CREDIT_ACTIVE']==1]
    buro_group_active = buro_active.groupby('SK_ID_CURR').agg({'AMT_CREDIT_SUM': ['sum', 'count'],
                                                               'AMT_CREDIT_SUM_DEBT': 'sum',
                                                               'AMT_CREDIT_SUM_LIMIT': 'sum'
                                                               })
    buro_group_active.columns = [' '.join(col).strip() for col in buro_group_active.columns.values]
    # Getting last credit for each user
    idx = buro.groupby('SK_ID_CURR')['SK_ID_BUREAU'].transform(max) == buro['SK_ID_BUREAU']
    Buro_Last = buro[idx][['SK_ID_CURR','CREDIT_TYPE','DAYS_CREDIT_UPDATE','DAYS_CREDIT',
                           'DAYS_CREDIT_ENDDATE','DAYS_ENDDATE_FACT', 'SK_ID_BUREAU']]
    # Attach the per-customer aggregates onto each customer's last-credit row.
    Buro_Last['Credit_Count'] = Buro_Last['SK_ID_CURR'].map(buro_group['SK_ID_BUREAU count'])
    Buro_Last['Total_Credit_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group['AMT_CREDIT_SUM sum'])
    Buro_Last['Total_Debt_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group['AMT_CREDIT_SUM_DEBT sum'])
    Buro_Last['NumberOfCreditCurrency'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_CURRENCY nonUnique'])
    Buro_Last['MostCommonCreditCurrency'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_CURRENCY modeValue'])
    Buro_Last['NumberOfCreditType'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_TYPE nonUnique'])
    Buro_Last['MostCommonCreditType'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_TYPE modeValue'])
    Buro_Last['NumberOfCreditProlong'] = Buro_Last['SK_ID_CURR'].map(buro_group['CNT_CREDIT_PROLONG sum'])
    Buro_Last['NumberOfBadCredit'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_ACTIVE totalBadCredit'])
    Buro_Last['NumberOfDelayedCredit'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_DAY_OVERDUE creditOverdue'])
    Buro_Last['Active_Credit_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM sum'])
    Buro_Last['Active_Credit_Count'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM count'])
    Buro_Last['Active_Debt_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM_DEBT sum'])
    Buro_Last['Active_Credit_Card_Limit'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM_LIMIT sum'])
    Buro_Last['BalanceOnCreditBuro'] = Buro_Last['Active_Debt_Amount'] / Buro_Last['Active_Credit_Amount']
    # Merge buro with Buro Balance
    buro_merged = pd.merge(buro, Buro_Balance_Last, how='left', on='SK_ID_BUREAU')
    buro_merged = buro_merged[['SK_ID_CURR','SK_ID_BUREAU','Buro_Balance_Last_Value','Buro_Balance_Max',
                               'Buro_Balance_Mean','Buro_Balance_Last_Month']]
    # Average the balance-history features over each customer's credits.
    buro_merged_group = buro_merged.groupby('SK_ID_CURR').agg(np.mean)
    buro_merged_group.reset_index(inplace=True)
    buro_merged_group.drop('SK_ID_BUREAU', axis=1, inplace=True)
    # Add Tables to main Data
    data = data.merge(right=Buro_Last.reset_index(), how='left', on='SK_ID_CURR')
    test = test.merge(right=Buro_Last.reset_index(), how='left', on='SK_ID_CURR')
    data = data.merge(right=buro_merged_group.reset_index(), how='left', on='SK_ID_CURR')
    test = test.merge(right=buro_merged_group.reset_index(), how='left', on='SK_ID_CURR')
    y = data['TARGET']
    data.drop(['SK_ID_CURR','TARGET'], axis=1, inplace=True)
    test.drop(['SK_ID_CURR'], axis=1, inplace=True)
    if(reduce_mem==True):
        # NOTE(review): reduce_mem_usage is not visible in this section of the
        # module -- confirm it is defined elsewhere in the file.
        data = reduce_mem_usage(data)
        test = reduce_mem_usage(test)
    return(data, test, y)
def ApplicationBuro(reduce_mem=True):
    """Build train/test features from the application tables enriched with
    bureau aggregates (without the bureau-balance history).

    Returns:
        (data, test, y): label-encoded train features, test features, and
        the train TARGET series.
    """
    data = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    buro = pd.read_csv('../input/bureau.csv')
    # Aggregation helpers for the groupby calls below.
    def nonUnique(x):
        return x.nunique()
    def modeValue(x):
        return stats.mode(x)[0][0]
    def totalBadCredit(x):
        # NOTE(review): assumes the factorize step below maps the "bad"
        # CREDIT_ACTIVE categories to codes 2/3 -- mapping depends on value
        # order in the raw file; confirm.
        badCredit = 0
        for value in x:
            if(value==2 or value==3):
                badCredit+=1
        return badCredit
    def creditOverdue(x):
        # Count credits with any positive days-overdue value.
        overdue=0
        for value in x:
            if(value>0):
                overdue+=1
        return overdue
    # Label-encode object columns; train and test share one indexer so test
    # uses the train mapping (unseen categories become -1).
    categorical_feats = [f for f in buro.columns if buro[f].dtype == 'object']
    for f_ in categorical_feats:
        buro[f_], indexer = pd.factorize(buro[f_])
    categorical_feats = [f for f in data.columns if data[f].dtype == 'object']
    for f_ in categorical_feats:
        data[f_], indexer = pd.factorize(data[f_])
        test[f_] = indexer.get_indexer(test[f_])
    # Aggregate Values on All Credits
    buro_group = buro.groupby('SK_ID_CURR').agg({'SK_ID_BUREAU':'count',
                                                 'AMT_CREDIT_SUM':'sum',
                                                 'AMT_CREDIT_SUM_DEBT':'sum',
                                                 'CREDIT_CURRENCY': [nonUnique, modeValue],
                                                 'CREDIT_TYPE': [nonUnique, modeValue],
                                                 'CNT_CREDIT_PROLONG': 'sum',
                                                 'CREDIT_ACTIVE': totalBadCredit,
                                                 'CREDIT_DAY_OVERDUE': creditOverdue
                                                 })
    buro_group.columns = [' '.join(col).strip() for col in buro_group.columns.values]
    # Aggregate Values on Active Credits
    # NOTE(review): assumes factorize mapped CREDIT_ACTIVE=='Active' to 1.
    buro_active = buro.loc[buro['CREDIT_ACTIVE']==1]
    buro_group_active = buro_active.groupby('SK_ID_CURR').agg({'AMT_CREDIT_SUM': ['sum', 'count'],
                                                               'AMT_CREDIT_SUM_DEBT': 'sum',
                                                               'AMT_CREDIT_SUM_LIMIT': 'sum'
                                                               })
    buro_group_active.columns = [' '.join(col).strip() for col in buro_group_active.columns.values]
    # Getting last credit for each user
    idx = buro.groupby('SK_ID_CURR')['SK_ID_BUREAU'].transform(max) == buro['SK_ID_BUREAU']
    Buro_Last = buro[idx][['SK_ID_CURR','CREDIT_TYPE','DAYS_CREDIT_UPDATE','DAYS_CREDIT',
                           'DAYS_CREDIT_ENDDATE','DAYS_ENDDATE_FACT']]
    # Attach the per-customer aggregates onto each customer's last-credit row.
    Buro_Last['Credit_Count'] = Buro_Last['SK_ID_CURR'].map(buro_group['SK_ID_BUREAU count'])
    Buro_Last['Total_Credit_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group['AMT_CREDIT_SUM sum'])
    Buro_Last['Total_Debt_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group['AMT_CREDIT_SUM_DEBT sum'])
    Buro_Last['NumberOfCreditCurrency'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_CURRENCY nonUnique'])
    Buro_Last['MostCommonCreditCurrency'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_CURRENCY modeValue'])
    Buro_Last['NumberOfCreditType'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_TYPE nonUnique'])
    Buro_Last['MostCommonCreditType'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_TYPE modeValue'])
    Buro_Last['NumberOfCreditProlong'] = Buro_Last['SK_ID_CURR'].map(buro_group['CNT_CREDIT_PROLONG sum'])
    Buro_Last['NumberOfBadCredit'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_ACTIVE totalBadCredit'])
    Buro_Last['NumberOfDelayedCredit'] = Buro_Last['SK_ID_CURR'].map(buro_group['CREDIT_DAY_OVERDUE creditOverdue'])
    Buro_Last['Active_Credit_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM sum'])
    Buro_Last['Active_Credit_Count'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM count'])
    Buro_Last['Active_Debt_Amount'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM_DEBT sum'])
    Buro_Last['Active_Credit_Card_Limit'] = Buro_Last['SK_ID_CURR'].map(buro_group_active['AMT_CREDIT_SUM_LIMIT sum'])
    Buro_Last['BalanceOnCreditBuro'] = Buro_Last['Active_Debt_Amount'] / Buro_Last['Active_Credit_Amount']
    # Join the bureau features onto the main application tables.
    data = data.merge(right=Buro_Last.reset_index(), how='left', on='SK_ID_CURR')
    test = test.merge(right=Buro_Last.reset_index(), how='left', on='SK_ID_CURR')
    y = data['TARGET']
    data.drop(['SK_ID_CURR','TARGET'], axis=1, inplace=True)
    test.drop(['SK_ID_CURR'], axis=1, inplace=True)
    if(reduce_mem==True):
        # NOTE(review): reduce_mem_usage is not visible in this section of the
        # module -- confirm it is defined elsewhere in the file.
        data = reduce_mem_usage(data)
        test = reduce_mem_usage(test)
    return(data, test, y)
def ApplicationOnly(reduce_mem=True):
    """Load only the main application train/test tables.

    Object columns are label-encoded with the training-set mapping (unseen
    test categories become -1); SK_ID_CURR is dropped. Optionally shrinks
    memory with reduce_mem_usage (defined elsewhere in this module).

    Returns:
        (data, test, y): train features, test features and the train target.
    """
    data = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    object_cols = [col for col in data.columns if data[col].dtype == 'object']
    for col in object_cols:
        data[col], indexer = pd.factorize(data[col])
        test[col] = indexer.get_indexer(test[col])
    y = data['TARGET']
    data.drop(['SK_ID_CURR', 'TARGET'], axis=1, inplace=True)
    test.drop(['SK_ID_CURR'], axis=1, inplace=True)
    if reduce_mem == True:  # exact-True check kept from the original
        data = reduce_mem_usage(data)
        test = reduce_mem_usage(test)
    return data, test, y
def ApplicationBuroAndPrev(reduce_mem=True):
    """Build train/test features from the application tables plus mean-
    aggregated previous-application and bureau features.

    Returns:
        (data, test, y): label-encoded train features, test features, and
        the train TARGET series.
    """
    data = pd.read_csv('../input/application_train.csv')
    test = pd.read_csv('../input/application_test.csv')
    prev = pd.read_csv('../input/previous_application.csv')
    buro = pd.read_csv('../input/bureau.csv')
    # Label-encode application object columns; test reuses the train mapping
    # (unseen categories become -1).
    categorical_feats = [f for f in data.columns if data[f].dtype == 'object']
    for f_ in categorical_feats:
        data[f_], indexer = pd.factorize(data[f_])
        test[f_] = indexer.get_indexer(test[f_])
    # One-hot encode previous-application categoricals (dummies appended
    # alongside the original columns).
    prev_cat_features = [f_ for f_ in prev.columns if prev[f_].dtype == 'object']
    for f_ in prev_cat_features:
        prev = pd.concat([prev, pd.get_dummies(prev[f_], prefix=f_)], axis=1)
    # Replace SK_ID_PREV with the per-customer count of previous applications,
    # then average everything per customer.
    cnt_prev = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
    prev['SK_ID_PREV'] = prev['SK_ID_CURR'].map(cnt_prev['SK_ID_PREV'])
    avg_prev = prev.groupby('SK_ID_CURR').mean()
    avg_prev.columns = ['prev_app_' + f_ for f_ in avg_prev.columns]
    # Same treatment for the bureau table, plus a per-customer credit count.
    buro_cat_features = [f_ for f_ in buro.columns if buro[f_].dtype == 'object']
    for f_ in buro_cat_features:
        buro = pd.concat([buro, pd.get_dummies(buro[f_], prefix=f_)], axis=1)
    avg_buro = buro.groupby('SK_ID_CURR').mean()
    avg_buro['buro_count'] = buro[['SK_ID_BUREAU','SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']
    avg_buro.columns = ['bureau_' + f_ for f_ in avg_buro.columns]
    # Join the aggregates onto the main tables.
    data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
    data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
    test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
    test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
    y = data['TARGET']
    data.drop(['SK_ID_CURR','TARGET'], axis=1, inplace=True)
    test.drop(['SK_ID_CURR'], axis=1, inplace=True)
    if(reduce_mem==True):
        # NOTE(review): reduce_mem_usage is not visible in this section of the
        # module -- confirm it is defined elsewhere in the file.
        data = reduce_mem_usage(data)
        test = reduce_mem_usage(test)
    return(data, test, y)
def AllData(reduce_mem=True):
data = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
prev = pd.read_csv('../input/previous_application.csv')
buro = pd.read_csv('../input/bureau.csv')
buro_balance = pd.read_csv('../input/bureau_balance.csv')
credit_card = pd.read_csv('../input/credit_card_balance.csv')
POS_CASH = pd.read_csv('../input/POS_CASH_balance.csv')
payments = pd.read_csv('../input/installments_payments.csv')
categorical_feats = [f for f in data.columns if data[f].dtype == 'object']
for f_ in categorical_feats:
data[f_], indexer = pd.factorize(data[f_])
test[f_] = indexer.get_indexer(test[f_])
y = data['TARGET']
del data['TARGET']
#Pre-processing buro_balance
print('Pre-processing buro_balance...')
buro_grouped_size = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].size()
buro_grouped_max = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].max()
buro_grouped_min = buro_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].min()
buro_counts = buro_balance.groupby('SK_ID_BUREAU')['STATUS'].value_counts(normalize = False)
buro_counts_unstacked = buro_counts.unstack('STATUS')
buro_counts_unstacked.columns = ['STATUS_0', 'STATUS_1','STATUS_2','STATUS_3','STATUS_4','STATUS_5','STATUS_C','STATUS_X',]
buro_counts_unstacked['MONTHS_COUNT'] = buro_grouped_size
buro_counts_unstacked['MONTHS_MIN'] = buro_grouped_min
buro_counts_unstacked['MONTHS_MAX'] = buro_grouped_max
buro = buro.join(buro_counts_unstacked, how='left', on='SK_ID_BUREAU')
#Pre-processing previous_application
print('Pre-processing previous_application...')
#One-hot encoding of categorical features in previous application data set
prev_cat_features = [pcol for pcol in prev.columns if prev[pcol].dtype == 'object']
prev = pd.get_dummies(prev, columns=prev_cat_features)
avg_prev = prev.groupby('SK_ID_CURR').mean()
cnt_prev = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
avg_prev['nb_app'] = cnt_prev['SK_ID_PREV']
del avg_prev['SK_ID_PREV']
#Pre-processing buro
print('Pre-processing buro...')
#One-hot encoding of categorical features in buro data set
buro_cat_features = [bcol for bcol in buro.columns if buro[bcol].dtype == 'object']
buro = pd.get_dummies(buro, columns=buro_cat_features)
avg_buro = buro.groupby('SK_ID_CURR').mean()
avg_buro['buro_count'] = buro[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']
del avg_buro['SK_ID_BUREAU']
#Pre-processing POS_CASH
print('Pre-processing POS_CASH...')
le = LabelEncoder()
POS_CASH['NAME_CONTRACT_STATUS'] = le.fit_transform(POS_CASH['NAME_CONTRACT_STATUS'].astype(str))
nunique_status = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique()
nunique_status2 = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max()
POS_CASH['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS']
POS_CASH['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS']
POS_CASH.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
#Pre-processing credit_card
print('Pre-processing credit_card...')
credit_card['NAME_CONTRACT_STATUS'] = le.fit_transform(credit_card['NAME_CONTRACT_STATUS'].astype(str))
nunique_status = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique()
nunique_status2 = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max()
credit_card['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS']
credit_card['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS']
credit_card.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
#Pre-processing payments
print('Pre-processing payments...')
avg_payments = payments.groupby('SK_ID_CURR').mean()
avg_payments2 = payments.groupby('SK_ID_CURR').max()
avg_payments3 = payments.groupby('SK_ID_CURR').min()
del avg_payments['SK_ID_PREV']
#Join data bases
print('Joining databases...')
data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(POS_CASH.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(POS_CASH.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')
if(reduce_mem==True):
data = reduce_mem_usage(data)
test = reduce_mem_usage(test)
return(data, test, y)
def reduce_mem_usage(df):
    """Shrink a DataFrame's memory footprint by downcasting, in place.

    Integer columns are narrowed to the smallest int type whose range
    strictly contains their min/max; float columns are narrowed the same
    way (float16/float32, else float64); object columns become
    ``category``. Prints before/after memory usage and returns the same
    (mutated) DataFrame.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    int_candidates = [np.int8, np.int16, np.int32, np.int64]
    float_candidates = [np.float16, np.float32]
    for name in df.columns:
        dtype = df[name].dtype
        if dtype == object:
            # Non-numeric columns: pandas categorical is far more compact.
            df[name] = df[name].astype('category')
            continue
        lo = df[name].min()
        hi = df[name].max()
        if str(dtype).startswith('int'):
            # First (narrowest) type whose bounds strictly contain [lo, hi].
            for candidate in int_candidates:
                info = np.iinfo(candidate)
                if lo > info.min and hi < info.max:
                    df[name] = df[name].astype(candidate)
                    break
        else:
            for candidate in float_candidates:
                info = np.finfo(candidate)
                if lo > info.min and hi < info.max:
                    df[name] = df[name].astype(candidate)
                    break
            else:
                # Out of float32 range (or NaN bounds): pin to float64.
                df[name] = df[name].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
def AllData_v5(reduce_mem=True):
    """Build the full Home Credit feature matrix from every auxiliary table.

    Joins the bureau, previous-application, POS-CASH, installments and
    credit-card aggregates (built by ``GatherTables``) onto the main
    application table, one-hot encodes categoricals, and splits train/test
    on the presence of TARGET.

    Parameters
    ----------
    reduce_mem : bool, default True
        When True, downcast numeric columns via ``reduce_mem_usage`` before
        returning. Bug fix: this flag was previously accepted but silently
        ignored; it now behaves like the other loaders in this file.

    Returns
    -------
    tuple
        ``(data, test, y)``: train features, test features, train target.
    """
    df = GatherTables.application_train_test()
    with GatherTables.timer("Process bureau and bureau_balance"):
        bureau = GatherTables.bureau_and_balance()
        print("Bureau df shape:", bureau.shape)
        df = df.join(bureau, how='left', on='SK_ID_CURR')
        print("Current data shape:", df.shape)
        del bureau
        gc.collect()
    with GatherTables.timer("Process previous_applications"):
        prev = GatherTables.previous_applications()
        print("Previous applications df shape:", prev.shape)
        df = df.join(prev, how='left', on='SK_ID_CURR')
        print("Current data shape:", df.shape)
        del prev
        gc.collect()
    with GatherTables.timer("Process POS-CASH balance"):
        pos = GatherTables.pos_cash()
        print("Pos-cash balance df shape:", pos.shape)
        df = df.join(pos, how='left', on='SK_ID_CURR')
        print("Current data shape:", df.shape)
        del pos
        gc.collect()
    with GatherTables.timer("Process installments payments"):
        ins = GatherTables.installments_payments()
        print("Installments payments df shape:", ins.shape)
        df = df.join(ins, how='left', on='SK_ID_CURR')
        print("Current data shape:", df.shape)
        del ins
        gc.collect()
    with GatherTables.timer("Process credit card balance"):
        cc = GatherTables.credit_card_balance()
        print("Credit card balance df shape:", cc.shape)
        df = df.join(cc, how='left', on='SK_ID_CURR')
        print("Current data shape:", df.shape)
        del cc
        gc.collect()
    df, new_columns = one_hot_encoder(df)
    df.drop('SK_ID_CURR', axis=1, inplace=True)
    # .copy() so the later pop()/drop() mutate real frames, not views of df
    # (avoids SettingWithCopyWarning and silent no-op mutation).
    data = df[df['TARGET'].notnull()].copy()
    test = df[df['TARGET'].isnull()].copy()
    y = data.pop('TARGET')
    test.drop(['TARGET'], axis=1, inplace=True)
    if reduce_mem:
        data = reduce_mem_usage(data)
        test = reduce_mem_usage(test)
    return (data, test, y)
f68df4f6b83e61acb9a971c85b9efef84578fc72 | 216 | py | Python | ois/data_request.py | pandincus/ois-service | 7ed45ea5a758f5b529d823aeda60b73a89da66e2 | [
"MIT"
] | 3 | 2018-07-09T04:02:01.000Z | 2018-08-29T09:57:36.000Z | ois/data_request.py | pandincus/ois-service | 7ed45ea5a758f5b529d823aeda60b73a89da66e2 | [
"MIT"
] | null | null | null | ois/data_request.py | pandincus/ois-service | 7ed45ea5a758f5b529d823aeda60b73a89da66e2 | [
"MIT"
] | null | null | null | from .data_request_type import DataRequestType
class DataRequest():
    """Container pairing a field name with the type of aggregation requested.

    ``value`` starts at zero and is expected to be filled in later by
    whoever services the request.
    """

    def __init__(self, fieldName, requestType):
        # Result placeholder, populated after the request is processed.
        self.value = 0
        self.fieldName = fieldName
        self.requestType = requestType
| 21.6 | 47 | 0.699074 | 166 | 0.768519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f68e6d7c7601724ef28e0085ff5c928432db3954 | 15,242 | py | Python | doc/source/conf.py | OpenTreeOfLife/nexson | d2c803b58300ed5024bc1be98e280cfa8fefeb2e | [
"BSD-2-Clause"
] | null | null | null | doc/source/conf.py | OpenTreeOfLife/nexson | d2c803b58300ed5024bc1be98e280cfa8fefeb2e | [
"BSD-2-Clause"
] | null | null | null | doc/source/conf.py | OpenTreeOfLife/nexson | d2c803b58300ed5024bc1be98e280cfa8fefeb2e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Sphinx configuration for nexson.
Largely based on Jeet Sukumaran's conf.py in DendroPy.
"""
import sys
import os
import time
from sphinx.ext import autodoc
from nexson import __version__ as PROJECT_VERSION
# -- Sphinx Hackery ------------------------------------------------
# Following allows for a docstring of a method to be inserted "nakedly"
# (without the signature etc.) into the current context by, for example::
#
# .. autodocstringonly:: dendropy.dataio.newickreader.NewickReader.__init__
#
# Based on:
#
# http://stackoverflow.com/questions/7825263/including-docstring-in-sphinx-documentation
class DocStringOnlyMethodDocumenter(autodoc.MethodDocumenter):
    """autodoc documenter that emits a method's docstring "nakedly".

    Used via ``.. autodocstringonly:: module.Class.method`` to inline a
    docstring without its signature/header (see the module comment above
    for the StackOverflow reference this is based on).
    """
    objtype = "docstringonly"
    # do not indent the content
    content_indent = " "
    # do not add a header to the docstring
    def add_directive_header(self, sig):
        # Intentionally a no-op: suppress the directive header entirely.
        pass
    # def add_line(self, line, source, *lineno):
    #     """Append one line of generated reST to the output."""
    #     print self.indent + line
    #     self.directive.result.append(self.indent + line, source, *lineno)
class KeywordArgumentsOnlyMethodDocumenter(autodoc.MethodDocumenter):
    """autodoc documenter that emits only the ':Keyword Arguments:' section.

    Suppresses everything (header included) until the line containing the
    ``:Keyword Arguments:`` field marker is seen, then emits that and every
    subsequent line.
    """
    objtype = "keywordargumentsonly"
    priority = 0 # do not override normal autodocumenter
    # do not indent the content
    content_indent = " "
    # do not add a header to the docstring
    def add_directive_header(self, sig):
        # Intentionally a no-op: suppress the directive header entirely.
        pass
    def add_line(self, line, source, *lineno):
        if ":Keyword Arguments:" in line:
            line = line.replace(":Keyword Arguments:", " ")
            # From here on, emit every line (flag persists on the instance;
            # absent before the marker, hence the getattr default below).
            self._emit_line = True
        if getattr(self, "_emit_line", False):
            self.directive.result.append(self.indent + line, source, *lineno)
def setup(app):
    """Sphinx extension hook: register the custom autodocumenters.

    Registration of ``DocStringOnlyMethodDocumenter`` is intentionally
    left disabled.
    """
    app.add_autodocumenter(KeywordArgumentsOnlyMethodDocumenter)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon', # requires: pip install sphinxcontrib-napoleon
# 'numpydoc', # requires: pip install numpydoc
]
# If 'both', then class docstring and class.__init__() docstring combined for
# class documentation.
# If 'init', then only class.__init__() docstring shown for class documentation
# (class docstring omitted).
# If not specified, then only class docstring shown for class documentation
# (__init__ docstring omitted).
autoclass_content = 'both' # or 'init'
# numpydoc settings
# numpydoc_show_class_members = False
# numpydoc_class_members_toctree = False
# Napoleon settings
# napoleon_google_docstring = True
# napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
# napoleon_use_admonition_for_examples = False
# napoleon_use_admonition_for_notes = False
# napoleon_use_admonition_for_references = False
# napoleon_use_ivar = False
# napoleon_use_param = False
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nexson'
copyright = u'2020-{}, Open Tree of Life developers'.format(time.strftime('%Y'))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = PROJECT_VERSION
# The full version, including alpha/beta/rc tags.
release = PROJECT_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
rst_prolog = """
.. |mth| replace:: Mark T. Holder
.. _mth: https://phylo.bio.ku.edu/content/mark-t-holder
.. |Python| replace:: Python
.. _Python: http://www.python.org/
.. |Python27| replace:: Python 2.7
.. _Python 2.7: http://www.python.org/download/releases/2.7/
.. |Python2| replace:: Python 2
.. _Python 2: http://www.python.org/download/releases/2.7/
.. |Python3| replace:: Python 3
.. _Python 3: https://www.python.org/download/releases/3.4.0/
.. |setuptools| replace:: setuptools
.. _setuptools: http://pypi.python.org/pypi/setuptools
.. |pip| replace:: pip
.. _pip: http://pypi.python.org/pypi/pip
.. |Git| replace:: Git
.. _Git: http://git-scm.com/
"""
rst_prolog += """\
.. |nexson_source_archive_url| replace:: https://pypi.python.org/packages/source/P/nexson/nexson-%s.tar.gz
.. |nexson_source_archive| replace:: nexson source code archive
.. _nexson_source_archive: https//pypi.python.org/packages/source/P/nexson/nexson-%s.tar.gz
""" % (version, version)
rst_prolog += """\
.. |nexson_copyright| replace:: Copyright {copyright}. All rights reserved.
.. |
""".format(copyright=copyright)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_nexson_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "_static/dendropy_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**" : ["logo.html",
"cleardiv.html",
"searchbox.html",
"cleardiv.html",
"localtoc.html",
"cleardiv.html",
"relations.html",
"cleardiv.html",
"side_supplemental.html"],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nexsondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nexson.tex', u'nexson Documentation',
u'Open Tree of Life developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
# ('library/index', 'dendropy', u'DendroPy Library API Reference',
# [u'Jeet Sukumaran and Mark T. Holder'], 1),
# ('primer/index', 'dendropy-primer', u'DendroPy Primer',
# [u'Jeet Sukumaran and Mark T. Holder'], 1),
# ('programs/sumtrees', 'sumtrees', u'SumTrees User Manual',
# [u'Jeet Sukumaran and Mark T. Holder'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
# ('library/index', 'DendroPy', u'DendroPy Documentation',
# u'Jeet Sukumaran and Mark T. Holder', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
# ('primer/index', 'DendroPy-Primer', u'DendroPy Primer',
# u'Jeet Sukumaran and Mark T. Holder', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
# ('programs/sumtrees', 'SumTrees', u'SumTrees Documentation',
# u'Jeet Sukumaran and Mark T. Holder', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'nexson'
epub_author = u'Open Tree of Life developers'
epub_publisher = epub_author
epub_copyright = u'2020, {}'.format(epub_author)
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'nexson'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 32.778495 | 106 | 0.708831 | 1,127 | 0.07394 | 0 | 0 | 0 | 0 | 0 | 0 | 12,961 | 0.850348 |
f68e8c40892b76a4713ca99cffde2e9dcbf87b67 | 2,105 | py | Python | prev_ob_models/exclude/GilraBhalla2015/synapses/mitral_granule_NMDA.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/mitral_granule_NMDA.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/mitral_granule_NMDA.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
# The PYTHONPATH should contain the location of moose.py and _moose.so
# files. Putting ".." with the assumption that moose.py and _moose.so
# has been generated in ${MOOSE_SOURCE_DIRECTORY}/pymoose/ (as default
# pymoose build does) and this file is located in
# ${MOOSE_SOURCE_DIRECTORY}/pymoose/examples
# sys.path.append('..\..')
# Import moose, exiting with a clear message if it is not on the path.
try:
    import moose
except ImportError:
    # Bug fix: the original used the Python-2-only `print "..."` statement;
    # the parenthesized form below is valid under both Python 2 and 3.
    print("ERROR: Could not import moose. Please add the directory containing moose.py in your PYTHONPATH")
    import sys
    sys.exit(1)
from synapseConstants import *
class mitral_granule_NMDA(moose.SynChan):
    """Non-saturating NMDA synapse from mitral to granule cell."""
    def __init__(self, *args):
        # All mitral_granule_NMDA_* constants come from synapseConstants
        # (star-imported at module level).
        #### The Mg_block way
        moose.SynChan.__init__(self,*args)
        # Child Mg_block element models the voltage-dependent Mg2+ block
        # of the NMDA conductance.
        self.mgblock = moose.Mg_block(self.path+"/mgblock")
        self.mgblock.CMg = mitral_granule_NMDA_MgConc
        self.mgblock.KMg_A = mitral_granule_NMDA_KMg_A
        ## KMg_B has not been wrapped properly in pymoose,
        ## needed to set it via setField available in every pymoose object
        #mgblock.KMg_B = mitral_granule_NMDA_KMg_B
        self.mgblock.setField('KMg_B',str(mitral_granule_NMDA_KMg_B))
        ## connect source to destination.
        ## excsyn2 sends Gk and Ek to mgblock. other way around gives error.
        self.connect("origChannel", self.mgblock, "origChannel")
        # Extra field flags to downstream code that this channel carries a
        # Mg_block child.
        self.addField('mgblock')
        self.setField('mgblock','True')
        #### The Mg_block way ends
        ##### The NMDAChan way (alternative, intentionally disabled)
        #moose.NMDAChan.__init__(self,*args)
        #self.MgConc = mitral_granule_NMDA_MgConc
        #connect this in the calling script in the usual way as below:
        #granulecomp.connect("channel", excsyn2, "channel")
        ##### The NMDAChan way - ends
        # Channel parameters: reversal potential, peak conductance and the
        # dual-exponential rise/decay time constants.
        self.Ek = mitral_granule_NMDA_Ek
        self.Gbar = mitral_granule_NMDA_Gbar
        self.tau1 = mitral_granule_NMDA_tau1
        self.tau2 = mitral_granule_NMDA_tau2
        # 'graded' = 'False': spike-triggered, not graded, transmission.
        self.addField('graded')
        self.setField('graded','False')
| 38.981481 | 106 | 0.679335 | 1,481 | 0.703563 | 0 | 0 | 0 | 0 | 0 | 0 | 1,168 | 0.554869 |
f6900c8bcfbed26d0c4e1219d03eea149f0418aa | 31,420 | py | Python | marltoolbox/experiments/rllib_api/amtft_various_env.py | longtermrisk/marltoolbox | cae1ba94ccb44700b66a32e0734a0f11c9c6c7fe | [
"MIT"
] | 17 | 2021-01-17T21:21:08.000Z | 2022-01-27T00:57:30.000Z | marltoolbox/experiments/rllib_api/amtft_various_env.py | longtermrisk/marltoolbox | cae1ba94ccb44700b66a32e0734a0f11c9c6c7fe | [
"MIT"
] | 5 | 2021-02-21T21:43:00.000Z | 2021-05-04T12:27:23.000Z | marltoolbox/experiments/rllib_api/amtft_various_env.py | longtermrisk/marltoolbox | cae1ba94ccb44700b66a32e0734a0f11c9c6c7fe | [
"MIT"
] | 3 | 2021-02-21T11:38:22.000Z | 2022-03-04T12:06:19.000Z | import copy
import logging
import os
import ray
from ray import tune
from ray.rllib.agents import dqn
from ray.rllib.agents.dqn.dqn_torch_policy import postprocess_nstep_and_prio
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.schedules import PiecewiseSchedule
from ray.tune.integration.wandb import WandbLogger
from ray.tune.logger import DEFAULT_LOGGERS
from marltoolbox.algos import amTFT
from marltoolbox.envs import (
matrix_sequential_social_dilemma,
vectorized_coin_game,
vectorized_mixed_motive_coin_game,
ssd_mixed_motive_coin_game,
)
from marltoolbox.envs.utils.wrappers import (
add_RewardUncertaintyEnvClassWrapper,
)
from marltoolbox.scripts import aggregate_and_plot_tensorboard_data
from marltoolbox.utils import (
exploration,
log,
postprocessing,
miscellaneous,
plot,
self_and_cross_perf,
callbacks,
)
logger = logging.getLogger(__name__)
def main(debug, train_n_replicates=None, filter_utilitarian=None, env=None):
    """Run the full amTFT experiment: train (or load) policies, then evaluate.

    When ``load_plot_data`` is unset in the hyperparameters, this starts a
    local Ray cluster, trains one group of policies per welfare function
    (or reloads them from checkpoints if ``load_policy_data`` is set),
    evaluates self- and cross-play, and shuts Ray down. When plot data is
    preloaded, it skips straight to plotting without Ray.

    Returns (tune_analysis_per_welfare, analysis_metrics_per_mode); the
    first is None in the plot-only path.
    """
    hparams = get_hyperparameters(
        debug, train_n_replicates, filter_utilitarian, env
    )
    if hparams["load_plot_data"] is None:
        # local_mode in debug makes Ray run tasks serially in-process,
        # which is easier to step through.
        ray.init(
            num_cpus=os.cpu_count(), num_gpus=0, local_mode=hparams["debug"]
        )
        # Train
        if hparams["load_policy_data"] is None:
            tune_analysis_per_welfare = train_for_each_welfare_function(
                hparams
            )
        else:
            tune_analysis_per_welfare = load_tune_analysis(
                hparams["load_policy_data"]
            )
        # Eval & Plot
        analysis_metrics_per_mode = config_and_evaluate_cross_play(
            tune_analysis_per_welfare, hparams
        )
        ray.shutdown()
    else:
        tune_analysis_per_welfare = None
        # Plot only: evaluation results are reloaded from disk, no Ray needed.
        analysis_metrics_per_mode = config_and_evaluate_cross_play(
            tune_analysis_per_welfare, hparams
        )
    return tune_analysis_per_welfare, analysis_metrics_per_mode
def get_hyperparameters(
    debug,
    train_n_replicates=None,
    filter_utilitarian=None,
    env=None,
    reward_uncertainty=0.0,
):
    """Build the hyperparameter dict driving the amTFT experiments.

    Args:
        debug: if True, use tiny replicate counts for fast local runs.
        train_n_replicates: seeds per welfare group; defaults to 2 in debug
            mode, else 4.
        filter_utilitarian: whether to filter utilitarian runs; defaults to
            ``not debug`` when None.
        env: environment name override; defaults to
            "IteratedPrisonersDilemma" when None.
        reward_uncertainty: std of the reward noise wrapper (0.0 = off).

    Returns:
        The hparams dict, after environment-specific adjustment by
        ``modify_hyperparams_for_the_selected_env`` and with amTFT plot
        keys/tags prepended.
    """
    if debug:
        train_n_replicates = 2
        n_times_more_utilitarians_seeds = 1
    elif train_n_replicates is None:
        n_times_more_utilitarians_seeds = 4
        train_n_replicates = 4
    else:
        n_times_more_utilitarians_seeds = 4
    # Extra seeds are prepared because utilitarian runs get oversampled
    # (n_times_more_utilitarians_seeds) and may later be filtered.
    n_seeds_to_prepare = train_n_replicates * (
        1 + n_times_more_utilitarians_seeds
    )
    pool_of_seeds = miscellaneous.get_random_seeds(n_seeds_to_prepare)
    exp_name, _ = log.log_in_current_day_dir("amTFT")
    hparams = {
        "debug": debug,
        "filter_utilitarian": filter_utilitarian
        if filter_utilitarian is not None
        else not debug,
        "seeds": pool_of_seeds,
        "train_n_replicates": train_n_replicates,
        "n_times_more_utilitarians_seeds": n_times_more_utilitarians_seeds,
        "exp_name": exp_name,
        "log_n_points": 250,
        "load_plot_data": None,
        # Example: "load_plot_data": ".../SelfAndCrossPlay_save.p",
        "load_policy_data": None,
        # When set, maps a welfare group name (e.g. "Util", "IA") to a list
        # of checkpoint paths to reload instead of training, e.g.
        # {"Util": [".../coop/DQN_.../checkpoint_250/checkpoint-250", ...],
        #  "IA": [...]}.
        "amTFTPolicy": amTFT.AmTFTRolloutsTorchPolicy,
        "welfare_functions": [
            (postprocessing.WELFARE_INEQUITY_AVERSION, "inequity_aversion"),
            (postprocessing.WELFARE_UTILITARIAN, "utilitarian"),
        ],
        "jitter": 0.05,
        "hiddens": [64],
        "gamma": 0.96,
        # If not in self play then amTFT
        # will be evaluated against a naive selfish policy or an exploiter
        "self_play": True,
        # "self_play": False, # Not tested
        "env_name": "IteratedPrisonersDilemma" if env is None else env,
        # Other env names handled by modify_hyperparams_for_the_selected_env:
        # "IteratedAsymBoS", "CoinGame", "AsymCoinGame",
        # "MixedMotiveCoinGame", "SSDMixedMotiveCoinGame".
        "overwrite_reward": True,
        "explore_during_evaluation": True,
        "reward_uncertainty": reward_uncertainty,
    }
    hparams = modify_hyperparams_for_the_selected_env(hparams)
    # Prepend the amTFT-specific plot keys/tags to the env-specific ones.
    hparams["plot_keys"] = amTFT.PLOT_KEYS + hparams["plot_keys"]
    hparams["plot_assemblage_tags"] = (
        amTFT.PLOT_ASSEMBLAGE_TAGS + hparams["plot_assemblage_tags"]
    )
    return hparams
def load_tune_analysis(grouped_checkpoints_paths: dict):
    """Rebuild one tune analysis object per policy group from checkpoints.

    ``grouped_checkpoints_paths`` maps a group name (e.g. a welfare
    function) to a list of checkpoint paths; each list is loaded into a
    single analysis object.
    """
    start_msg = "start load_tune_analysis"
    print(start_msg)
    logger.info(start_msg)
    tune_analysis = {
        group_name: miscellaneous.load_one_tune_analysis(
            checkpoints_paths, n_dir_level_between_ckpt_and_exp_state=3
        )
        for group_name, checkpoints_paths in grouped_checkpoints_paths.items()
    }
    end_msg = "end load_tune_analysis"
    print(end_msg)
    logger.info(end_msg)
    return tune_analysis
def modify_hyperparams_for_the_selected_env(hp):
    """Fill env-specific hyperparameters into ``hp`` based on hp["env_name"].

    Sets training lengths, learning-rate schedule inputs, the exploration
    temperature schedule, plot limits/keys and the environment class, then
    wraps the env class so rewards are perturbed with the configured
    uncertainty.  Returns the mutated ``hp`` dict.
    """
    hp["plot_keys"] = (
        amTFT.PLOT_KEYS + aggregate_and_plot_tensorboard_data.PLOT_KEYS
    )
    hp["plot_assemblage_tags"] = (
        amTFT.PLOT_ASSEMBLAGE_TAGS
        + aggregate_and_plot_tensorboard_data.PLOT_ASSEMBLAGE_TAGS
    )
    # Multiplier applied to every exploration temperature below.
    mul_temp = 1.0
    hp["punishment_multiplier"] = 3.0
    hp["buf_frac"] = 0.125
    hp["training_intensity"] = 10
    # hp["rollout_length"] = 40
    # hp["n_rollout_replicas"] = 20
    hp["rollout_length"] = 4
    hp["n_rollout_replicas"] = 5
    if "CoinGame" in hp["env_name"]:
        # Coin game family of environments.
        hp["plot_keys"] += vectorized_coin_game.PLOT_KEYS
        hp["plot_assemblage_tags"] += vectorized_coin_game.PLOT_ASSEMBLAGE_TAGS
        hp["n_steps_per_epi"] = 20 if hp["debug"] else 100
        hp["n_epi"] = 10 if hp["debug"] else 4000
        hp["base_lr"] = 0.1
        hp["bs_epi_mul"] = 1
        hp["both_players_can_pick_the_same_coin"] = False
        hp["sgd_momentum"] = 0.9
        hp["lambda"] = 0.96
        hp["alpha"] = 0.0
        hp["beta"] = 0.5
        hp["debit_threshold"] = 30.0
        hp["jitter"] = 0.02
        hp["filter_utilitarian"] = False
        hp["target_network_update_freq"] = 100 * hp["n_steps_per_epi"]
        hp["last_exploration_temp_value"] = 0.03 * mul_temp
        # Anneal the softmax temperature from 2.0 down to the final value
        # over the first 60% of training.
        hp["temperature_schedule"] = PiecewiseSchedule(
            endpoints=[
                (0, 2.0 * mul_temp),
                (
                    int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.20),
                    0.5 * mul_temp,
                ),
                (
                    int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.60),
                    hp["last_exploration_temp_value"],
                ),
            ],
            outside_value=hp["last_exploration_temp_value"],
            framework="torch",
        )
        if "AsymCoinGame" in hp["env_name"]:
            hp["x_limits"] = (-0.5, 3.0)
            hp["y_limits"] = (-1.1, 0.6)
            hp["env_class"] = vectorized_coin_game.AsymVectorizedCoinGame
        elif "MixedMotiveCoinGame" in hp["env_name"]:
            if "SSDMixedMotiveCoinGame" in hp["env_name"]:
                hp["debit_threshold"] = 3.0
                hp["x_limits"] = (-0.25, 1.0)
                hp["y_limits"] = (-0.25, 1.5)
                hp[
                    "env_class"
                ] = ssd_mixed_motive_coin_game.SSDMixedMotiveCoinGame
            else:
                hp["x_limits"] = (-2.0, 2.0)
                hp["y_limits"] = (-0.5, 3.0)
                hp[
                    "env_class"
                ] = vectorized_mixed_motive_coin_game.VectMixedMotiveCG
            hp["both_players_can_pick_the_same_coin"] = True
        else:
            hp["x_limits"] = (-0.5, 0.6)
            hp["y_limits"] = (-0.5, 0.6)
            hp["env_class"] = vectorized_coin_game.VectorizedCoinGame
    else:
        # Iterated matrix game family of environments.
        hp["plot_keys"] += matrix_sequential_social_dilemma.PLOT_KEYS
        hp[
            "plot_assemblage_tags"
        ] += matrix_sequential_social_dilemma.PLOT_ASSEMBLAGE_TAGS
        hp["base_lr"] = 0.03
        hp["bs_epi_mul"] = 1
        hp["n_steps_per_epi"] = 20
        hp["n_epi"] = 10 if hp["debug"] else 800
        hp["lambda"] = 0.96
        hp["alpha"] = 0.0
        hp["beta"] = 1.0
        hp["sgd_momentum"] = 0.0
        hp["debit_threshold"] = 10.0
        hp["target_network_update_freq"] = 30 * hp["n_steps_per_epi"]
        hp["last_exploration_temp_value"] = 0.1 * mul_temp
        hp["temperature_schedule"] = PiecewiseSchedule(
            endpoints=[
                (0, 2.0 * mul_temp),
                (
                    int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.33),
                    0.5 * mul_temp,
                ),
                (
                    int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.66),
                    hp["last_exploration_temp_value"],
                ),
            ],
            outside_value=hp["last_exploration_temp_value"],
            framework="torch",
        )
        if "IteratedPrisonersDilemma" in hp["env_name"]:
            hp["filter_utilitarian"] = False
            hp["x_limits"] = (-3.5, 0.5)
            hp["y_limits"] = (-3.5, 0.5)
            hp["utilitarian_filtering_threshold"] = -2.5
            hp[
                "env_class"
            ] = matrix_sequential_social_dilemma.IteratedPrisonersDilemma
        elif "IteratedAsymBoS" in hp["env_name"]:
            hp["x_limits"] = (-0.1, 4.1)
            hp["y_limits"] = (-0.1, 4.1)
            hp["utilitarian_filtering_threshold"] = 3.2
            hp["env_class"] = matrix_sequential_social_dilemma.IteratedAsymBoS
        else:
            raise NotImplementedError(f'hp["env_name"]: {hp["env_name"]}')
    # Warm up the learning rate over the first 5% of training, then decay
    # it to ~0 by the end.
    hp["lr_schedule"] = [
        (0, 0.0),
        (int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.05), hp["base_lr"]),
        (int(hp["n_steps_per_epi"] * hp["n_epi"]), hp["base_lr"] / 1e9),
    ]
    hp["plot_axis_scale_multipliers"] = (
        (1 / hp["n_steps_per_epi"]),  # for x axis
        (1 / hp["n_steps_per_epi"]),
    )  # for y axis
    # Wrap the env class so each reward is perturbed with noise of std
    # hp["reward_uncertainty"].
    hp["env_class"] = add_RewardUncertaintyEnvClassWrapper(
        env_class=hp["env_class"],
        reward_uncertainty_std=hp["reward_uncertainty"],
    )
    return hp
def train_for_each_welfare_function(hp):
    """Train amTFT once per welfare function and collect the tune analyses.

    Returns a dict mapping each welfare group name to the tune analysis of
    its training run.
    """
    tune_analysis_per_welfare = {}
    for welfare_fn, welfare_group_name in hp["welfare_functions"]:
        print("==============================================")
        print(
            "Going to start two_steps_training with welfare function",
            welfare_fn,
        )
        is_utilitarian = welfare_fn == postprocessing.WELFARE_UTILITARIAN
        if is_utilitarian:
            # Utilitarian training uses extra seeds that are filtered later.
            hp = preprocess_utilitarian_config(hp)
        stop, env_config, rllib_config = get_rllib_config(hp, welfare_fn)
        exp_name = os.path.join(hp["exp_name"], welfare_fn)
        results = amTFT.train_amtft(
            stop_config=stop,
            rllib_config=rllib_config,
            name=exp_name,
            TrainerClass=dqn.DQNTrainer,
            plot_keys=hp["plot_keys"],
            plot_assemblage_tags=hp["plot_assemblage_tags"],
            debug=hp["debug"],
            log_to_file=not hp["debug"],
            loggers=None if hp["debug"] else DEFAULT_LOGGERS + (WandbLogger,),
        )
        if is_utilitarian:
            results, hp = postprocess_utilitarian_results(
                results, env_config, hp
            )
        tune_analysis_per_welfare[welfare_group_name] = results
    return tune_analysis_per_welfare
def preprocess_utilitarian_config(hp):
    """Return a deep copy of *hp*, scaling up ``train_n_replicates`` when
    utilitarian filtering is enabled.

    More seeds are trained for the utilitarian welfare so that enough runs
    survive the later filtering step.  The input dict is left untouched.
    """
    scaled_hp = copy.deepcopy(hp)
    if not scaled_hp["filter_utilitarian"]:
        return scaled_hp
    multiplier = scaled_hp["n_times_more_utilitarians_seeds"]
    scaled_hp["train_n_replicates"] *= multiplier
    return scaled_hp
def get_rllib_config(hp, welfare_fn, eval=False):
    """Build the (stop, env_config, rllib_config) triple for one DQN run.

    Consumes hp["train_n_replicates"] seeds from hp["seeds"] (grid-searched
    over by tune).  With ``eval=True`` both nested policy slots use the
    cooperative nested policy (see ``get_policies``).
    """
    stop = {
        "episodes_total": hp["n_epi"],
    }
    env_config = get_env_config(hp)
    policies = get_policies(hp, env_config, welfare_fn, eval)
    # Pop one batch of seeds so the next call uses fresh ones.
    selected_seeds = hp["seeds"][: hp["train_n_replicates"]]
    hp["seeds"] = hp["seeds"][hp["train_n_replicates"] :]
    rllib_config = {
        "env": hp["env_class"],
        "env_config": env_config,
        "multiagent": {
            "policies": policies,
            # Each env agent id maps to the policy of the same name.
            "policy_mapping_fn": lambda agent_id: agent_id,
            # When replay_mode=lockstep, RLlib will replay all the agent
            # transitions at a particular timestep together in a batch.
            # This allows the policy to implement differentiable shared
            # computations between agents it controls at that timestep.
            # When replay_mode=independent,
            # transitions are replayed independently per policy.
            # "replay_mode": "lockstep",
            "observation_fn": amTFT.observation_fn,
        },
        "gamma": hp["gamma"],
        "seed": tune.grid_search(selected_seeds),
        # === Optimization ===
        # Learning rate for adam optimizer
        "lr": hp["base_lr"],
        # Learning rate schedule
        "lr_schedule": hp["lr_schedule"],
        # If not None, clip gradients during optimization at this value
        "grad_clip": 1,
        # Update the replay buffer with this many samples at once. Note that
        # this setting applies per-worker if num_workers > 1.
        "rollout_fragment_length": hp["n_steps_per_epi"],
        # Size of a batch sampled from replay buffer for training. Note that
        # if async_updates is set, then each worker returns gradients for a
        # batch of this size.
        "train_batch_size": int(hp["n_steps_per_epi"] * hp["bs_epi_mul"]),
        "training_intensity": hp["training_intensity"],
        # Minimum env steps to optimize for per train call. This value does
        # not affect learning, only the length of iterations.
        "timesteps_per_iteration": hp["n_steps_per_epi"]
        if hp["debug"]
        else int(hp["n_steps_per_epi"] * hp["n_epi"] / hp["log_n_points"]),
        "min_iter_time_s": 0.0,
        # General config
        "framework": "torch",
        # LE supports only 1 worker only otherwise
        # it would be mixing several opponents trajectories
        "num_workers": 0,
        # LE supports only 1 env per worker only otherwise
        # several episodes would be played at the same time
        "num_envs_per_worker": 1,
        # Callbacks that will be run during various phases of training. See the
        # `DefaultCallbacks` class and
        # `examples/custom_metrics_and_callbacks.py` for more usage
        # information.
        "callbacks": callbacks.merge_callbacks(
            amTFT.AmTFTCallbacks,
            log.get_logging_callbacks_class(
                log_full_epi=True, log_full_epi_interval=100
            ),
        ),
        "logger_config": {
            "wandb": {
                "project": "amTFT",
                "group": hp["exp_name"],
                "api_key_file": os.path.join(
                    os.path.dirname(__file__), "../../../api_key_wandb"
                ),
                "log_config": True,
            },
        },
        # === DQN Models ===
        # Update the target network every `target_network_update_freq` steps.
        "target_network_update_freq": hp["target_network_update_freq"],
        # === Replay buffer ===
        # Size of the replay buffer. Note that if async_updates is set, then
        # each worker will have a replay buffer of this size.
        "buffer_size": max(
            int(hp["n_steps_per_epi"] * hp["n_epi"] * hp["buf_frac"]), 5
        ),
        # Whether to use dueling dqn
        "dueling": True,
        # Dense-layer setup for each the advantage branch and the value branch
        # in a dueling architecture.
        "hiddens": hp["hiddens"],
        # Whether to use double dqn
        "double_q": True,
        # If True prioritized replay buffer will be used.
        "prioritized_replay": False,
        "model": {
            # Number of hidden layers for fully connected net
            "fcnet_hiddens": hp["hiddens"],
            # Nonlinearity for fully connected net (tanh, relu)
            "fcnet_activation": "relu",
        },
        # How many steps of the model to sample before learning starts.
        "learning_starts": int(hp["n_steps_per_epi"] * hp["bs_epi_mul"]),
        # === Exploration Settings ===
        # Default exploration behavior, iff `explore`=None is passed into
        # compute_action(s).
        # Set to False for no exploration behavior (e.g., for evaluation).
        "explore": True,
        # Provide a dict specifying the Exploration object's config.
        "exploration_config": {
            # The Exploration class to use. In the simplest case,
            # this is the name (str) of any class present in the
            # `rllib.utils.exploration` package.
            # You can also provide the python class directly or
            # the full location of your class (e.g.
            # "ray.rllib.utils.exploration.epsilon_greedy.
            # EpsilonGreedy").
            "type": exploration.SoftQSchedule,
            # Add constructor kwargs here (if any).
            "temperature_schedule": hp["temperature_schedule"],
        },
    }
    # Coin games use grid-shaped observations: replace the fully-connected
    # model with a small conv net sized to the grid.
    if "CoinGame" in hp["env_name"]:
        rllib_config["model"] = {
            "dim": env_config["grid_size"],
            "conv_filters": [[16, [3, 3], 1], [32, [3, 3], 1]],
            # [Channel, [Kernel, Kernel], Stride]]
        }
    return stop, env_config, rllib_config
def get_env_config(hp):
    """Build the env_config dict matching the selected environment family.

    Coin games get a grid size and coin-pickup rule; matrix games only need
    the players' ids and the episode length.
    """
    if "CoinGame" in hp["env_name"]:
        return {
            "players_ids": ["player_red", "player_blue"],
            "max_steps": hp["n_steps_per_epi"],
            "grid_size": 3,
            "both_players_can_pick_the_same_coin": hp[
                "both_players_can_pick_the_same_coin"
            ],
        }
    return {
        "players_ids": ["player_row", "player_col"],
        "max_steps": hp["n_steps_per_epi"],
    }
def get_policies(hp, env_config, welfare_fn, eval=False):
    """Build the RLlib ``multiagent.policies`` dict for the two players.

    Each player uses the amTFT policy class with two nested policy pairs:
    a cooperative one trained on ``welfare_fn`` and a selfish one.  With
    ``eval=True`` the selfish slot is replaced by the cooperative class.
    """
    PolicyClass = hp["amTFTPolicy"]
    NestedPolicyClass, CoopNestedPolicyClass = get_nested_policy_class(
        hp, welfare_fn
    )
    if eval:
        NestedPolicyClass = CoopNestedPolicyClass
    amTFT_config_update = merge_dicts(
        amTFT.DEFAULT_CONFIG,
        {
            # Set to True to train the nested policies and to False to use them
            "working_state": "train_coop",
            "welfare_key": welfare_fn,
            "verbose": 1 if hp["debug"] else 0,
            # "verbose": 1 if hp["debug"] else 2,
            "punishment_multiplier": hp["punishment_multiplier"],
            "debit_threshold": hp["debit_threshold"],
            # Rollouts can never be longer than an episode.
            "rollout_length": min(hp["n_steps_per_epi"], hp["rollout_length"]),
            "n_rollout_replicas": hp["n_rollout_replicas"],
            "optimizer": {
                "sgd_momentum": hp["sgd_momentum"],
            },
            "nested_policies": [
                {"Policy_class": CoopNestedPolicyClass, "config_update": {}},
                {"Policy_class": NestedPolicyClass, "config_update": {}},
                {"Policy_class": CoopNestedPolicyClass, "config_update": {}},
                {"Policy_class": NestedPolicyClass, "config_update": {}},
            ],
        },
    )
    policy_1_config = copy.deepcopy(amTFT_config_update)
    policy_1_config["own_policy_id"] = env_config["players_ids"][0]
    policy_1_config["opp_policy_id"] = env_config["players_ids"][1]
    policy_2_config = copy.deepcopy(amTFT_config_update)
    policy_2_config["own_policy_id"] = env_config["players_ids"][1]
    policy_2_config["opp_policy_id"] = env_config["players_ids"][0]
    # FIX: instantiate the env only once instead of once per player; both
    # players were built from the same env_config, so the observation space
    # is the same.
    observation_space = hp["env_class"](env_config).OBSERVATION_SPACE
    action_space = hp["env_class"].ACTION_SPACE
    policies = {
        env_config["players_ids"][0]: (
            # The default policy is DQN defined in DQNTrainer but
            # we overwrite it to use the LE policy
            PolicyClass,
            observation_space,
            action_space,
            policy_1_config,
        ),
        env_config["players_ids"][1]: (
            PolicyClass,
            observation_space,
            action_space,
            policy_2_config,
        ),
    }
    return policies
def get_nested_policy_class(hp, welfare_fn):
    """Return the (selfish, cooperative) nested policy classes.

    The cooperative class is the selfish one with an extra postprocessing
    step that overwrites rewards with the selected welfare.
    """
    selfish_policy_class = amTFT.DEFAULT_NESTED_POLICY_SELFISH
    # TODO problem: this prevent to use HP searches on gamma etc.
    welfare_postprocessing = postprocessing.welfares_postprocessing_fn(
        add_utilitarian_welfare=(
            welfare_fn == postprocessing.WELFARE_UTILITARIAN
        ),
        add_inequity_aversion_welfare=(
            welfare_fn == postprocessing.WELFARE_INEQUITY_AVERSION
        ),
        inequity_aversion_alpha=hp["alpha"],
        inequity_aversion_beta=hp["beta"],
        inequity_aversion_gamma=hp["gamma"],
        inequity_aversion_lambda=hp["lambda"],
    )
    coop_policy_class = selfish_policy_class.with_updates(
        postprocess_fn=miscellaneous.merge_policy_postprocessing_fn(
            welfare_postprocessing,
            postprocess_nstep_and_prio,
        )
    )
    return selfish_policy_class, coop_policy_class
def postprocess_utilitarian_results(results, env_config, hp):
    """Filter utilitarian runs by reward threshold and restore the
    replicate count that was scaled up before training.

    Returns the (possibly filtered) results and an updated copy of ``hp``.
    """
    hp_cp = copy.deepcopy(hp)
    if not hp_cp["filter_utilitarian"]:
        return results, hp_cp
    # Training used N times more seeds; keep only the original count.
    hp_cp["train_n_replicates"] //= hp_cp["n_times_more_utilitarians_seeds"]
    player_0 = env_config["players_ids"][0]
    results = miscellaneous.filter_tune_results(
        results,
        metric=f"policy_reward_mean/{player_0}",
        metric_threshold=hp_cp["utilitarian_filtering_threshold"]
        * hp_cp["n_steps_per_epi"],
        metric_mode="last-5-avg",
        threshold_mode="above",
    )
    n_keep = hp_cp["train_n_replicates"]
    if len(results.trials) > n_keep:
        results.trials = results.trials[:n_keep]
    elif len(results.trials) < n_keep:
        print("WARNING: not enough Utilitarian trials above threshold!!!")
    return results, hp_cp
def config_and_evaluate_cross_play(tune_analysis_per_welfare, hp):
    """Generate the evaluation config, then run self-play and cross-play."""
    config_eval, env_config, stop, hp_eval = generate_eval_config(hp)
    analysis_metrics_per_mode = evaluate_self_play_cross_play(
        tune_analysis_per_welfare, config_eval, env_config, stop, hp_eval
    )
    return analysis_metrics_per_mode
def evaluate_self_play_cross_play(
    tune_analysis_per_welfare, config_eval, env_config, stop, hp_eval
):
    """Run self-play and cross-play evaluation on the trained checkpoints,
    plot the resulting payoff points and print the inequity-aversion
    welfare.  Returns the per-mode analysis metrics.
    """
    exp_name = os.path.join(hp_eval["exp_name"], "eval")
    evaluator = self_and_cross_perf.SelfAndCrossPlayEvaluator(
        exp_name=exp_name,
        local_mode=hp_eval["debug"],
    )
    analysis_metrics_per_mode = evaluator.perform_evaluation_or_load_data(
        evaluation_config=config_eval,
        stop_config=stop,
        policies_to_load_from_checkpoint=copy.deepcopy(
            env_config["players_ids"]
        ),
        tune_analysis_per_exp=tune_analysis_per_welfare,
        TrainerClass=dqn.DQNTrainer,
        n_self_play_per_checkpoint=hp_eval["n_self_play_per_checkpoint"],
        n_cross_play_per_checkpoint=hp_eval["n_cross_play_per_checkpoint"],
        to_load_path=hp_eval["load_plot_data"],
    )
    # Matrix games can draw the payoff matrix behind the points; the coin
    # games have no such matrix.
    if "CoinGame" in hp_eval["env_name"]:
        background_area_coord = None
    else:
        background_area_coord = hp_eval["env_class"].PAYOUT_MATRIX
    plot_config = plot.PlotConfig(
        xlim=hp_eval["x_limits"],
        ylim=hp_eval["y_limits"],
        markersize=5,
        alpha=1.0,
        jitter=hp_eval["jitter"],
        xlabel="player 1 payoffs",
        ylabel="player 2 payoffs",
        plot_max_n_points=hp_eval["train_n_replicates"],
        x_scale_multiplier=hp_eval["plot_axis_scale_multipliers"][0],
        y_scale_multiplier=hp_eval["plot_axis_scale_multipliers"][1],
        background_area_coord=background_area_coord,
    )
    evaluator.plot_results(
        analysis_metrics_per_mode,
        plot_config=plot_config,
        x_axis_metric=f"policy_reward_mean/{env_config['players_ids'][0]}",
        y_axis_metric=f"policy_reward_mean/{env_config['players_ids'][1]}",
    )
    print_inequity_aversion_welfare(env_config, analysis_metrics_per_mode)
    return analysis_metrics_per_mode
def generate_eval_config(hp):
    """Return (eval rllib config, env config, stop config, eval hp)."""
    hp_eval = modify_hp_for_evaluation(hp)
    # Any welfare works here since eval=True below wires both nested policy
    # slots to the cooperative policy anyway.
    placeholder_welfare = postprocessing.WELFARE_INEQUITY_AVERSION
    stop, env_config, rllib_config = get_rllib_config(
        hp_eval, placeholder_welfare, eval=True
    )
    config_eval = modify_config_for_evaluation(
        rllib_config, hp_eval, env_config
    )
    return config_eval, env_config, stop, hp_eval
def modify_hp_for_evaluation(hp: dict, eval_over_n_epi: int = 1):
    """Derive evaluation hyperparameters from the training ones.

    Returns a deep copy of ``hp`` configured for short deterministic
    evaluation episodes.
    """
    hp_eval = copy.deepcopy(hp)
    steps_per_epi = 5 if hp_eval["debug"] else 100
    axis_scale = 1 / steps_per_epi
    n_checkpoints = hp_eval["train_n_replicates"] * len(
        hp_eval["welfare_functions"]
    )
    hp_eval.update(
        {
            # TODO is the overwrite_reward hp useless?
            "overwrite_reward": False,
            "n_epi": eval_over_n_epi,
            "n_steps_per_epi": steps_per_epi,
            "bs_epi_mul": 1,
            # (x axis multiplier, y axis multiplier)
            "plot_axis_scale_multipliers": (axis_scale, axis_scale),
            "n_self_play_per_checkpoint": 1,
            # Cross-play against every other checkpoint, capped at 5.
            "n_cross_play_per_checkpoint": min(5, n_checkpoints - 1),
        }
    )
    return hp_eval
def modify_config_for_evaluation(config_eval, hp, env_config):
    """Turn a training RLlib config into an evaluation config (in place).

    Disables exploration and seeding, switches every amTFT policy to its
    evaluation mode, optionally makes the second player naive selfish, and
    optionally re-enables a fixed low exploration temperature.
    """
    config_eval["explore"] = False
    config_eval["seed"] = None
    policies = config_eval["multiagent"]["policies"]
    for policy_spec in policies.values():
        policy_spec[3]["working_state"] = "eval_amtft"
    if not hp["self_play"]:
        # The last player plays a naive selfish policy instead of amTFT.
        naive_player_id = env_config["players_ids"][-1]
        policies[naive_player_id][3]["working_state"] = "eval_naive_selfish"
    if hp["explore_during_evaluation"]:
        tmp_mul = 1.0
        fixed_temp = tmp_mul * hp["last_exploration_temp_value"]
        config_eval["explore"] = (miscellaneous.OVERWRITE_KEY, True)
        config_eval["exploration_config"] = {
            "type": config_eval["exploration_config"]["type"],
            # Constant temperature schedule at the final training value.
            "temperature_schedule": PiecewiseSchedule(
                endpoints=[(0, fixed_temp), (0, fixed_temp)],
                outside_value=fixed_temp,
                framework="torch",
            ),
        }
    if hp["debug"] and hp.get("debit_threshold_debug_override", True):
        # Make punishment trigger easily so it can be observed in debug runs.
        for policy_spec in policies.values():
            policy_spec[3]["debit_threshold"] = 0.5
            policy_spec[3]["last_k"] = hp["n_steps_per_epi"] - 1
    return config_eval
def print_inequity_aversion_welfare(env_config, analysis_metrics_per_mode):
    """Print, per evaluation mode, the per-episode inequity-aversion
    welfare points of both players."""
    player_ids = env_config["players_ids"]
    metric_template = (
        "nested_policy/{}/worker_0/"
        "policy_0/sum_over_epi_inequity_aversion_welfare"
    )
    plotter = self_and_cross_perf.SelfAndCrossPlayPlotter()
    plotter._reset(
        x_axis_metric=metric_template.format(player_ids[0]),
        y_axis_metric=metric_template.format(player_ids[1]),
        metric_mode="avg",
    )
    for mode_metric in analysis_metrics_per_mode:
        print("mode_metric", mode_metric[0], mode_metric[3])
        x_points, y_points = plotter._extract_x_y_points(mode_metric[1])
        print("x", x_points)
        print("y", y_points)
if __name__ == "__main__":
    # debug_mode=True runs the tiny/fast debug configuration; set it to
    # False for a full training + evaluation run.
    debug_mode = True
    main(debug_mode)
| 38.646986 | 125 | 0.614258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,495 | 0.397677 |
f690fa8f1e57e3fdc1e97a3ebbefe8d097b05980 | 1,370 | py | Python | src/core/utils/win32/media_control.py | younger-1/yasb | d0ad23e65eb481f7c05dde1e040f55bec2aa6689 | [
"MIT"
] | 53 | 2021-11-02T21:04:25.000Z | 2022-03-30T12:45:55.000Z | src/core/utils/win32/media_control.py | younger-1/yasb | d0ad23e65eb481f7c05dde1e040f55bec2aa6689 | [
"MIT"
] | 12 | 2021-12-02T05:06:18.000Z | 2022-03-20T13:39:07.000Z | src/core/utils/win32/media_control.py | younger-1/yasb | d0ad23e65eb481f7c05dde1e040f55bec2aa6689 | [
"MIT"
] | 4 | 2022-01-18T06:17:56.000Z | 2022-03-30T13:18:55.000Z | from winrt.windows.media.control import GlobalSystemMediaTransportControlsSessionManager
from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions
async def get_current_session():
    """Return the system-wide current media playback session.

    The returned session exposes the transport controls, e.g.:
    current_session.try_play_async()
    current_session.try_pause_async()
    current_session.try_toggle_play_pause()
    current_session.try_change_shuffle_active()
    current_session.try_skip_next()
    current_session.try_skip_previous()
    current_session.try_stop()

    NOTE(review): presumably returns None when no media session exists —
    confirm against the WinRT documentation.
    """
    sessions = await GlobalSystemMediaTransportControlsSessionManager.request_async()
    return sessions.get_current_session()
async def get_media_info():
    """Return the current track's media properties as a plain dict.

    Implicitly returns None when there is no active media session,
    matching the original behaviour.
    """
    session = await get_current_session()
    if not session:
        return None
    media_props = await session.try_get_media_properties_async()
    info = {}
    for attr_name in dir(media_props):
        if attr_name.startswith('_'):
            continue
        info[attr_name] = getattr(media_props, attr_name)
    return info
async def read_stream_into_buffer(thumbnail_ref) -> bytearray:
    """Read a thumbnail stream reference fully into a bytearray.

    Args:
        thumbnail_ref: a WinRT random-access stream reference (e.g. the
            ``thumbnail`` media property).

    Returns:
        The raw bytes read from the stream.
    """
    buffer = Buffer(5000000)
    readable_stream = await thumbnail_ref.open_read_async()
    # BUG FIX: read_async returns an async operation; the original never
    # awaited it, so the buffer could be consumed before the read finished.
    await readable_stream.read_async(
        buffer, buffer.capacity, InputStreamOptions.READ_AHEAD
    )
    buffer_reader = DataReader.from_buffer(buffer)
    thumbnail_buffer = buffer_reader.read_bytes(buffer.length)
    return bytearray(thumbnail_buffer)
| 36.052632 | 88 | 0.766423 | 0 | 0 | 0 | 0 | 0 | 0 | 1,191 | 0.869343 | 288 | 0.210219 |
f69101537e1714c27d7b564d92d8f58ca2160bfc | 4,001 | py | Python | lawerWeb/settings.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | lawerWeb/settings.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | lawerWeb/settings.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | """
Django settings for lawerWeb project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h4ep$74a1pw@9)kgv2%#!ohfe_1a6!v_17x^((h3g*^3**lqco'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'froala_editor',
'xadmin',
'crispy_forms',
'reversion',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lawerWeb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lawerWeb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
# 中文配置
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# 公共的 static 文件,比如 jquery.js 可以放这里,这里面的文件夹不能包含 STATIC_ROOT
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = (
"common_static",
)
IS_POLL_NUM_EDIT = True
IS_COMMENT_NUM_EDIT = True
PER_PAGE_SHOW = 10
FROALA_EDITOR_PLUGINS = ('align', 'char_counter', 'code_beautifier' ,'code_view', 'colors', 'draggable', 'emoticons',
'entities', 'file', 'font_family', 'font_size', 'image_manager', 'image',
'line_breaker', 'link', 'lists', 'paragraph_format', 'paragraph_style', 'quick_insert', 'quote', 'save', 'table',
'url', 'video')
USE_FROALA_EDITOR = True
#FROALA_UPLOAD_PATH = os.path.join(BASE_DIR, 'media')
# upload folder
#MEDIA_URL: URL访问路径
MEDIA_URL = '/media/'
#MEDIA_ROOT:上传存放路径,必须是本地路径的绝对路径
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
SELECT_INPUT_COLUMN_NUMBER=10
| 27.784722 | 121 | 0.699075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,914 | 0.709866 |
f691044e8c6bcf3b4b49b0a2b8a4fe8c5bbb4023 | 91 | py | Python | kriging/__init__.py | solab-ntu/kriging | 1f9b4144dd5a18c63f212473216593d5f1722c14 | [
"MIT"
] | null | null | null | kriging/__init__.py | solab-ntu/kriging | 1f9b4144dd5a18c63f212473216593d5f1722c14 | [
"MIT"
] | null | null | null | kriging/__init__.py | solab-ntu/kriging | 1f9b4144dd5a18c63f212473216593d5f1722c14 | [
"MIT"
] | null | null | null | from .kparam import Kparam
from .variogram import Variogram
from .utilities import predict
| 22.75 | 32 | 0.835165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f691db8dd8822bcebc9d504797dbc6f906497158 | 900 | py | Python | hparams.py | SuzukiDaishi/AutoVC.pytorch | 0ce6f2dd5b6e34f812c56fc1466bb3444ef837bd | [
"MIT"
] | 25 | 2020-06-09T14:44:14.000Z | 2022-03-26T11:19:47.000Z | hparams.py | SuzukiDaishi/AutoVC.pytorch | 0ce6f2dd5b6e34f812c56fc1466bb3444ef837bd | [
"MIT"
] | 2 | 2020-10-27T02:31:28.000Z | 2021-02-21T02:47:02.000Z | hparams.py | SuzukiDaishi/AutoVC.pytorch | 0ce6f2dd5b6e34f812c56fc1466bb3444ef837bd | [
"MIT"
] | 4 | 2020-10-27T02:03:35.000Z | 2022-03-26T11:19:41.000Z | class hparams:
sample_rate = 16000
n_fft = 1024
#fft_bins = n_fft // 2 + 1
num_mels = 80
hop_length = 256
win_length = 1024
fmin = 90
fmax = 7600
min_level_db = -100
ref_level_db = 20
seq_len_factor = 64
bits = 12
seq_len = seq_len_factor * hop_length
dim_neck = 32
dim_emb = 256
dim_pre = 512
freq = 32
## wavenet vocoder
builder = 'wavenet'
hop_size = 256
log_scale_min = float(-32.23619130191664)
out_channels = 10 * 3
layers = 24
stacks = 4
residual_channels = 512
gate_channels = 512
skip_out_channels = 256
dropout = 1 - 0.95
kernel_size = 3
cin_channels = 80
upsample_conditional_features = True
upsample_scales = [4, 4, 4, 4]
freq_axis_kernel_size = 3
gin_channels = -1
n_speakers = -1
weight_normalization = True
legacy = True
| 21.428571 | 45 | 0.61 | 899 | 0.998889 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.058889 |
f69215f25e7debe516c3b3acd6ad6f0976bbd082 | 1,338 | py | Python | custom_components/hdhomerun/__init__.py | Berserkir-Wolf/ha-hdhomerun | 2d62d37c21b4ee712c86b7f5241b39e5d42d90e7 | [
"MIT"
] | 4 | 2021-04-22T13:41:25.000Z | 2022-03-04T17:10:50.000Z | custom_components/hdhomerun/__init__.py | Berserkir-Wolf/ha-hdhomerun | 2d62d37c21b4ee712c86b7f5241b39e5d42d90e7 | [
"MIT"
] | null | null | null | custom_components/hdhomerun/__init__.py | Berserkir-Wolf/ha-hdhomerun | 2d62d37c21b4ee712c86b7f5241b39e5d42d90e7 | [
"MIT"
] | null | null | null | """The HDHomeRun component."""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import CONF_PORT
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from .const import DOMAIN, CONF_HOST
DEVICE_SCHEMA = vol.Schema(
{
CONF_HOST: cv.string
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
SENSOR_DOMAIN: vol.Schema(
vol.All(cv.ensure_list, [DEVICE_SCHEMA])
)
}
},
extra=vol.ALLOW_EXTRA
)
async def async_setup(hass, config):
"""Set up the HDHomeRun component."""
conf = config.get(DOMAIN)
hass.data[DOMAIN] = conf or {}
if conf is not None:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up HDHomeRun from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, SENSOR_DOMAIN)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
await hass.config_entries.async_forward_entry_unload(entry, SENSOR_DOMAIN)
return True | 23.473684 | 78 | 0.670404 | 0 | 0 | 0 | 0 | 0 | 0 | 752 | 0.562033 | 146 | 0.109118 |
f692bbf1983670a97677d806c68b916963e86d4a | 2,542 | py | Python | expRT/BTS_Material.py | EJ-Chang/OSD-Study | d7029879fa95406daf8f8e67a15bf859e7bcbce9 | [
"MIT"
] | null | null | null | expRT/BTS_Material.py | EJ-Chang/OSD-Study | d7029879fa95406daf8f8e67a15bf859e7bcbce9 | [
"MIT"
] | 15 | 2020-04-17T08:38:50.000Z | 2020-06-04T10:01:50.000Z | expRT/BTS_Material.py | EJ-Chang/OSD-Study | d7029879fa95406daf8f8e67a15bf859e7bcbce9 | [
"MIT"
] | null | null | null | # Material
buttonType = ['Arrow button', 'Radio button', 'Switch button', 'Option']
requestLUT = [
{
'name' : 'Radio',
'on' : 'OSD_ImgFolder/radio_on.png',
'off' : 'OSD_ImgFolder/radio_off.png',
'default' : 'off',
'hint_path' : 'OSD_ImgFolder/L4 off.png',
'hint': 0
},
{
'name' : 'Switch',
'on' : 'OSD_ImgFolder/switch_on.png',
'off' : 'OSD_ImgFolder/switch_off.png',
'default' : 'off',
'hint_path' : 'OSD_ImgFolder/L4 off.png',
'hint' : 0
},
{
'name' : 'Switch_clue',
'on' : 'OSD_ImgFolder/switch_on.png',
'off' : 'OSD_ImgFolder/switch_off.png',
'default' : 'off',
'hint_path' : 'OSD_ImgFolder/L4 off.png',
'hint' : 1
}
]
backgroundLUT = [
{
'name' : 'Background',
'position' : (0, 0),
'path' : 'OSD_ImgFolder/MainFrame.png'
},
{
'name' : 'Lay_1',
'position' : (-405, -30),
'path' : 'OSD_ImgFolder/Layer_1.png'
},
{
'name' : 'Lay_2',
'position' : (-235, -30),
'path' : 'OSD_ImgFolder/Layer_2.png'
},
{
'name' : 'Lay_3',
'position' : (35, -30),
'path' : 'OSD_ImgFolder/Layer_3.png'
},
{
'name' : 'Lay_4',
'position' : (305, -30),
'path' : 'OSD_ImgFolder/Layer_4.png'
}
]
strLUT = [
{
'name' : 'L1 str',
'position' : (-405, -30),
'path' : 'OSD_ImgFolder/aLayer_1.png',
'hint' : 'OSD_ImgFolder/aLayer_1.png'
},
{
'name' : 'L2_str',
'position' : (-235, -30),
'path' : 'OSD_ImgFolder/L2 str.png',
'hint' : 'OSD_ImgFolder/L2 str.png'
},
{
'name' : 'L3_str',
'position' : (35, -30),
'path' : 'OSD_ImgFolder/L3 str.png',
'hint' : 'OSD_ImgFolder/L3 off.png'
},
{
'name' : 'L4_str',
'position' : (305, -30),
'path' : 'OSD_ImgFolder/L4 str.png',
'hint' : 'OSD_ImgFolder/L4 off.png'
}
]
indicatorLUT = [
{
'name' : 'L1 selector',
'width' : 68,
'height' : 70,
'position' : [(-405, 110), (-405, 40), (-405, -30), (-405, -100), (-405, -170)]
},
{
'name' : 'L2 selector',
'width' : 268,
'height' : 70,
'position' : [(-235, 110), (-235, 40), (-235, -30), (-235, -100), (-235, -170)]
},
{
'name' : 'L3 selector',
'width' : 268,
'height' : 70,
'position' : [(35, 110), (35, 40), (35, -30), (35, -100), (35, -170)]
},
{
'name' : 'L4 selector',
'width' : 268,
'height' : 70,
'position' : [(305, 110), (305, 40), (305, -30), (305, -100), (305, -170)]
}
] | 21.361345 | 84 | 0.486231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,299 | 0.511015 |
f6973c3813278e555197312de18aff5e1c09e73a | 2,142 | py | Python | URMC_CTSI_openbadge_analysis/Demo.py | VerolaX/Capstone-URMC | 754bff5699383d1270a2301d39f60ca4359952c7 | [
"MIT"
] | 1 | 2020-07-10T16:02:36.000Z | 2020-07-10T16:02:36.000Z | URMC_CTSI_openbadge_analysis/Demo.py | txiao3/Capstone-URMC | 754bff5699383d1270a2301d39f60ca4359952c7 | [
"MIT"
] | 1 | 2021-11-15T17:49:48.000Z | 2021-11-15T17:49:48.000Z | URMC_CTSI_openbadge_analysis/Demo.py | VerolaX/Capstone-URMC | 754bff5699383d1270a2301d39f60ca4359952c7 | [
"MIT"
] | 2 | 2019-11-15T19:44:27.000Z | 2020-06-12T16:39:22.000Z | import Dynamic_Network_Graph_Exploration_py3 as dynamic
import Data_Cleaning as dc
#import heatmap_functions as heatmap
#import Member_Distribution as dist
'''
So far the main method only includes dynamic network graph functions
Heatmap_functions and Member_Distribution functions to be added
'''
def main():
SELECTED_BEACON = 12,
time_zone = 'US/Eastern'
log_version = '2.0'
time_bins_size = '1min'
members_metadata_filename = "Member-2019-05-28.csv"
beacons_metadata_filename = "location table.xlsx"
attendees_metadata_filename = "Badge assignments_Attendees_2019.xlsx"
data_dir = "../proximity_2019-06-01/"
tmp_m2ms,tmp_m2bs,attendees_metadata,members_metadata= dc.DataCleaning(SELECTED_BEACON,
time_zone,log_version,time_bins_size,
members_metadata_filename,
beacons_metadata_filename,
attendees_metadata_filename,
data_dir)
dynamic.NetworkGraphBasicExample('2019-06-01 10:00','2019-06-01 11:20',tmp_m2ms)
'''
This function is the code given in the Open Badge Analysis Library
'''
dynamic.LunchTimeAnalysis(tmp_m2ms)
'''
Find interactive groups during lunch time
All the parameters in this function are preset
'''
dynamic.BreakoutSessionAnalysis(tmp_m2ms)
'''
Find interactive groups during breakout session
All the parameters in this function are preset
'''
dynamic.InteractionNetworkGraph(time_interval_start_h=9,time_interval_start_m=50,
time_interval_end_h=11,time_interval_end_m=20,
interval=2,t_count_threshold = 2,tmp_m2ms=tmp_m2ms)
'''
Find interactive groups during any given time with any given parameters
The values of the parameters are manually determined by the activities
engaged in the time frame given
'''
if __name__ == "__main__":
main() | 36.305085 | 92 | 0.639122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 916 | 0.427638 |
f697ae39334810b9255aec06cc2f5e7c7c446c65 | 6,606 | py | Python | evap/rewards/tests.py | karyon/EvaP | 9026fe0af4e261be73423131cfc52a7e655c82e6 | [
"MIT"
] | null | null | null | evap/rewards/tests.py | karyon/EvaP | 9026fe0af4e261be73423131cfc52a7e655c82e6 | [
"MIT"
] | null | null | null | evap/rewards/tests.py | karyon/EvaP | 9026fe0af4e261be73423131cfc52a7e655c82e6 | [
"MIT"
] | null | null | null | from django.conf import settings
from django_webtest import WebTest
from evap.evaluation.models import Course
from evap.evaluation.models import UserProfile
from evap.rewards.models import SemesterActivation
from evap.rewards.models import RewardPointRedemptionEvent
from evap.rewards.tools import reward_points_of_user
from evap.staff.tests import lastform
from django.core.urlresolvers import reverse
from model_mommy import mommy
class RewardTests(WebTest):
fixtures = ['minimal_test_data_rewards']
csrf_checks = False
def test_delete_redemption_events(self):
"""
Submits a request that tries to delete an event where users already redeemed points -> should not work.
Secondly it issues a GET Request and asserts that the page for deleting events is returned.
Last it submits a request that should delete the event.
"""
# try to delete event that can not be deleted, because people already redeemed points
response = self.app.post(reverse("rewards:reward_point_redemption_event_delete", args=[1]), user="evap")
self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
response = response.follow()
self.assertContains(response, "cannot be deleted")
self.assertTrue(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
# make sure that a GET Request does not delete an event
response = self.app.get(reverse("rewards:reward_point_redemption_event_delete", args=[2]), user="evap")
self.assertTemplateUsed(response, "rewards_reward_point_redemption_event_delete.html")
self.assertTrue(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
# now delete for real
response = self.app.post(reverse("rewards:reward_point_redemption_event_delete", args=[2]), user="evap")
self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
self.assertFalse(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
def test_redeem_reward_points(self):
"""
Submits a request that redeems all available reward points and checks that this works.
Also checks that it is not possible to redeem more points than the user actually has.
"""
response = self.app.get(reverse("rewards:index"), user="student")
self.assertEqual(response.status_code, 200)
user = UserProfile.objects.get(pk=5)
form = lastform(response)
form.set("points-1", reward_points_of_user(user))
response = form.submit()
self.assertEqual(response.status_code, 200)
self.assertContains(response, "You successfully redeemed your points.")
self.assertEqual(0, reward_points_of_user(user))
form.set("points-1", 1)
form.set("points-2", 3)
response = form.submit()
self.assertIn(b"have enough reward points.", response.body)
def test_create_redemption_event(self):
"""
submits a newly created redemption event and checks that the event has been created
"""
response = self.app.get(reverse("rewards:reward_point_redemption_event_create"), user="evap")
form = lastform(response)
form.set('name', 'Test3Event')
form.set('date', '2014-12-10')
form.set('redeem_end_date', '2014-11-20')
response = form.submit()
self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
self.assertEqual(RewardPointRedemptionEvent.objects.count(), 3)
def test_edit_redemption_event(self):
"""
submits a changed redemption event and tests whether it actually has changed
"""
response = self.app.get(reverse("rewards:reward_point_redemption_event_edit", args=[2]), user="evap")
form = lastform(response)
name = form.get('name').value
form.set('name', 'new name')
response = form.submit()
self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
self.assertNotEqual(RewardPointRedemptionEvent.objects.get(pk=2).name, name)
def test_grant_reward_points(self):
"""
submits several requests that trigger the reward point granting and checks that the reward point
granting works as expected for the different requests.
"""
user = UserProfile.objects.get(pk=5)
reward_points_before_end = reward_points_of_user(user)
response = self.app.get(reverse("student:vote", args=[9]), user="student")
form = lastform(response)
for key, value in form.fields.items():
if key is not None and "question" in key:
form.set(key, 6)
response = form.submit()
self.assertRedirects(response, reverse('student:index'))
# semester is not activated --> number of reward points should not increase
self.assertEqual(reward_points_before_end, reward_points_of_user(user))
# reset course for another try
course = Course.objects.get(pk=9)
course.voters = []
# activate semester
activation = SemesterActivation.objects.get(semester=course.semester)
activation.is_active = True
activation.save()
# create a new course
new_course = mommy.make(Course, semester=course.semester)
new_course.save()
new_course.participants.add(user)
new_course.save()
response = form.submit()
self.assertRedirects(response, reverse('student:index'))
# user also has other courses this semester --> number of reward points should not increase
self.assertEqual(reward_points_before_end, reward_points_of_user(user))
course.voters = []
course.save()
new_course.participants.remove(user)
new_course.save()
# last course of user so he may get reward points
response = form.submit()
self.assertRedirects(response, reverse('student:index'))
# if this test fails because of this assertion check that the user is allowed to receive reward points!
self.assertEqual(reward_points_before_end + settings.REWARD_POINTS_PER_SEMESTER, reward_points_of_user(user))
# test behaviour if user already got reward points
course.voters = []
course.save()
response = form.submit()
self.assertRedirects(response, reverse('student:index'))
self.assertEqual(reward_points_before_end + settings.REWARD_POINTS_PER_SEMESTER, reward_points_of_user(user))
| 45.558621 | 117 | 0.690887 | 6,170 | 0.933999 | 0 | 0 | 0 | 0 | 0 | 0 | 2,338 | 0.353921 |
f69968193f8656cf1f5a6508a5e4bdf8cffb6a3d | 1,416 | py | Python | tests/test_seo_client.py | bedroesb/ddipy | fec0c6954c6455ac2487c93542564a41909f715f | [
"Apache-2.0"
] | 1 | 2020-08-24T18:55:02.000Z | 2020-08-24T18:55:02.000Z | tests/test_seo_client.py | bedroesb/ddipy | fec0c6954c6455ac2487c93542564a41909f715f | [
"Apache-2.0"
] | 8 | 2020-02-04T10:19:45.000Z | 2021-02-09T17:08:52.000Z | tests/test_seo_client.py | bedroesb/ddipy | fec0c6954c6455ac2487c93542564a41909f715f | [
"Apache-2.0"
] | 2 | 2020-02-04T19:24:07.000Z | 2020-05-06T15:57:44.000Z | from unittest import TestCase
from ddipy.constants import MISSING_PARAMETER
from ddipy.ddi_utils import BadRequest
from ddipy.seo_client import SeoClient
class TestSeoClient(TestCase):
def test_seo_home(self):
client = SeoClient()
res = client.get_seo_home()
assert len(res.graph) > 0
def test_seo_search(self):
client = SeoClient()
res = client.get_seo_search()
assert res.name == "Browse"
def test_seo_api(self):
client = SeoClient()
res = client.get_seo_api()
assert res.name == "API"
def test_seo_database(self):
client = SeoClient()
res = client.get_seo_database()
assert res.name == "Databases"
def test_seo_dataset(self):
client = SeoClient()
res = client.get_seo_dataset("pride", "PXD000210")
assert res.name == "Proteome analysis by charge state-selective separation of peptides: a multidimensional approach"
try:
res = client.get_seo_dataset("pride", "PXDqqqqqqqq")
except BadRequest as err:
assert err.status == 500
try:
res = client.get_seo_dataset(None, "PXDqqqqqqqq")
except BadRequest as err:
assert err.status == MISSING_PARAMETER
def test_seo_about(self):
client = SeoClient()
res = client.get_seo_about()
assert res.name == "About OmicsDI"
| 29.5 | 124 | 0.637006 | 1,258 | 0.888418 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.132062 |
f6996c46ab316655cd0bf4a158ff83417ba37b8e | 3,900 | py | Python | construction/markdown.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | null | null | null | construction/markdown.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | null | null | null | construction/markdown.py | rik/mesconseilscovid | ff4b365a677da6bb73284ca5bba73651cde570e9 | [
"MIT"
] | null | null | null | import re
from textwrap import indent
import mistune
from jinja2 import Template
from .directives.injection import InjectionDirective
from .directives.renvoi import RenvoiDirective
from .directives.section import SectionDirective
from .directives.question import QuestionDirective
from .directives.toc import DirectiveToc
from .typographie import typographie
class FrenchTypographyMixin:
def text(self, text_):
return typographie(super().text(text_))
def block_html(self, html):
return typographie(super().block_html(html))
class ClassMixin:
"""Possibilité d’ajouter une classe CSS sur un paragraphe ou un élément de liste.
Par exemple :
* {.maClasse} item classique de la liste en markdown
"""
RE_CLASS = re.compile(
r"""^
(?P<before>.*?)
(?:\s*\{\.(?P<class>[\w\- ]+?)\}\s*)
(?P<after>.*)
$
""",
re.MULTILINE | re.VERBOSE,
)
def paragraph(self, text):
return self._element_with_classes("p", text) or super().paragraph(text)
def list_item(self, text, level):
return self._element_with_classes("li", text) or super().list_item(text, level)
def _element_with_classes(self, name, text):
mo = self.RE_CLASS.match(text)
if mo is not None:
class_ = mo.group("class")
content = " ".join(filter(None, [mo.group("before"), mo.group("after")]))
return f'<{name} class="{class_}">{content}</{name}>\n'
class CustomHTMLRenderer(FrenchTypographyMixin, ClassMixin, mistune.HTMLRenderer):
pass
def create_markdown_parser(questions_index=None):
plugins = [
SectionDirective(),
QuestionDirective(),
DirectiveToc(),
]
if questions_index is not None:
plugins.append(RenvoiDirective(questions_index=questions_index))
plugins.append(InjectionDirective(questions_index=questions_index))
return mistune.create_markdown(
renderer=CustomHTMLRenderer(escape=False),
plugins=plugins,
)
class MarkdownContent:
"""Block content."""
def __init__(self, text, markdown):
self.text = text
self.markdown = markdown
def __str__(self):
return self.render_block()
def render_block(self):
return self.markdown(self.text)
def split(self, separator="\n---\n"):
return [
self.__class__(text.strip(), self.markdown)
for text in self.text.split(separator)
]
def render_me(self, tag="div"):
return f'<{tag} class="me visible">{str(self).strip()}</{tag}>'
def render_them(self, tag="div"):
return f'<{tag} class="them" hidden>{str(self).strip()}</{tag}>'
class MarkdownInlineContent(MarkdownContent):
"""Inline content."""
def __str__(self):
return self.render_inline()
def render_inline(self):
return self.markdown.inline(self.text, {}).strip()
def render_me(self):
return super().render_me(tag="span")
def render_them(self):
return super().render_them(tag="span")
def render_markdown_file(file_path, markdown_parser):
source = file_path.read_text()
templated_source = Template(source).render(formulaire=render_formulaire)
return MarkdownContent(templated_source, markdown_parser)
def render_formulaire(nom_formulaire, prefixe=""):
from .thematiques import THEMATIQUES_DIR
path = THEMATIQUES_DIR / "formulaires" / f"{nom_formulaire}.md"
with path.open() as f:
template = Template(f.read())
if prefixe:
prefixe = nom_formulaire + "-" + prefixe
else:
prefixe = nom_formulaire
markdown = (
f'<div class="formulaire" data-nom="{nom_formulaire}" data-prefixe="{prefixe}">\n\n'
+ template.render(prefixe=prefixe)
+ "\n\n</div>"
)
return indent(markdown, " ").lstrip()
| 27.857143 | 92 | 0.647436 | 2,272 | 0.581818 | 0 | 0 | 0 | 0 | 0 | 0 | 712 | 0.18233 |
f699d90b4f3cca23aac7e6c012ba4fdf6a39d432 | 5,404 | py | Python | HMS/Hospital/models.py | Arshad360/Hospital-Management-System-Cse327-Projectr | 707ab80d021f8a2d10e28b25dd8df5aea512787a | [
"MIT"
] | 2 | 2021-02-10T18:10:30.000Z | 2021-04-27T18:07:51.000Z | HMS/Hospital/models.py | Arshad360/Hospital-Management-System-Cse327-Projectr | 707ab80d021f8a2d10e28b25dd8df5aea512787a | [
"MIT"
] | 1 | 2020-09-23T19:00:54.000Z | 2020-09-23T19:04:22.000Z | HMS/Hospital/models.py | Arshad360/Hospital-Management-System-Cse327-Projectr | 707ab80d021f8a2d10e28b25dd8df5aea512787a | [
"MIT"
] | 1 | 2021-05-02T17:11:33.000Z | 2021-05-02T17:11:33.000Z | from django.db import models
from django.contrib.auth.models import User
# Create your models here
# All the departments
departments = [('Cardiologist', 'Cardiologist'),
('Dermatologists', 'Dermatologists'),
('Emergency Medicine Specialists', 'Emergency Medicine Specialists'),
('Allergists/Immunologists', 'Allergists/Immunologists'),
('Anesthesiologists', 'Anesthesiologists'),
('Colon and Rectal Surgeons', 'Colon and Rectal Surgeons')
]
# Defines of the Appointment Class
class Appointment(models.Model):
# Gets the patientId
patientId = models.PositiveIntegerField(null=True)
# Gets the doctorId
doctorId = models.PositiveIntegerField(null=True)
# Gets the patientName
patientName = models.CharField(max_length=40, null=True)
# Gets the doctorName
doctorName = models.CharField(max_length=40, null=True)
# Gets the appointmentDate
appointmentDate = models.DateField(auto_now=True)
# Gets the description
description = models.TextField(max_length=500)
status = models.BooleanField(default=False)
# Ambulance class define
class Ambulance(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
# Emergency class define
class Emergency(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
=======
""""
:Title = CharField
:Maxlength = 40
:body = Textfield
"""
class Pharmacy(models.Model):
title = models.CharField(max_length=40,null=True)
pub_date = models.DateTimeField()
body = models.TextField()
class availablebloodGroup(models.Model):
title = models.CharField(max_length=40,null=True)
pub_date = models.DateTimeField()
body = models.TextField()
class bloodBank(models.Model):
title = models.CharField(max_length=40,null=True)
pub_date = models.DateTimeField()
body = models.TextField()
class coronaUpdate(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class donateBlood(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class footer(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class home(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class homeSlider(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class homeBase(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class login(models.Model):
title = models.PositiveIntegerField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class navBar(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class notice(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class Pharmacy(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class saveLife(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
class specialCare(models.Model):
title = models.CharField(max_length=40, null=True)
pub_date = models.DateTimeField()
body = models.TextField()
=======
departments=[('Cardiologist','Cardiologist')
('Dermatologists','Dermatologists'),
('Emergency Medicine Specialists','Emergency Medicine Specialists'),
('Allergists/Immunologists','Allergists/Immunologists'),
('Anesthesiologists','Anesthesiologists'),
('Colon and Rectal Surgeons','Colon and Rectal Surgeons')
]
class Doctor(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
profile_pic= models.ImageField(upload_to='profile_pic/DoctorProfilePic/',null=True,blank=True)
address = models.CharField(max_length=40)
mobile = models.CharField(max_length=20,null=True)
department= models.CharField(max_length=50,choices=departments,default='Cardiologist')
status=models.BooleanField(default=False)
@property
def get_name(self):
return self.user.first_name+" "+self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return "{} ({})".format(self.user.first_name,self.department)
# corona class define
class coronacenter(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
# diabetes class define
class diabetescenter(models.Model):
title = models.CharField(max_length=40)
pub_date = models.DateTimeField()
body = models.TextField()
| 29.210811 | 98 | 0.675611 | 4,274 | 0.790896 | 0 | 0 | 152 | 0.028127 | 0 | 0 | 957 | 0.177091 |
f69a73d2180c85c42d121979fa3a85f52e293b5b | 2,008 | py | Python | intents/connectors/_experimental/snips/entities_test.py | dario-chiappetta/dialogflow_agents | ecb03bdce491a3c9d6769816507f3027fd5a60d1 | [
"Apache-2.0"
] | 6 | 2021-06-24T12:22:21.000Z | 2021-07-21T21:06:19.000Z | intents/connectors/_experimental/snips/entities_test.py | dario-chiappetta/dialogflow_agents | ecb03bdce491a3c9d6769816507f3027fd5a60d1 | [
"Apache-2.0"
] | 27 | 2021-06-05T10:41:08.000Z | 2021-11-01T17:29:38.000Z | intents/connectors/_experimental/snips/entities_test.py | dariowho/intents | ecb03bdce491a3c9d6769816507f3027fd5a60d1 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import pytest
from intents import Sys
from intents.connectors._experimental.snips import entities
from intents.connectors._experimental.snips import prediction_format as pf
def test_date_mapping_from_service():
mapping = entities.DateMapping()
snips_date_result = {
'input': 'My birthday is on august 24',
'intent': {
'intentName': 'UserSaysBirthday',
'probability': 1.0
},
'slots': [
{
'range': {'start': 15, 'end': 27},
'rawValue': 'on august 24',
'value': {
'kind': 'InstantTime',
'value': '2021-08-24 00:00:00 +02:00',
'grain': 'Day',
'precision': 'Exact'
},
'entity': 'snips/date',
'slotName': 'birthday_date'
}
]
}
parse_result = pf.from_dict(snips_date_result)
entity = mapping.from_service(parse_result.slots[0].value)
assert entity == Sys.Date(2021, 8, 24)
def test_date_mapping_unexpected_grain():
mapping = entities.DateMapping()
value = {
'kind': 'InstantTime',
'value': '2021-08-24 00:00:00 +02:00',
'grain': 'Month',
'precision': 'Exact'
}
entity = mapping.from_service(value)
with pytest.warns(None):
assert entity == Sys.Date(2021, 8, 24)
def test_date_mapping_unexpected_kind():
mapping = entities.DateMapping()
value = {
'kind': 'UNEXPECTED',
'value': '2021-08-24 00:00:00 +02:00',
'grain': 'Day',
'precision': 'Exact'
}
entity = mapping.from_service(value)
with pytest.warns(None):
assert entity == Sys.Date(2021, 8, 24)
def test_date_mapping_to_service():
mapping = entities.DateMapping()
assert mapping.to_service(Sys.Date(2021, 8, 8)) == "2021-08-08"
assert mapping.to_service(datetime(year=2021, month=8, day=8)) == "2021-08-08"
| 31.375 | 82 | 0.569721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.232072 |
f69bf13f443e655c01aa380fb78f77a007b8bcce | 150 | py | Python | src/tests/unit/common/test_common_management_commands.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | [
"Apache-2.0"
] | 1 | 2018-12-09T12:35:10.000Z | 2018-12-09T12:35:10.000Z | src/tests/unit/common/test_common_management_commands.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | [
"Apache-2.0"
] | 1 | 2019-07-05T20:03:42.000Z | 2019-07-05T20:03:42.000Z | src/tests/unit/common/test_common_management_commands.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | [
"Apache-2.0"
] | null | null | null | import pytest
from django.core.management import call_command
@pytest.mark.django_db
def test_common_runperiodic():
call_command('runperiodic')
| 18.75 | 47 | 0.813333 | 0 | 0 | 0 | 0 | 85 | 0.566667 | 0 | 0 | 13 | 0.086667 |
f69d29d7625c1a49845d962bb860467f26d1ff33 | 2,907 | py | Python | src/ui_elements/dropdown.py | MichalKacprzak99/WFIIS-3D-Graphics-2021 | 3daf87300daa6ba505d81a81c78f90be64c8dda4 | [
"MIT"
] | null | null | null | src/ui_elements/dropdown.py | MichalKacprzak99/WFIIS-3D-Graphics-2021 | 3daf87300daa6ba505d81a81c78f90be64c8dda4 | [
"MIT"
] | null | null | null | src/ui_elements/dropdown.py | MichalKacprzak99/WFIIS-3D-Graphics-2021 | 3daf87300daa6ba505d81a81c78f90be64c8dda4 | [
"MIT"
] | null | null | null | from typing import List, Optional
import pygame
from OpenGL.GL import *
from src.utils.drawing_utils import draw_texture, surface_to_texture
class DropDown:
def __init__(self, color_menu, color_option, x: int, y: int, width: int, height: int,
font_name: str, font_size: int, main: str, options: List[str], default: Optional[str] = None):
self._init_pygame()
self._init_openGL()
self.color_menu = color_menu
self.color_option = color_option
self.rect = pygame.Rect(x, y, width, height)
self.font = pygame.font.SysFont(font_name, font_size)
self.main = main
self.default = default if default else main
self.options = options
self.draw_menu = False
self.menu_active = False
self.active_option = -1
self.valid_option = False
def draw(self):
self.surface.fill("white")
pygame.draw.rect(self.surface, self.color_menu[self.menu_active], self.rect, 0)
msg = self.font.render(self.main, True, (0, 0, 0))
self.surface.blit(msg, msg.get_rect(center=self.rect.center))
if self.draw_menu:
for i, text in enumerate(self.options):
rect = self.rect.copy()
rect.y += (i + 1) * self.rect.height
pygame.draw.rect(self.surface, self.color_option[1 if i == self.active_option else 0], rect, 0)
msg = self.font.render(text, True, (0, 0, 0))
self.surface.blit(msg, msg.get_rect(center=rect.center))
surface_to_texture(pygame_surface=self.surface, texture_id=self.texture_id)
draw_texture(texture_id=self.texture_id)
def update(self, event_list):
mouse_pos = pygame.mouse.get_pos()
self.menu_active = self.rect.collidepoint(mouse_pos)
self.active_option = -1
for i in range(len(self.options)):
rect = self.rect.copy()
rect.y += (i + 1) * self.rect.height
if rect.collidepoint(mouse_pos):
self.active_option = i
break
if not self.menu_active and self.active_option == -1:
self.draw_menu = False
for event in event_list:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if self.menu_active:
self.draw_menu = not self.draw_menu
elif self.draw_menu and self.active_option >= 0:
self.draw_menu = False
self.valid_option = True
self.main = self.options[self.active_option]
return self.active_option
return -1
def reset(self):
self.main = self.default
self.valid_option = False
def _init_pygame(self):
self.surface = pygame.Surface((800, 600))
def _init_openGL(self):
self.texture_id = glGenTextures(1)
| 36.3375 | 111 | 0.602683 | 2,761 | 0.949776 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.002408 |
f69dfd5273c85c83e5fd287ed67d12b45233de18 | 43 | py | Python | simulation/device/simulated/air_conditioner/__init__.py | LBNL-ETA/LPDM | 3384a784b97e49cd7a801b758717a7107a51119f | [
"BSD-3-Clause-LBNL"
] | 2 | 2019-01-05T02:33:38.000Z | 2020-04-22T16:57:50.000Z | simulation/device/simulated/air_conditioner/__init__.py | LBNL-ETA/LPDM | 3384a784b97e49cd7a801b758717a7107a51119f | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-04-17T18:13:08.000Z | 2021-04-23T22:40:23.000Z | simulation/device/simulated/air_conditioner/__init__.py | LBNL-ETA/LPDM | 3384a784b97e49cd7a801b758717a7107a51119f | [
"BSD-3-Clause-LBNL"
] | 1 | 2019-01-31T08:37:44.000Z | 2019-01-31T08:37:44.000Z | from air_conditioner import AirConditioner
| 21.5 | 42 | 0.906977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f69e65dd3ff7506ec8da4973473cc712782b9ed8 | 2,333 | py | Python | Chapter_2/rock_paper_scissors_functions.py | MrRiahi/Practices-of-the-Python-Pro | e1af57d31944a0b181890271639ac064eb151678 | [
"MIT"
] | null | null | null | Chapter_2/rock_paper_scissors_functions.py | MrRiahi/Practices-of-the-Python-Pro | e1af57d31944a0b181890271639ac064eb151678 | [
"MIT"
] | null | null | null | Chapter_2/rock_paper_scissors_functions.py | MrRiahi/Practices-of-the-Python-Pro | e1af57d31944a0b181890271639ac064eb151678 | [
"MIT"
] | null | null | null | import random
OPTIONS = ['rock', 'paper', 'scissors']
def print_options():
"""
This function prints game's options
:return:
"""
print('(1) Rock\n(2) Paper\n(3) Scissors')
def get_human_choice():
"""
This function gets the choice of the human
:return:
"""
human_choice_number = input('Enter the number of your choice: ')
human_choice_action = OPTIONS[int(human_choice_number) - 1]
print(f'Your choice is {human_choice_action}')
return human_choice_action
def get_computer_choice():
"""
This function gets the computer choice using a random number.
:return:
"""
computer_choice = random.choice(OPTIONS)
print(f'Computer choice is {computer_choice}')
return computer_choice
def print_win_lose_message(human_choice, computer_choice, human_beats, human_loses_to):
"""
This function prints the win or lose message.
:param human_choice:
:param computer_choice:
:param human_beats:
:param human_loses_to:
:return:
"""
if computer_choice == human_beats:
print(f'Yes, {human_choice} beats {computer_choice}')
elif computer_choice == human_loses_to:
print(f'Ops, {human_choice} loses to {computer_choice}')
def print_result(human_choice, computer_choice):
"""
This function prints the result of the game.
:param human_choice:
:param computer_choice:
:return:
"""
if human_choice == computer_choice:
print('Drawn, Please try again!')
elif human_choice == 'rock':
print_win_lose_message(human_choice=human_choice, computer_choice=computer_choice,
human_beats='scissors', human_loses_to='paper')
elif human_choice == 'scissors':
print_win_lose_message(human_choice=human_choice, computer_choice=computer_choice,
human_beats='paper', human_loses_to='rock')
elif human_choice == 'paper':
print_win_lose_message(human_choice=human_choice, computer_choice=computer_choice,
human_beats='rock', human_loses_to='scissors')
if __name__ == '__main__':
print_options()
human_choice = get_human_choice()
computer_choice = get_computer_choice()
print_result(human_choice=human_choice, computer_choice=computer_choice)
| 27.127907 | 90 | 0.676811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 900 | 0.385769 |
f6a17ab311cc2ce265a769f7064d5bdc6f1392e2 | 899 | py | Python | ndcontainers/utils/mixins.py | MattEding/Array-Containers | 12156599d2f8f9e5f8cf5d3f784e98e2651e8bc1 | [
"MIT"
] | null | null | null | ndcontainers/utils/mixins.py | MattEding/Array-Containers | 12156599d2f8f9e5f8cf5d3f784e98e2651e8bc1 | [
"MIT"
] | null | null | null | ndcontainers/utils/mixins.py | MattEding/Array-Containers | 12156599d2f8f9e5f8cf5d3f784e98e2651e8bc1 | [
"MIT"
] | null | null | null | import inspect
import itertools
__all__ = ['NDArrayReprMixin']
class NDArrayReprMixin:
def _name_params(self, ignore=()):
name = type(self).__name__
sig = inspect.signature(type(self))
params = tuple(p for p in sig.parameters if p not in ignore)
return name, params
def _repr_(self, *param_values, ignore=()):
name, params = self._name_params(ignore)
head = params[0]
width = len(name) + len(head) + 1
tail = (p.rjust(width) for p in params[1:])
pair_seq = itertools.chain.from_iterable(zip(tail, param_values[1:]))
text = "{}(" + ",\n".join("{}={}" for p in params) + ")"
fmts = itertools.chain((name, head, param_values[0]), pair_seq)
return text.format(*fmts)
def _prefix_(self, param_idx):
name, params = self._name_params()
return f"{name}({params[param_idx]}="
| 31 | 77 | 0.607341 | 832 | 0.925473 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.07564 |
f6a1e1d0b083371e879f30be890154f2c8a815bc | 3,417 | py | Python | demo/python/horizon.py | ebraminio/astronomy-fork | f395ac486893858d9badd7a26181b2e392ec9fd5 | [
"MIT"
] | 138 | 2019-05-31T01:46:16.000Z | 2022-03-30T12:31:49.000Z | demo/python/horizon.py | ebraminio/astronomy-fork | f395ac486893858d9badd7a26181b2e392ec9fd5 | [
"MIT"
] | 152 | 2019-04-09T23:28:39.000Z | 2022-03-31T18:27:52.000Z | demo/python/horizon.py | ebraminio/astronomy-fork | f395ac486893858d9badd7a26181b2e392ec9fd5 | [
"MIT"
] | 19 | 2019-06-21T07:38:40.000Z | 2022-02-28T09:50:00.000Z | #!/usr/bin/env python3
#
# horizon.py - by Don Cross - 2019-12-18
#
# Example Python program for Astronomy Engine:
# https://github.com/cosinekitty/astronomy
#
# This is a more advanced example. It shows how to use coordinate
# transforms and a binary search to find the two azimuths where the
# ecliptic intersects with an observer's horizon at a given date and time.
#
# To execute, run the command:
#
# python3 horizon.py latitude longitude [yyyy-mm-ddThh:mm:ssZ]
#
import sys
import astronomy
from astro_demo_common import ParseArgs
NUM_SAMPLES = 4
def ECLIPLON(i):
return (360.0 * i) / NUM_SAMPLES
def HorizontalCoords(ecliptic_longitude, time, rot_ecl_hor):
eclip = astronomy.Spherical(
0.0, # being "on the ecliptic plane" means ecliptic latitude is zero.
ecliptic_longitude,
1.0 # any positive distance value will work fine.
)
# Convert ecliptic angular coordinates to ecliptic vector.
ecl_vec = astronomy.VectorFromSphere(eclip, time)
# Use the rotation matrix to convert ecliptic vector to horizontal vector.
hor_vec = astronomy.RotateVector(rot_ecl_hor, ecl_vec)
# Find horizontal angular coordinates, correcting for atmospheric refraction.
return astronomy.HorizonFromVector(hor_vec, astronomy.Refraction.Normal)
def Search(time, rot_ecl_hor, e1, e2):
tolerance = 1.0e-6 # one-millionth of a degree is close enough!
# Binary search: find the ecliptic longitude such that the horizontal altitude
# ascends through a zero value. The caller must pass e1, e2 such that the altitudes
# bound zero in ascending order.
while True:
e3 = (e1 + e2) / 2.0
h3 = HorizontalCoords(e3, time, rot_ecl_hor)
if abs(e2-e1) < tolerance:
return (e3, h3)
if h3.lat < 0.0:
e1 = e3
else:
e2 = e3
def FindEclipticCrossings(observer, time):
    """Find and print the azimuths where the ecliptic crosses the horizon.

    The ecliptic is a celestial circle that describes the mean plane of the
    Earth's orbit around the Sun. We use J2000 ecliptic coordinates: the
    x-axis is defined by where the plane of the Earth's equator on
    January 1, 2000 at noon UTC intersects the ecliptic plane, and points
    toward the March equinox.
    """
    # Rotation matrix converting J2000 ecliptic vectors to horizontal
    # vectors for this observer and time.
    rot = astronomy.Rotation_ECL_HOR(time, observer)
    # Sample the ecliptic at evenly spaced longitudes and remember the
    # horizontal coordinates of every sample.
    samples = [HorizontalCoords(ECLIPLON(k), time, rot) for k in range(NUM_SAMPLES)]
    for k in range(NUM_SAMPLES):
        alt1 = samples[k].lat
        alt2 = samples[(k + 1) % NUM_SAMPLES].lat
        lon1 = ECLIPLON(k)
        lon2 = ECLIPLON(k + 1)
        if alt1 * alt2 > 0.0:
            continue  # no sign change: no horizon crossing inside this interval
        # Bisect with the endpoints ordered so the altitude ascends through zero.
        if alt2 > alt1:
            (ex, h) = Search(time, rot, lon1, lon2)
        else:
            (ex, h) = Search(time, rot, lon2, lon1)
        direction = 'ascends' if 0.0 < h.lon < 180.0 else 'descends'
        print('Ecliptic longitude {:0.4f} {} through horizon az {:0.4f}, alt {:0.5g}'.format(ex, direction, h.lon, h.lat))
    return 0
if __name__ == '__main__':
    # Parse latitude, longitude and optional UTC time from the command line,
    # then exit with the status code returned by FindEclipticCrossings.
    observer, time = ParseArgs(sys.argv)
    sys.exit(FindEclipticCrossings(observer, time))
| 35.968421 | 126 | 0.651156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,679 | 0.491367 |
f6a574e48a07bd8f39a756c8f8ad677e91f5bd1e | 1,101 | py | Python | ProtocolHandlerAddonPython/tools/step1settings.py | p--q/ProtocolHandlerAddonPython | 5dbb6034b4b9b7cb39cb9d397c812550668dd27f | [
"BSD-3-Clause"
] | null | null | null | ProtocolHandlerAddonPython/tools/step1settings.py | p--q/ProtocolHandlerAddonPython | 5dbb6034b4b9b7cb39cb9d397c812550668dd27f | [
"BSD-3-Clause"
] | null | null | null | ProtocolHandlerAddonPython/tools/step1settings.py | p--q/ProtocolHandlerAddonPython | 5dbb6034b4b9b7cb39cb9d397c812550668dd27f | [
"BSD-3-Clause"
] | null | null | null | #!/opt/libreoffice5.2/program/python
# -*- coding: utf-8 -*-
# This name will be rdb file name, .components file name, oxt file name.
BASE_NAME = "ProtocolHandlerAddonPython" # Used as the rdb file name, the .components file name and the oxt file name.
# a list of a dict of Python UNO Component Files: (file name, service implementation name, service name, handled protocol)
LST = [
    {"PYTHON_UNO_Component":"ProtocolHandlerAddon.py","IMPLE_NAME":'ProtocolHandlerAddonImpl',"SERVICE_NAME":'com.sun.star.frame.ProtocolHandler',"HANDLED_PROTOCOL":"org.openoffice.Office.addon.example" }
] # List of dicts of (Python UNO Component file name, implementation service name, service name, protocol name).
import os
import sys
src_path = os.path.join(os.path.dirname(sys.path[0]),"src") # Absolute path of the src folder.
def createBK(path):
    """If *path* already exists, rename it to "<path>.bk" to keep it as a backup.

    Any pre-existing backup with the same name is overwritten. Does nothing
    (and prints nothing) when *path* does not exist.
    """
    if os.path.exists(path):  # a previous version of the file exists
        bk = path + ".bk"  # backup file name
        # os.replace overwrites an existing target atomically; unlike
        # os.rename it also works on Windows when the .bk file already exists,
        # so the explicit remove-then-rename dance is unnecessary.
        os.replace(path, bk)
        # Report only when a rename actually happened (the original printed
        # unconditionally, which was misleading when no previous file existed).
        print("The previous version of " + os.path.basename(path) + " file has been renamed for backup.")
f6a747c50017c4e0b7f038e1a5a105c807c87a64 | 8,508 | py | Python | backup/socketbackend.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 173 | 2019-01-17T12:40:47.000Z | 2022-03-27T12:14:00.000Z | backup/socketbackend.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 284 | 2019-03-01T17:54:14.000Z | 2022-03-29T13:27:51.000Z | backup/socketbackend.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 92 | 2019-02-26T03:45:40.000Z | 2022-03-28T03:23:50.000Z | from collections import namedtuple
import json, logging, socket, re, struct, time
from typing import Tuple, Iterator
from urllib.parse import urlparse, parse_qs
from backend import Backend, Change
from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
# Total number of reconnection tries
RECONNECT_TRIES=5
# Delay in seconds between reconnections (initial)
RECONNECT_DELAY=5
# Scale delay factor after each failure
RECONNECT_DELAY_BACKOFF=1.5
# (host, port, addrtype) triple describing one network endpoint.
HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype'])
# Parsed socket: URL -- the target endpoint plus the optional proxy settings.
SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])
# Network address type.
class AddrType:
    """Kind of host address detected by parse_host_port."""
    IPv4 = 0
    IPv6 = 1
    NAME = 2  # hostname that is not a literal IP address
# Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor.
class ProxyType:
    """How to reach the backup server: directly or through a SOCKS5 proxy."""
    DIRECT = 0
    SOCKS5 = 1
def parse_host_port(path: str) -> HostPortInfo:
    '''Parse a host:port pair.

    Accepts "[ipv6addr]:port", "a.b.c.d:port" and "hostname:port" forms and
    returns a HostPortInfo carrying the detected AddrType.

    Raises ValueError for an unterminated bracket or a missing/invalid port.
    '''
    if path.startswith('['): # bracketed IPv6 address
        eidx = path.find(']')
        if eidx == -1:
            raise ValueError('Unterminated bracketed host address.')
        host = path[1:eidx]
        addrtype = AddrType.IPv6
        eidx += 1
        if eidx >= len(path) or path[eidx] != ':':
            raise ValueError('Port number missing.')
        eidx += 1
    else:
        eidx = path.find(':')
        if eidx == -1:
            raise ValueError('Port number missing.')
        host = path[0:eidx]
        # Raw string literal: '\d' inside a plain string is an invalid escape
        # sequence (SyntaxWarning on modern Python, a future SyntaxError).
        if re.match(r'\d+\.\d+\.\d+\.\d+$', host): # matches IPv4 address format
            addrtype = AddrType.IPv4
        else:
            addrtype = AddrType.NAME
        eidx += 1
    try:
        port = int(path[eidx:])
    except ValueError:
        raise ValueError('Invalid port number')
    return HostPortInfo(host=host, port=port, addrtype=addrtype)
def parse_socket_url(destination: str) -> SocketURLInfo:
    '''Parse a socket: URL to extract the information contained in it.'''
    parsed = urlparse(destination)
    if parsed.scheme != 'socket':
        raise ValueError('Scheme for socket backend must be socket:...')
    target = parse_host_port(parsed.path)
    # Defaults: connect directly, no proxy.
    proxytype = ProxyType.DIRECT
    proxytarget = None
    # Query string parameters; anything other than "proxy" is rejected.
    for key, values in parse_qs(parsed.query).items():
        if key != 'proxy':
            raise ValueError('Unknown query string parameter ' + key)
        # e.g. proxy=socks5:127.0.0.1:9050
        if len(values) != 1:
            raise ValueError('Proxy can only have one value')
        (ptype, ptarget) = values[0].split(':', 1)
        if ptype != 'socks5':
            raise ValueError('Unknown proxy type ' + ptype)
        proxytype = ProxyType.SOCKS5
        proxytarget = parse_host_port(ptarget)
    return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget)
class SocketBackend(Backend):
    """Backup backend that streams changes to a remote server over a TCP
    socket, optionally through a SOCKS5 proxy (e.g. for Tor)."""
    def __init__(self, destination: str, create: bool):
        # Backup version bookkeeping; populated by _request_metadata().
        self.version = None
        self.prev_version = None
        self.destination = destination
        self.url = parse_socket_url(destination)
        self.connect()
    def connect(self):
        """Open (or reopen after a drop) the socket to the configured target."""
        if self.url.proxytype == ProxyType.DIRECT:
            if self.url.target.addrtype == AddrType.IPv6:
                self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else: # TODO NAME is assumed to be IPv4 for now
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            assert(self.url.proxytype == ProxyType.SOCKS5)
            # PySocks is only needed when a proxy is actually configured,
            # hence the local import.
            import socks
            self.sock = socks.socksocket()
            self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port)
        logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format(
            self.url.target.host, self.url.target.port, self.url.target.addrtype,
            self.url.proxytype, self.url.proxytarget))
        self.sock.connect((self.url.target.host, self.url.target.port))
        logging.info('Connected to {}'.format(self.destination))
    def _send_packet(self, typ: int, payload: bytes) -> None:
        # Thin wrapper over the shared protocol helper, bound to our socket.
        send_packet(self.sock, typ, payload)
    def _recv_packet(self) -> Tuple[int, bytes]:
        # Thin wrapper over the shared protocol helper, bound to our socket.
        return recv_packet(self.sock)
    def initialize(self) -> bool:
        '''
        Initialize the socket backend by requesting the current metadata
        (protocol, versions) from the server.
        '''
        logging.info('Initializing backend')
        self._request_metadata()
        logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format(
            self.protocol, self.version, self.prev_version, self.version_count
        ))
        return True
    def _request_metadata(self) -> None:
        """Fetch protocol/version metadata from the server into attributes."""
        self._send_packet(PacketType.REQ_METADATA, b'')
        (typ, payload) = self._recv_packet()
        assert(typ == PacketType.METADATA)
        # Network byte order: protocol, version, prev_version (u32), version_count (u64).
        self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload)
    def add_change(self, entry: Change) -> bool:
        """Send one change to the server, reconnecting and retrying with
        exponential backoff if the connection drops. Raises IOError after
        RECONNECT_TRIES failed attempts."""
        typ, payload = packet_from_change(entry)
        # Version we expect the server to be at before applying this entry.
        base_version = self.version
        retry = 0
        retry_delay = RECONNECT_DELAY
        need_connect = False
        while True: # Retry loop
            try:
                if need_connect:
                    self.connect()
                    # Request metadata, to know where we stand
                    self._request_metadata()
                    if self.version == entry.version:
                        # If the current version at the server side matches the version of the
                        # entry, the packet was succesfully sent and processed and the error
                        # happened afterward. Nothing left to do.
                        return True
                    elif base_version == self.version:
                        # The other acceptable option is that the current version still matches
                        # that on the server side. Then we retry.
                        pass
                    else:
                        raise Exception('Unexpected backup version {} after reconnect'.format(self.version))
                self._send_packet(typ, payload)
                # Wait for change to be acknowledged before continuing.
                (typ, _) = self._recv_packet()
                assert(typ == PacketType.ACK)
            except (BrokenPipeError, OSError):
                # Connection problem: fall through to the retry bookkeeping below.
                pass
            else:
                break
            if retry == RECONNECT_TRIES:
                logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry))
                raise IOError('Connection was lost while sending change')
            retry += 1
            logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay))
            time.sleep(retry_delay)
            retry_delay *= RECONNECT_DELAY_BACKOFF
            need_connect = True
        # Success: advance our local notion of the backup version.
        self.prev_version = self.version
        self.version = entry.version
        return True
    def rewind(self) -> bool:
        '''Rewind to previous version.'''
        version = struct.pack("!I", self.prev_version)
        self._send_packet(PacketType.REWIND, version)
        # Wait for change to be acknowledged before continuing.
        (typ, _) = self._recv_packet()
        assert(typ == PacketType.ACK)
        return True
    def stream_changes(self) -> Iterator[Change]:
        """Yield every change stored on the server (a full restore stream),
        verifying at the end that the stream reached the server's version."""
        self._send_packet(PacketType.RESTORE, b'')
        version = -1
        while True:
            (typ, payload) = self._recv_packet()
            if typ in PKT_CHANGE_TYPES:
                change = change_from_packet(typ, payload)
                version = change.version
                yield change
            elif typ == PacketType.DONE:
                break
            else:
                raise ValueError("Unknown entry type {}".format(typ))
        if version != self.version:
            raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
        assert(version == self.version)
    def compact(self):
        """Ask the server to compact its change log; returns the server's
        JSON-decoded compaction statistics."""
        self._send_packet(PacketType.COMPACT, b'')
        (typ, payload) = self._recv_packet()
        assert(typ == PacketType.COMPACT_RES)
        return json.loads(payload.decode())
| 38.672727 | 165 | 0.608134 | 5,663 | 0.665609 | 710 | 0.083451 | 0 | 0 | 0 | 0 | 2,033 | 0.238952 |
f6a8dbd297e720489b31ab7872d3338536905501 | 7,564 | py | Python | src/aac_map.py | vlomonaco/crlmaze | b9607b92b098ffb4fd350941f6c34aecd0ed0fe9 | [
"CC-BY-4.0"
] | 20 | 2019-05-27T16:38:59.000Z | 2021-04-06T16:24:10.000Z | src/aac_map.py | ThyLor/crlmaze | b9607b92b098ffb4fd350941f6c34aecd0ed0fe9 | [
"CC-BY-4.0"
] | 2 | 2021-04-08T15:52:17.000Z | 2021-09-08T13:16:29.000Z | src/aac_map.py | ThyLor/crlmaze | b9607b92b098ffb4fd350941f6c34aecd0ed0fe9 | [
"CC-BY-4.0"
] | 4 | 2019-06-19T07:55:28.000Z | 2020-12-20T22:02:06.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2019. Vincenzo Lomonaco, Karan Desai, Eugenio Culurciello, #
# Davide Maltoni. All rights reserved. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 27-05-2019 #
# Authors: Vincenzo Lomonaco, Karan Desai, Eugenio Culurciello, Davide Maltoni #
# E-mail: vincenzo.lomonaco@unibo.it #
# Website: vincenzolomonaco.com #
################################################################################
import random
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn
from aac_base import AACBase
import cuda
from builtins import *
class BaseModel(AACBase):
    """CNN actor network shared by the advantage actor-critic models.

    Encodes (optionally frame-stacked) screen tensors with three conv layers,
    projects them to a feature vector, and produces action logits from the
    features concatenated with the game variables.
    """
    def __init__(self, cfg):
        super().__init__(cfg)
        self.screen_feature_num = 128
        # Input channels = screen channels times the number of stacked frames.
        self.conv1 = nn.Conv2d(in_channels=cfg['screen_size'][0] * cfg['frame_num'],
                               out_channels=32, kernel_size=(3, 3), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(2, 2))
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1))
        # 32 * 27 * 37 is the flattened conv3 output size for the screen
        # resolution this project feeds in -- assumes a fixed input size;
        # TODO confirm against cfg['screen_size'].
        self.screen_features1 = nn.Linear(32 * 27 * 37, self.screen_feature_num)
        self.batch_norm = nn.BatchNorm1d(self.screen_feature_num)
        layer1_size = 128
        # Actor head: features (+ game variables) -> button logits.
        self.action1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.action2 = nn.Linear(layer1_size + cfg['variable_num'], cfg['button_num'])
        self.batch_norm_action = nn.BatchNorm1d(layer1_size + cfg['variable_num'])
        # Critic head layers (used by subclasses via the returned features).
        self.value1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.value2 = nn.Linear(layer1_size + cfg['variable_num'], 1)
        self.batch_norm_value = nn.BatchNorm1d(layer1_size + cfg['variable_num'])
        # Per-environment frame history; created lazily in transform_input.
        self.screens = None
        self.frame_num = cfg['frame_num']

    def forward(self, screen, variables):
        """Return (action logits, screen feature vector) for a batch."""
        # Convolutional encoder.
        screen_features = F.max_pool2d(screen, kernel_size=(2, 2), stride=(2, 2))
        screen_features = F.selu(self.conv1(screen_features))
        screen_features = F.selu(self.conv2(screen_features))
        screen_features = F.selu(self.conv3(screen_features))
        screen_features = screen_features.view(screen_features.size(0), -1)
        # Shared feature projection (local renamed from `input`, which
        # shadowed the builtin of the same name).
        features = self.screen_features1(screen_features)
        features = self.batch_norm(features)
        features = F.selu(features)
        # Actor head.
        action = F.selu(self.action1(features))
        action = torch.cat([action, variables], 1)
        action = self.batch_norm_action(action)
        action = self.action2(action)
        return action, features

    def transform_input(self, screen, variables):
        """Stack the most recent frame_num frames per environment and wrap
        the tensors in (old-style, pre-0.4 PyTorch) autograd Variables."""
        screen_batch = []
        if self.frame_num > 1:
            if self.screens is None:
                # BUGFIX: the original used `[[]] * len(screen)`, which makes
                # every slot alias ONE shared list, so frames from different
                # environments were appended to the same history. Build
                # independent lists instead.
                self.screens = [[] for _ in range(len(screen))]
            for idx, screens in enumerate(self.screens):
                # Keep at most frame_num frames, dropping the oldest.
                if len(screens) >= self.frame_num:
                    screens.pop(0)
                screens.append(screen[idx])
                if len(screens) == 1:
                    # First frame for this environment: replicate it so the
                    # stack immediately contains frame_num entries.
                    for i in range(self.frame_num - 1):
                        screens.append(screen[idx])
                screen_batch.append(torch.cat(screens, 0))
            screen = torch.stack(screen_batch)
        # volatile= is the pre-0.4 PyTorch mechanism to disable autograd
        # during inference; kept for compatibility with the rest of the code.
        screen = cuda.Variable(screen, volatile=not self.training)
        variables = cuda.Variable(variables / 100, volatile=not self.training)
        return screen, variables

    def set_terminal(self, terminal):
        """Clear the frame history of every environment whose terminal flag
        is 0 (episode finished), so the next episode starts fresh."""
        if self.screens is not None:
            indexes = torch.nonzero(terminal == 0).squeeze()
            for idx in range(len(indexes)):
                self.screens[indexes[idx]] = []
# Pairs the log-probability of the chosen action with the critic's value estimate.
ModelOutput = namedtuple('ModelOutput', ['log_action', 'value'])
class AdvantageActorCriticMap(BaseModel):
    """Advantage actor-critic head on top of BaseModel.

    Accumulates per-step outputs, rewards and discounts over an episode and
    computes the combined policy/value loss in backward().
    """
    def __init__(self, cfg):
        super().__init__(cfg)
        if cfg['base_model'] is not None:
            # load weights from the base model
            base_model = torch.load(cfg['base_model'])
            self.load_state_dict(base_model.state_dict())
            del base_model
        if cuda.USE_CUDA:
            super().cuda()
        self.discount = cfg['episode_discount']
        # Per-episode accumulators, cleared by reset().
        self.outputs = []
        self.rewards = []
        self.discounts = []
    def reset(self):
        """Clear the per-episode accumulators."""
        self.outputs = []
        self.rewards = []
        self.discounts = []
    def forward(self, screen, variables):
        """Pick an action; in training mode also compute and record the
        critic value and the chosen action's log-probability."""
        action_prob, input = super().forward(screen, variables)
        if not self.training:
            # Evaluation: deterministic argmax action, no value needed.
            _, action = action_prob.max(1, keepdim=True)
            return action, None
        # Epsilon-greedy exploration: with probability 0.1 take a uniformly
        # random action, otherwise the argmax action.
        if random.random() < 0.1:
            action = torch.LongTensor(action_prob.size(0), 1).random_(0, action_prob.size(1))
            action = cuda.Variable(action)
            if cuda.USE_CUDA:
                action = action.cuda()
        else:
            _, action = action_prob.max(1, keepdim=True)
        # value prediction - critic
        value = F.selu(self.value1(input))
        value = torch.cat([value, variables], 1)
        value = self.batch_norm_value(value)
        value = self.value2(value)
        # Record log-prob of the taken action and the value for backward().
        action_prob = F.log_softmax(action_prob, dim=1)
        self.outputs.append(ModelOutput(action_prob.gather(-1, action), value))
        return action, value
    def get_action(self, state):
        """Convenience wrapper: transform a raw state and return the action tensor."""
        action, _ = self.forward(*self.transform_input(state.screen, state.variables))
        return action.data
    def set_reward(self, reward):
        # Rewards are scaled by 0.01; the multiplication already yields a new
        # tensor, so no clone() is needed.
        self.rewards.append(reward * 0.01) # no clone() b/c of * 0.01
    def set_terminal(self, terminal):
        """Record the per-environment discount (zeroed where the episode ended)."""
        super().set_terminal(terminal)
        self.discounts.append(self.discount * terminal)
    def backward(self):
        """Compute discounted returns, the A2C loss (policy + value + L2 +
        EWC regularization) and backpropagate. Returns (episode reward,
        loss, ewc) as Python scalars.

        NOTE(review): .data[0] indexing and the Variable usage above target
        pre-0.4 PyTorch; on modern versions this would need .item().
        """
        # calculate step returns in reverse order, bootstrapping from the
        # last recorded value estimate
        rewards = self.rewards
        rew = torch.stack(self.rewards, dim=0)
        returns = torch.Tensor(len(rewards) - 1, *self.outputs[-1].value.data.size())
        step_return = self.outputs[-1].value.data.cpu()
        for i in range(len(rewards) - 2, -1, -1):
            step_return.mul_(self.discounts[i]).add_(rewards[i])
            returns[i] = step_return
        if cuda.USE_CUDA:
            returns = returns.cuda()
        # calculate losses
        policy_loss = 0
        value_loss = 0
        steps = len(self.outputs) - 1
        for i in range(steps):
            # Advantage = return - value estimate (detached via .data).
            advantage = cuda.Variable(returns[i] - self.outputs[i].value.data)
            policy_loss += -self.outputs[i].log_action * advantage
            value_loss += F.smooth_l1_loss(self.outputs[i].value, cuda.Variable(returns[i]))
        # Small L2 penalty over all parameters.
        weights_l2 = 0
        for param in self.parameters():
            weights_l2 += param.norm(2)
        loss = policy_loss.mean() / steps + value_loss / steps + 0.00001 * weights_l2
        # Elastic-weight-consolidation penalty; ewc_reg is presumably set up
        # by the AACBase base class -- TODO confirm.
        ewc = self.ewc_reg.regularize(self.named_parameters())
        if cuda.USE_CUDA:
            ewc = ewc.cuda()
        loss = loss + ewc
        loss.backward()
        # reset state
        self.reset()
        # episode average reward, rew size: [40x20x1]
        ep_rew = torch.mean(torch.sum(rew, dim=0)) * 100
        return ep_rew, loss.data[0], ewc.data[0]
| 37.078431 | 98 | 0.574167 | 6,451 | 0.852856 | 0 | 0 | 0 | 0 | 0 | 0 | 1,289 | 0.170412 |
f6a95519c740e8ae96e87997c89a4bc0b5d76c61 | 1,029 | py | Python | questions/8.py | xiaochus/LeetCode | bf4d7a39fd6b0fb2682490f90999cb218a910e14 | [
"MIT"
] | 1 | 2018-06-18T04:40:23.000Z | 2018-06-18T04:40:23.000Z | questions/8.py | xiaochus/LeetCode | bf4d7a39fd6b0fb2682490f90999cb218a910e14 | [
"MIT"
] | null | null | null | questions/8.py | xiaochus/LeetCode | bf4d7a39fd6b0fb2682490f90999cb218a910e14 | [
"MIT"
] | 1 | 2020-02-03T12:58:26.000Z | 2020-02-03T12:58:26.000Z | """8. String to Integer (atoi)
Implement atoi to convert a string to an integer.
Hint: Carefully consider all possible input cases. If you want a
challenge, please do not see below and ask yourself what are the
possible input cases.
Notes: It is intended for this problem to be specified vaguely
(ie, no given input specs). You are responsible to gather all the
input requirements up front.
"""
class Solution:
    """LeetCode 8: convert a string to a 32-bit signed integer (atoi)."""
    def myAtoi(self, str):
        """
        :type str: str
        :rtype: int
        """
        s = str.strip()
        if not s:
            return 0
        # Optional leading sign.
        sign = 1
        pos = 0
        if s[0] == '-' or s[0] == '+':
            sign = -1 if s[0] == '-' else 1
            pos = 1
        # Accumulate consecutive ASCII digits; stop at the first non-digit.
        value = 0
        while pos < len(s) and '0' <= s[pos] <= '9':
            value = value * 10 + (ord(s[pos]) - ord('0'))
            pos += 1
        # Apply the sign and clamp to the signed 32-bit range.
        return max(-2**31, min(2**31 - 1, sign * value))
| 27.078947 | 65 | 0.556851 | 629 | 0.611273 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.453839 |
f6a98ab922bb283e3d03fe3c75cc8eb473f34455 | 95 | py | Python | models/dbconfig.py | FusionX9000/Reddit-Saves-Manager | 6109f3b40a992f99ba95346b1e39a2df07e1bcb7 | [
"MIT"
] | null | null | null | models/dbconfig.py | FusionX9000/Reddit-Saves-Manager | 6109f3b40a992f99ba95346b1e39a2df07e1bcb7 | [
"MIT"
] | 1 | 2021-06-02T01:12:35.000Z | 2021-06-02T01:12:35.000Z | models/dbconfig.py | FusionX9000/Reddit-Saves-Manager | 6109f3b40a992f99ba95346b1e39a2df07e1bcb7 | [
"MIT"
] | null | null | null | database = "database"
user = "postgres"      # database role to connect as
password = "password"  # placeholder credential -- override for real deployments
host = "localhost"     # database server host
port = "5432"          # PostgreSQL's default port (kept as a string)
f6a9f53335a90945beeac0881642656ecefd349b | 318 | py | Python | Programs/HowToUse.py | aposum23/laborathory14 | 03e251e267b5783b12ea9b2f4ba2da41194f349e | [
"MIT"
] | null | null | null | Programs/HowToUse.py | aposum23/laborathory14 | 03e251e267b5783b12ea9b2f4ba2da41194f349e | [
"MIT"
] | null | null | null | Programs/HowToUse.py | aposum23/laborathory14 | 03e251e267b5783b12ea9b2f4ba2da41194f349e | [
"MIT"
] | null | null | null | def mul(a, b):
return a * b
mul(3, 4)
#12
def mul5(a):
return mul(5,a)
mul5(2)
#10
def mul(a):
def helper(b):
return a * b
return helper
mul(5)(2)
#10
def fun1(a):
x = a * 3
def fun2(b):
nonlocal x
return b + x
return fun2
test_fun = fun1(4)
test_fun(7)
#19
| 9.9375 | 20 | 0.512579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.037736 |
f6ab9a9d8d75aed7d16ae4d945c1c5fe7a289970 | 152 | py | Python | py_tdlib/constructors/set_bot_updates_status.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/set_bot_updates_status.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/set_bot_updates_status.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class setBotUpdatesStatus(Method):
    """Payload class for TDLib's setBotUpdatesStatus method.

    The trailing ``# type:`` comments follow the library's field-annotation
    convention and are kept intact.
    """
    pending_update_count = None # type: "int32"
    error_message = None # type: "string"
| 21.714286 | 45 | 0.743421 | 120 | 0.789474 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.203947 |
f6ac1d6095f521421f885471df134a2eb8d6bf74 | 811 | py | Python | Chapter 9/code1.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 22 | 2016-06-07T07:52:35.000Z | 2021-11-08T13:12:21.000Z | Chapter 9/code1.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 2 | 2016-05-23T08:20:54.000Z | 2018-07-02T08:21:32.000Z | Chapter 9/code1.py | PacktPublishing/Mastering-IPython-4 | d752f7ba38e0c9399a83d57da406fe26152f272b | [
"MIT"
] | 27 | 2016-05-23T08:19:51.000Z | 2021-08-31T02:46:00.000Z | """
This is an abbreviated version of my random number generator test suite.
It uses the pytest framework. It does not do much in this form.
"""
import numpy as np
import scipy.stats
import random
class TestRandoms:
    """
    Main test class.
    Normally it would hold all the tests, plus any setup and teardown fixtures.
    """
    def test_builtin(self):
        """
        Chi-square test of the built-in random number generator: draw 10000
        uniform integers in [0, 9] and assert the bucket counts are not
        significantly non-uniform (p > 0.05).
        """
        num_tests = 10000
        vals = [0] * 10
        for _ in range(num_tests):
            vals[random.randint(0, 9)] += 1
        # BUGFIX: the original passed self.vals, but `vals` is a local
        # variable -- the test always died with AttributeError before the
        # chi-square statistic was ever computed.
        chi2, p = scipy.stats.chisquare(vals)
        assert p > 0.05
def foo():
    """A stand-alone function outside any class, kept purely as an example."""
    return None
| 24.575758 | 79 | 0.614057 | 518 | 0.638718 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.509248 |
f6ad70965cd923a8fabd51c9477be6519ca96f2f | 281 | py | Python | tests/test.py | J35P312/vcf2cytosure | e2867afc1a697c09a1f2f400e1a5ac3365499234 | [
"MIT"
] | 1 | 2019-01-21T11:37:43.000Z | 2019-01-21T11:37:43.000Z | tests/test.py | J35P312/vcf2cytosure | e2867afc1a697c09a1f2f400e1a5ac3365499234 | [
"MIT"
] | 39 | 2017-04-06T09:30:09.000Z | 2022-02-06T10:32:09.000Z | tests/test.py | J35P312/vcf2cytosure | e2867afc1a697c09a1f2f400e1a5ac3365499234 | [
"MIT"
] | 3 | 2017-04-06T09:28:24.000Z | 2020-06-25T09:30:26.000Z | import pytest
from unittest.mock import patch
import vcf2cytosure
def test_version_argument():
    # Running main() with --version should terminate the program (SystemExit)
    # with exit code 0; sys.argv is patched to simulate the command line.
    with patch('sys.argv', ['vcf2cytosure.py','--version']):
        with pytest.raises(SystemExit) as excinfo:
            vcf2cytosure.main()
        assert excinfo.value.code == 0
| 23.416667 | 60 | 0.683274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.135231 |