Dataset schema (one row per column; length ranges and distinct-value counts as reported by the dataset viewer):

| column | type | lengths / distinct values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 2 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 118 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | lengths 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | lengths 2 to 10.3M |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 0 to 212 |
blob_id: a0820891b0c99031ce98522037cd96f4e5de486c | directory_id: 359cc4d7961033748e665979bd5280349398810f | content_id: 376d2de24fa60fba5a22300e5b3cf683b9a0a2c4
path: /LunchCartServer.py | repo_name: DelveInc/LunchCart | branch_name: refs/heads/master | snapshot_id: 00ec2c8ffb8904b1cfd8e1910060d88b03e1337c | revision_id: d8400a0d41bbd3d026c128dca17dcb7f45b35d8c
detected_licenses: [] | license_type: no_license | visit_date: 2021-05-28T01:41:27.624812 | revision_date: 2014-04-15T19:09:45 | committer_date: 2014-04-15T19:09:45
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,808 | extension: py
content:
###################################################################
# LunchCartServer.py
# written by Jack Boland
#
# Portions of socket code modified from:
# http://www.tutorialspoint.com/python/python_networking.htm
#
###################################################################
import smtplib
import socket
import time

# Email Settings
content = "The Lunch Cart is on the move."  # Establish what the email will say
recipient = "jcboland91@gmail.com"          # Determine who will receive the email
sender = "DCILunchCart@gmail.com"           # Send email from this address
password = "***********"                    # Password of the sending email (redacted)

def sendEmail(emailAddr):
    mail = smtplib.SMTP('smtp.gmail.com', 587)
    mail.ehlo()
    mail.starttls()
    mail.login(sender, password)
    mail.sendmail(sender, emailAddr, content)
    # Confirm that the message was sent
    print("Sent")
    mail.close()

def checkLevel(incoming):
    incoming = int(float(incoming))
    if incoming == 1:
        print("Send Email")
        sendEmail("jack.boland@design-concepts.com")

message = '0'  # Incoming message (renamed from `input`, which shadowed the built-in)

# Open up a socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create a socket object
host = socket.gethostname()                            # Get local machine name
port = 12345                                           # Reserve a port
ip = socket.gethostbyname(socket.gethostname())
print(ip)
s.bind((host, port))  # Bind to the port
s.listen(5)           # Now wait for client connection
while True:
    msg = 'Thank you for connecting'
    c, addr = s.accept()                 # Establish connection with client
    print('Got Connection from ', addr)  # Spits back the IP address of the client
    c.send(msg.encode('ASCII'))
    while True:
        data = str(c.recv(1024), 'ASCII')
        if not data:  # fixed: break on client disconnect so the close below is reachable
            break
        checkLevel(data)
        print("Done Sleeping")
        c.send("Read this?".encode('ASCII'))
    c.close()  # Close the connection
    exit()

authors: ["jcboland91@gmail.com"] | author_id: jcboland91@gmail.com
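The server above expects a client that reports the cart's level as a numeric string; checkLevel() sends the email when it receives "1". A minimal matching client, as a sketch that assumes the server is running on the same machine on port 12345 (host and timing are illustrative, not part of the original record):

import socket

c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect((socket.gethostname(), 12345))  # same host/port the server binds
print(c.recv(1024).decode('ASCII'))       # "Thank you for connecting"
c.send("1".encode('ASCII'))               # level "1" triggers checkLevel() -> sendEmail()
print(c.recv(1024).decode('ASCII'))       # "Read this?"
c.close()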
blob_id: d4af9f129978a38539bdfade208945ce2455139c | directory_id: 55b2873fddb18860d78767f43381e36b844d893c | content_id: ba0a38cf042c4e2791fa7f461b9e297ef2958094
path: /Python/Python_Abs_Begin/Mod3_Conditionals/3_1-4_2-Str_comp_if.py | repo_name: scotttct/tamuk | branch_name: refs/heads/master | snapshot_id: 0b972b6966fda0f374face0ccf0324beece69a2b | revision_id: 7459691b0d1cf5adb21b1e7b98f7abbff3f4f6ae
detected_licenses: [] | license_type: no_license | visit_date: 2020-12-11T08:48:34.739951 | revision_date: 2020-03-21T13:21:37 | committer_date: 2020-03-21T13:21:37
github_id: 233,805,905 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 844 | extension: py
content:
#CONDITIONALS: COMPARISON OPERATORS WITH IF
# [ ] get input for a variable, answer, and ask user 'What is 8 + 13? : '
# [ ] print messages for correct answer "21" or incorrect answer using if/else
# note: input returns a "string"

#My solution below
# answer = "You are correct: 8 + 13 = 21"
# user_input = input("What is the sum of 8 + 13? ")
# wrong_answer = ("Sorry! That was not what 8 + 13 equals! ")
# x = 21
# if user_input == x:
#     print(answer)
# else:
#     print(wrong_answer)

# found on github
#define the variable
variable = 21
#ask the question and store the reply; input() returns a string, so convert it
reply = int(input("What is 8 + 13? : "))
#compare the converted input to the variable in an if/else statement
if reply == variable:
    print("correct")
else:
    print("incorrect")
#Both earlier attempts printed "incorrect" because input() returns a string,
#which never equals the int 21, and the second attempt never stored the reply
#at all: it compared the built-in input function object itself to the variable.

authors: ["scotttct@outlook.com"] | author_id: scotttct@outlook.com

blob_id: 1984800ed08598b5158744cfc7cb4dff77f2dd8b | directory_id: 9febc3008d825a253bd870f05fe128c356586a9c | content_id: 7f7284ef872ca24a30fa581d73b2b8414ae5fa5e
path: /venv/lib/python3.7/site-packages/github/Stargazer.pyi | repo_name: bhaving07/pyup | branch_name: refs/heads/main | snapshot_id: 7ca947a0f4e6b6e1ca1e1b7c49c6bd3d99943c28 | revision_id: 17ad21a2957c5cce91ad0cf5f75853a3182806d2
detected_licenses: ["MIT", "LGPL-3.0-or-later"] | license_type: permissive | visit_date: 2023-02-01T14:16:40.959832 | revision_date: 2020-12-18T04:34:27 | committer_date: 2020-12-18T04:34:27
github_id: 322,286,443 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2020-12-17T13:14:46 | gha_created_at: 2020-12-17T12:20:49 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 459 | extension: pyi
content:
from datetime import datetime
from typing import Any, Dict

from github.GithubObject import NonCompletableGithubObject
from github.NamedUser import NamedUser

class Stargazer(NonCompletableGithubObject):
    def __repr__(self) -> str: ...
    def _initAttributes(self) -> None: ...
    def _useAttributes(self, attributes: Dict[str, Any]) -> None: ...
    @property
    def starred_at(self) -> datetime: ...
    @property
    def user(self) -> NamedUser: ...

authors: ["bvgolakiya4@gmail.com"] | author_id: bvgolakiya4@gmail.com
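The stub above types PyGithub's Stargazer objects, whose two public properties are starred_at and user. A short usage sketch, assuming a PyGithub client (the token is a placeholder):

from github import Github

gh = Github("YOUR_TOKEN")                    # placeholder credentials
repo = gh.get_repo("PyGithub/PyGithub")
for sg in repo.get_stargazers_with_dates():  # yields Stargazer objects
    print(sg.user.login, sg.starred_at)      # the two typed properties above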
blob_id: e5bf01108d918e80ad00aefbabb28a185806e465 | directory_id: a1b7c1357181320b272ef4c72b70d22600a407c1 | content_id: 227dee5670a125162ef469064f1f4ac63a54b340
path: /examples/boilerplates/base_test_case.py | repo_name: BarryYBL/SeleniumBase | branch_name: refs/heads/master | snapshot_id: 5c96e21eaebd45e2f6ac26d5bd563b3ba300e6f6 | revision_id: e3cb810331183fa003cea8af81057e4136dfd660
detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2022-12-04T11:34:20.134294 | revision_date: 2020-08-28T05:45:24 | committer_date: 2020-08-28T05:45:24
github_id: 290,998,663 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2020-08-28T08:52:44 | gha_created_at: 2020-08-28T08:52:44 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,522 | extension: py
content:
'''
You can use this as a boilerplate for your test framework.
Define your customized library methods in a master class like this.
Then have all your test classes inherit it.
BaseTestCase will inherit SeleniumBase methods from BaseCase.
With Python 3, simplify "super(...)" to super().setUp() and super().tearDown()
'''
from seleniumbase import BaseCase

class BaseTestCase(BaseCase):
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # <<< Run custom setUp() code for tests AFTER the super().setUp() >>>

    def tearDown(self):
        self.save_teardown_screenshot()
        if self.has_exception():
            # <<< Run custom code if the test failed. >>>
            pass
        else:
            # <<< Run custom code if the test passed. >>>
            pass
        # (Wrap unreliable tearDown() code in a try/except block.)
        # <<< Run custom tearDown() code BEFORE the super().tearDown() >>>
        super(BaseTestCase, self).tearDown()

    def login(self):
        # <<< Placeholder. Add your code here. >>>
        # Reduce duplicate code in tests by having reusable methods like this.
        # If the UI changes, the fix can be applied in one place.
        pass

    def example_method(self):
        # <<< Placeholder. Add your code here. >>>
        pass

'''
# Now you can do something like this in your test files:

from base_test_case import BaseTestCase

class MyTests(BaseTestCase):
    def test_example(self):
        self.login()
        self.example_method()
'''

authors: ["mdmintz@gmail.com"] | author_id: mdmintz@gmail.com

blob_id: 5343cda673d0c192d79a0e79165b52797487df55 | directory_id: cebf0d2fa977776f35ae0001935859176b80441a | content_id: ceeb492243d4ae75de80219ae5c055b865a135fe
path: /GPyT/make_data_copy.py | repo_name: objoyful/our-code | branch_name: refs/heads/master | snapshot_id: bb5fbca0f7ad937724896266c2ecbc080facfb08 | revision_id: 02640b8359023194b93c0587835ff9217d6fa467
detected_licenses: [] | license_type: no_license | visit_date: 2023-08-30T23:32:59.641061 | revision_date: 2023-08-29T21:35:35 | committer_date: 2023-08-29T21:35:35
github_id: 200,138,999 | star_events_count: 2 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: 2019-08-19T20:43:28 | gha_created_at: 2019-08-02T00:57:21 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,037 | extension: py
content:
import os
from curtsies.fmtfuncs import red, bold, green, on_blue, yellow, blue, cyan
import time

MAX_CHAR_LENGTH = 512
MIN_CHAR_LENGTH = 256
NEWLINECHAR = "<N>"

d = os.path.join("GPyT", "repos_test")

file_paths = []
for dirpath, dirnames, filenames in os.walk(d):
    for f in filenames:
        full_path = os.path.join(dirpath, f)
        file_paths.append(full_path)

print(len(file_paths))

with open(os.path.join("GPyT", 'python_code.txt'), 'a') as m:
    for file in file_paths:
        f = open(file, 'r').read()
        f = f.replace('\n', NEWLINECHAR)
        if 100 < len(f) < MAX_CHAR_LENGTH:
            print(f)
            m.write(f + '\n')
        else:
            # Long files: accumulate paragraph-sized splits until they fit the length window
            splits = f.split(NEWLINECHAR * 2)
            segments = ''
            for split in splits:
                if MIN_CHAR_LENGTH <= len(segments) <= MAX_CHAR_LENGTH:
                    m.write(segments + '\n')
                    segments = split
                else:
                    segments += split
                print(len(segments))

authors: ["WilliamKennethCarden@yahoo.com"] | author_id: WilliamKennethCarden@yahoo.com
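Each line the script writes is one training sample with real newlines replaced by the <N> token. Reading a sample back is the inverse transform; a small sketch assuming the output file produced above exists:

NEWLINECHAR = "<N>"

with open("GPyT/python_code.txt") as f:
    sample = f.readline().rstrip("\n")
print(sample.replace(NEWLINECHAR, "\n"))  # restore the original line breaks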
blob_id: 5827cfe9e8502757d7beaf88ca366d7649c43465 | directory_id: c7f9c1f8e56c69d405df48c3c57eee82b325a80e | content_id: 75140018819717910268cd61df637e1063e10d0b
path: /tests/test_cli.py | repo_name: vikramcse/pygit-user-info | branch_name: refs/heads/master | snapshot_id: 5ede7f3dc718c34322607de3dae884550ac9ee8d | revision_id: 76869e83186d4e6c9858c0aecaf71c6e1250bfe8
detected_licenses: [] | license_type: no_license | visit_date: 2020-12-02T11:20:43.797782 | revision_date: 2017-07-08T16:16:28 | committer_date: 2017-07-08T16:16:28
github_id: 96,629,859 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 715 | extension: py
content:
import pytest
from click.testing import CliRunner

from git_info import cli

@pytest.fixture
def runner():
    return CliRunner()

def test_cli(runner):
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert not result.exception
    assert result.output.strip() == 'Hello, world.'

def test_cli_with_option(runner):
    result = runner.invoke(cli.main, ['--as-cowboy'])
    assert not result.exception
    assert result.exit_code == 0
    assert result.output.strip() == 'Howdy, world.'

def test_cli_with_arg(runner):
    result = runner.invoke(cli.main, ['Vikram'])
    assert result.exit_code == 0
    assert not result.exception
    assert result.output.strip() == 'Hello, Vikram.'

authors: ["vikramcse.10@gmail.com"] | author_id: vikramcse.10@gmail.com
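A minimal click command that would satisfy all three tests above, as a sketch (the real git_info.cli may differ):

import click

@click.command()
@click.argument('name', default='world')
@click.option('--as-cowboy', is_flag=True, help='Greet as a cowboy.')
def main(name, as_cowboy):
    greet = 'Howdy' if as_cowboy else 'Hello'
    click.echo('{0}, {1}.'.format(greet, name))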
blob_id: da9bcc35ddb0259e63a04370cd3a938053e4e4e4 | directory_id: 7d1e164f64aea0b317183b1b986a9f271b9e4d69 | content_id: b6defbb1321d27498d4bd7a3cbec520af76fa0bb
path: /project/settings.py | repo_name: tinnguyen18920/test_deploy | branch_name: refs/heads/master | snapshot_id: fb27659b079e5a6b8dfa396fcf71b2a485fd8ae5 | revision_id: 43b2551015fbaad094ff6cb7f7b2d2cf44076484
detected_licenses: [] | license_type: no_license | visit_date: 2023-08-15T09:11:15.398423 | revision_date: 2020-05-04T07:22:06 | committer_date: 2020-05-04T07:22:06
github_id: 261,101,347 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-09-22T18:57:59 | gha_created_at: 2020-05-04T06:53:57 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,227 | extension: py
content:
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i^^twi-)5iiz!=@gnggi)u36x2%jnurak_02%^vdvg84ckbl+^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['tn-test-deploy.herokuapp.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'

authors: ["tinnguyen18920@gmail.com"] | author_id: tinnguyen18920@gmail.com
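The settings file ships a hard-coded SECRET_KEY and DEBUG = True, both flagged by its own SECURITY WARNING comments. One common hardening step is to read them from the environment; a sketch with hypothetical variable names (not part of the original project):

import os

# Hypothetical environment-based configuration; set these in the deployment env.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'insecure-dev-only-key')
DEBUG = os.environ.get('DJANGO_DEBUG', 'False') == 'True'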
blob_id: 96f478ef55a45b8eaf34ab68e1d50cded4d0b26f | directory_id: 93b866284ca1ac29c5005555f2cb30454a0fb5cf | content_id: 40cbb8dac25dc2bdde8a7f63427cfd7b87aca50e
path: /Problems/18-Problem/Problem 18.py | repo_name: FrancoisdeFouchecour/Projet-Euler | branch_name: refs/heads/master | snapshot_id: c2b17d1e35fbd10a708ba3221825a62a17818382 | revision_id: 0cf70457c0418264c2eff7cdd0e92a07b61ecb07
detected_licenses: [] | license_type: no_license | visit_date: 2021-12-25T05:44:08.054648 | revision_date: 2021-11-27T21:47:42 | committer_date: 2021-11-27T21:47:42
github_id: 168,253,571 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,106 | extension: py
content:
import time

problem_number = 18
test_solution = 23

#read data
number_file = open("data.txt", "r")
raw_data = number_file.read()
number_file.close()

size = raw_data.count("\n")
matrix_value = [[0 for j in range(size)] for i in range(size)]
index_ver = 0
index_hor = 0
for index in range(len(raw_data)):
    if index % 3 == 1:
        matrix_value[index_ver][index_hor] += int(raw_data[index - 1:index + 1])
    if index % 3 == 2:
        if raw_data[index] == "\n":
            index_ver += 1
            index_hor = 0
        elif raw_data[index] == " ":
            index_hor += 1

#Solution
def solution(matrix_input):
    size = len(matrix_input)
    matrix_result = [[0 for j in range(size)] for i in range(size)]
    #first column
    matrix_result[0][0] = matrix_input[0][0]
    for i in range(1, size):
        matrix_result[i][0] = matrix_result[i - 1][0] + matrix_input[i][0]
    for i in range(1, size):
        for j in range(1, i + 1):
            matrix_result[i][j] += max(matrix_result[i - 1][j], matrix_result[i - 1][j - 1])
            matrix_result[i][j] += matrix_input[i][j]
    return max(matrix_result[size - 1])

#Test & Result
fichier = open("Solution " + str(problem_number) + ".txt", "w")
string = ""

begin_test = time.time()
test_value = solution([[3, 0, 0, 0], [7, 4, 0, 0], [2, 4, 6, 0], [8, 5, 9, 3]])
end_test = time.time()
test_time = end_test - begin_test

string += "TEST #1\n\n"
string += "Output: " + str(test_value) + "\n"
string += "Answer: " + str(test_solution) + "\n"
string += "Computation time: " + str(test_time) + " sec\n"
string += "Verification: "
if (test_value == test_solution):
    string += "TRUE"
else:
    string += "FALSE"

begin_problem = time.time()
problem_value = solution(matrix_value)
end_problem = time.time()
problem_time = end_problem - begin_problem

string += "\n\n\nRESULT PROBLEM #" + str(problem_number) + "\n\n"
string += "Output: " + str(problem_value) + "\n"
string += "Computation time: " + str(problem_time) + " sec\n"
string += "\n\n\nCurrent date & time: " + time.strftime("%c")

fichier.write(string)
fichier.close()

authors: ["francois.de-salivet-de-fouchecour@polytechnique.edu"] | author_id: francois.de-salivet-de-fouchecour@polytechnique.edu
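solution() above implements the standard maximum-path recurrence best[i][j] = triangle[i][j] + max(best[i-1][j-1], best[i-1][j]) on a zero-padded square matrix. The same dynamic program on a ragged triangle, as a self-contained check against the known test answer 23:

def max_path(triangle):
    best = [triangle[0][:]]
    for i in range(1, len(triangle)):
        row = []
        for j, v in enumerate(triangle[i]):
            left = best[i - 1][j - 1] if j > 0 else 0            # parent up-left
            up = best[i - 1][j] if j < len(best[i - 1]) else 0   # parent straight up
            row.append(v + max(left, up))
        best.append(row)
    return max(best[-1])

assert max_path([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # path 3 -> 7 -> 4 -> 9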
blob_id: d8224aa3c4831615590029c8bfb0449c9116b0ae | directory_id: 51c013bfbae77e41289d68f80cd8b4a02354db83 | content_id: a694243b10e703bbaec0472d0d186024bfc63a96
path: /Ticket/models.py | repo_name: monica-bhaskhar/DjangoRailwayTicket | branch_name: refs/heads/master | snapshot_id: d4d44865508753c1eb6ff77a3fe518737c7fe724 | revision_id: 7ea8a48d7618a79e4b51e4196811c562880efd50
detected_licenses: [] | license_type: no_license | visit_date: 2021-03-26T14:32:19.777278 | revision_date: 2020-03-17T16:32:08 | committer_date: 2020-03-17T16:32:08
github_id: 247,702,146 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 865 | extension: py
content:
from django.db import models

# Create your models here.
class BookTicket(models.Model):
    GENDER_CHOICES = (('Male', 'Male'), ('Female', 'Female'),)
    BERTH_CHOICES = (('Upper', 'Upper'), ('Lower', 'Lower'), ('Middel', 'Middel'), ('Side', 'Side'))
    STATUS_CHOICES = (('Confirmed', 'Confirmed'), ('RAC ', 'RAC'), ('Waiting', 'Waiting'))
    COACH_CHOICES = (('S1', 'S1'), ('S2', 'S2'), ('S3', 'S3'), ('S4', 'S4'))
    name = models.CharField(max_length=100)
    age = models.IntegerField(blank=True, null=True)
    gender = models.CharField(max_length=6, choices=GENDER_CHOICES)
    berth_preference = models.CharField(max_length=10, choices=BERTH_CHOICES)
    coach = models.CharField(max_length=10, choices=COACH_CHOICES)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES)

    def __str__(self):
        return self.name

authors: ["monicabhaskhar1995@gmail.com"] | author_id: monicabhaskhar1995@gmail.com
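A hedged usage sketch of the model above with the Django ORM (assumes the app is installed and migrations have been applied; the values are illustrative):

from Ticket.models import BookTicket

# Create and save one ticket, then query by status.
ticket = BookTicket.objects.create(
    name="Asha", age=30, gender="Female",
    berth_preference="Lower", coach="S2", status="Confirmed",
)
print(BookTicket.objects.filter(status="Confirmed").count())
print(ticket)  # __str__ returns the passenger name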
blob_id: 211d5d79d96070cd6b0bb086a10b1a04e4070a09 | directory_id: 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | content_id: 985aa0d9337713e9470ba9393f4a12982e617e49
path: /test/legacy_test/test_dist_sparse_load_ps0.py | repo_name: PaddlePaddle/Paddle | branch_name: refs/heads/develop | snapshot_id: b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | revision_id: 22a11a60e0e3d10a3cf610077a3d9942a6f964cb
detected_licenses: ["Apache-2.0"] | license_type: permissive | visit_date: 2023-08-17T21:27:30.568889 | revision_date: 2023-08-17T12:38:22 | committer_date: 2023-08-17T12:38:22
github_id: 65,711,522 | star_events_count: 20,414 | fork_events_count: 5,891 | gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T19:20:51 | gha_created_at: 2016-08-15T06:59:08 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,710 | extension: py
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import tempfile
import unittest

import numpy as np

import paddle
from paddle import fluid
from paddle.distributed.fleet import fleet
from paddle.distributed.fleet.base import role_maker

class SparseLoadOp(unittest.TestCase):
    """Test load operator."""

    def net(self, emb_array, fc_array):
        with fluid.unique_name.guard():
            dense_input = paddle.static.data(
                'input', shape=[None, 1], dtype="int64"
            )
            emb = paddle.static.nn.embedding(
                input=dense_input,
                is_sparse=True,
                size=[10, 10],
                param_attr=fluid.ParamAttr(
                    name="embedding",
                    initializer=paddle.nn.initializer.Assign(emb_array),
                ),
            )
            fc1 = paddle.static.nn.fc(
                x=emb,
                size=10,
                activation="relu",
                weight_attr=fluid.ParamAttr(
                    name='fc',
                    initializer=paddle.nn.initializer.Assign(fc_array),
                ),
            )
            loss = paddle.mean(fc1)
            return loss

    def save_origin_model(self, emb_array, fc_array):
        startup_program = fluid.framework.Program()
        test_program = fluid.framework.Program()
        with fluid.framework.program_guard(test_program, startup_program):
            with fluid.unique_name.guard():
                loss = self.net(emb_array, fc_array)
                optimizer = paddle.optimizer.Adam(1e-3)
                optimizer.minimize(loss)
                exe = fluid.Executor(fluid.CPUPlace())
                exe.run(startup_program)
                model_path = tempfile.mkdtemp()
                paddle.distributed.io.save_persistables(
                    executor=exe, dirname=model_path
                )
                return model_path

@unittest.skip(reason="Skip unstable ut, need rewrite with new implement")
class TestSparseLoadOpCase1(SparseLoadOp):
    def test_2ps_0_load(self):
        # init No.0 server env
        env = {}
        env["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002"
        env["PADDLE_TRAINERS_NUM"] = str(2)
        env["TRAINING_ROLE"] = "PSERVER"
        env["PADDLE_PORT"] = "4001"
        env["POD_IP"] = "127.0.0.1"
        for k, v in env.items():
            os.environ[k] = str(v)
        """
        array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
               [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
               [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],
               [0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3],
               [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4],
               [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
               [0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
               [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7],
               [0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
               [0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]])
        """
        emb_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10)
        fc_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10)
        model_path = self.save_origin_model(emb_array, fc_array)
        role = role_maker.PaddleCloudRoleMaker()
        fleet.init(role)
        loss = self.net(emb_array, fc_array)
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.a_sync = True
        optimizer = paddle.optimizer.Adam(1e-3)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(loss)
        fleet.init_server(model_path)
        fc_w = np.array(fluid.global_scope().find_var("fc").get_tensor())
        emb = np.array(
            fluid.global_scope().find_var("embedding.block0").get_tensor()
        )
        # Note: `x.all() == y.all()` compares two scalars, so these are weak checks;
        # with 2 pservers, server 0 presumably holds the even-numbered rows
        # (emb_array[::2]) of the sharded embedding table.
        assert fc_w.all() == fc_array.all()
        assert emb.all() == emb_array[::2].all()
        shutil.rmtree(model_path)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()

authors: ["noreply@github.com"] | author_id: PaddlePaddle.noreply@github.com

blob_id: 1ece4fc6c106792ad39a8138445d999cfbd44a5c | directory_id: 0875769ad6f34845e53aed09d592c850e335f410 | content_id: 26b740b2439e7ee57b5af751ba1add60071d27e0
path: /텍스트파일보고서 생성기.py | repo_name: ggozlo/py.practice | branch_name: refs/heads/main | snapshot_id: bc515ce214cb8b35ec5c949f67287971e97fa667 | revision_id: b3bcdba0880e8ba7de362b7314be545e4fd1fc9a
detected_licenses: [] | license_type: no_license | visit_date: 2023-02-24T22:36:03.595599 | revision_date: 2021-01-31T16:53:33 | committer_date: 2021-01-31T16:53:33
github_id: 333,743,728 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-01-28T12:08:28 | gha_created_at: 2021-01-28T11:53:30 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 730 | extension: py
content:
#___________________________________________________________________________________________________ quiz
# Writes fifty weekly-report text files. (Korean literals kept as written:
# "N주차" = "week N", "주간보고" = "weekly report", "부서" = "department",
# "이름" = "name", "업무 요약" = "work summary".)
num = range(1, 51)
for txt in num:
    report_file = open("{}주차.txt".format(txt), "w", encoding="utf8")
    print("- {} 주차 주간보고 -".format(txt), file=report_file)  # fixed: was .format(num), which printed the whole range object
    print("부서 :", file=report_file)
    print("이름 :", file=report_file)
    print("업무 요약 :", file=report_file)
    report_file.close()  # fixed: the original `report_file.close` never called the method

# The same task with a with-block, which closes each file automatically
for i in range(1, 51):
    with open(str(i) + "주차.txt", "w", encoding="utf8") as report_file:
        report_file.write("- {} 주차 주간보고 -".format(i))
        report_file.write("\n부서 :")
        report_file.write("\n이름 :")
        report_file.write("\n업무 요약 :")

authors: ["78086722+ggozlo@users.noreply.github.com"] | author_id: 78086722+ggozlo@users.noreply.github.com

blob_id: 5f102bf8684c2c36dbbdb998c3397bb7896b09d2 | directory_id: dab492c669ad2173dbac5b9c01c563bbc53a15af | content_id: c0bf2c4a9d610c93b09c0948151d0fe191879dc8
path: /code/utils/pred_ground.py | repo_name: bflashcp3f/textlabs-xwlp-code | branch_name: refs/heads/main | snapshot_id: d7582701efada6e00dc09c1d16993f6b0d99682c | revision_id: b099ec7382316f80355c453b4bd4aa742bf68359
detected_licenses: ["MIT"] | license_type: permissive | visit_date: 2023-03-23T03:56:21.208149 | revision_date: 2021-03-10T02:15:12 | committer_date: 2021-03-10T02:15:12
github_id: 342,127,953 | star_events_count: 6 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,122 | extension: py
content:
import math
import glob
import time
import json
import pickle
import os
import numpy as np
import operator
import time
import torch
import sys
import random
import argparse

from collections import defaultdict, Counter, OrderedDict
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertModel
from transformers import BertForTokenClassification, BertPreTrainedModel, AdamW
from sklearn.model_selection import KFold

WLP_ENT = ['Action']

ID2LABEL = ['ignored', 'rg', 'convert-op', 'seal-op', 'spin-op', 'create-op', \
            'd', 'default-op', 'destroy-op', 'loc', 'm', 'measure-op', 'mix-op', \
            'mod', 'mth', 'remove-op', 's', 'sl', 'temp-treat-op', 'time-op', \
            'transfer-op', 'wash-op']
LABEL2ID = dict((value, key) for key, value in enumerate(ID2LABEL))
NO_RELATION = 'ignored'

WLP_ENT_START = []
for item in WLP_ENT:
    WLP_ENT_START.append("[ENT-" + item + "-START]")

WLP_ENT_END = []
for item in WLP_ENT:
    WLP_ENT_END.append("[ENT-" + item + "-END]")

WLP2TL = {'Reagent': 'rg',
          'Location': 'loc',
          'Amount': 'm',
          'Modifier': 'mod',
          'Time': 's',
          'Temperature': 's',
          'Speed': 's',
          'Generic-Measure': 'm',
          'Device': 'd',
          'Concentration': 'm',
          'Seal': 'sl',
          'Method': 'mth',
          'Size': 'm',
          'Measure-Type': 'm',
          'pH': 'm'}

NONE_TYPE = ['Numerical', 'Mention', 'Misc']

def load_from_jsonl(file_name):
    data_list = []
    with open(file_name) as f:
        for line in f:
            data_list.append(json.loads(line))
    return data_list

def score(key, prediction, verbose=False):
    correct_by_relation = Counter()
    guessed_by_relation = Counter()
    gold_by_relation = Counter()

    # Loop over the data to compute a score
    for row in range(len(key)):
        gold = key[row]
        guess = prediction[row]
        if gold == NO_RELATION and guess == NO_RELATION:
            pass
        elif gold == NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
        elif gold != NO_RELATION and guess == NO_RELATION:
            gold_by_relation[gold] += 1
        elif gold != NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
            gold_by_relation[gold] += 1
            if gold == guess:
                correct_by_relation[guess] += 1

    # Print verbose information
    if verbose:
        print("Per-relation statistics:")
        relations = gold_by_relation.keys()
        longest_relation = 0
        for relation in sorted(relations):
            longest_relation = max(len(relation), longest_relation)
        for relation in sorted(relations):
            # (compute the score)
            correct = correct_by_relation[relation]
            guessed = guessed_by_relation[relation]
            gold = gold_by_relation[relation]
            prec = 1.0
            if guessed > 0:
                prec = float(correct) / float(guessed)
            recall = 0.0
            if gold > 0:
                recall = float(correct) / float(gold)
            f1 = 0.0
            if prec + recall > 0:
                f1 = 2.0 * prec * recall / (prec + recall)
            # (print the score)
            sys.stdout.write(("{:<" + str(longest_relation) + "}").format(relation))
            sys.stdout.write(" P: ")
            if prec < 0.1: sys.stdout.write(' ')
            if prec < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(prec))
            sys.stdout.write(" R: ")
            if recall < 0.1: sys.stdout.write(' ')
            if recall < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(recall))
            sys.stdout.write(" F1: ")
            if f1 < 0.1: sys.stdout.write(' ')
            if f1 < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(f1))
            sys.stdout.write(" #: %d" % gold)
            sys.stdout.write("\n")
        print("")

    # Print the aggregate score
    if verbose:
        print("Final Score:")
    prec_micro = 1.0
    if sum(guessed_by_relation.values()) > 0:
        prec_micro = float(sum(correct_by_relation.values())) / float(sum(guessed_by_relation.values()))
    recall_micro = 0.0
    if sum(gold_by_relation.values()) > 0:
        recall_micro = float(sum(correct_by_relation.values())) / float(sum(gold_by_relation.values()))
    f1_micro = 0.0
    if prec_micro + recall_micro > 0.0:
        f1_micro = 2.0 * prec_micro * recall_micro / (prec_micro + recall_micro)
    if verbose:
        print("Precision (micro): {:.3%}".format(prec_micro))
        print(" Recall (micro): {:.3%}".format(recall_micro))
        print(" F1 (micro): {:.3%}".format(f1_micro))
    return prec_micro, recall_micro, f1_micro

def handle_doc_offset_ner(ner_tuple, doc_len, remove=True):
    # print(ner_tuple)
    ner_start, ner_end, ent_type = ner_tuple
    if remove:
        return [ner_start - doc_len, ner_end - doc_len, ent_type]
    else:
        return [ner_start + doc_len, ner_end + doc_len, ent_type]

def build_entity_typing_mention(tokens, sorted_entity_list):
    ent_dict = dict([(item[2][0], item) for item in sorted_entity_list])
    word_idx = 0
    tagged_token_list = []
    while word_idx < len(tokens):
        if word_idx not in ent_dict:
            tagged_token_list.append(tokens[word_idx])
            word_idx += 1
        else:
            ent_id, ent_str, (ent_start, ent_end), ent_type, _ = ent_dict[word_idx]
            if ent_str.strip() != ' '.join(tokens[ent_start:ent_end]).strip():
                print(ent_str, ' '.join(tokens[ent_start:ent_end]))
            assert ent_str.strip() == ' '.join(tokens[ent_start:ent_end]).strip()
            tagged_token_list.append("[ENT-" + ent_type + "-START]")
            tagged_token_list.append(ent_str)
            tagged_token_list.append("[ENT-" + ent_type + "-END]")
            word_idx = ent_end
    return tagged_token_list

def prepare_entity_typing_data(tl_data, tokenizer, args):
    start_time = time.time()
    wlp_ent_type_list = []
    tl_ent_type_list = []
    process_sen_list = []
    doc_name_list = []
    sen_idx_list = []
    ent_info_list = []
    for file_idx in range(len(tl_data)):
        doc_name = tl_data[file_idx]['doc_key']
        doc_data = tl_data[file_idx]
        doc_sen_len_list = [len(item) for item in doc_data['sentences']]
        ner_list = doc_data['wlp_labels']
        for sen_idx, (tl_sen, wlp_ner, tl_ner) in enumerate(zip(doc_data['sentences'], ner_list, doc_data['ner'])):
            pre_doc_len = sum(doc_sen_len_list[:sen_idx])
            tl_ner_remove_offset = [handle_doc_offset_ner(item, pre_doc_len, remove=True) for item in tl_ner]
            assert len(tl_ner) == len(wlp_ner)
            for tl_ent, wlp_label, tl_ent_old in zip(tl_ner_remove_offset, wlp_ner, tl_ner):
                if wlp_label != 'Action':
                    continue
                ent_start, ent_end, tl_label = tl_ent
                ent_start_old, ent_end_old, tl_label_old = tl_ent_old
                assert tl_label == tl_label_old
                assert ent_start_old == ent_start + pre_doc_len
                one_ent_list = [('ent_id', ' '.join(tl_sen[ent_start:ent_end + 1]), \
                                 (ent_start, ent_end + 1), wlp_label, '')]
                process_sen = build_entity_typing_mention(tl_sen, one_ent_list)
                wlp_ent_type_list.append("[ENT-" + wlp_label + "-START]")
                tl_ent_type_list.append(tl_label)
                # process_sen_list.append("[CLS] " + ' '.join(process_sen) + " [SEP]")
                process_sen_list.append(f"{tokenizer.bos_token} " + ' '.join(process_sen) + f" {tokenizer.eos_token}")
                doc_name_list.append(doc_name)
                sen_idx_list.append(sen_idx)
                ent_info_list.append((ent_start_old, ent_end_old, ent_start, ent_end, sen_idx, tl_label))
    tokenized_sen_list = [tokenizer.tokenize(sent) for sent in process_sen_list]
    print(max([len(item) for item in tokenized_sen_list]))

    # Get the input_ids and labels
    input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_sen_list],
                              maxlen=args.max_len, value=tokenizer.pad_token_id, dtype="long", truncating="post", padding="post")
    attention_masks = [[float(i != tokenizer.pad_token_id) for i in ii] for ii in input_ids]
    # print(tl_ent_type_list)
    labels = [LABEL2ID[l] for l in tl_ent_type_list]
    start_tkn_idx_list = [tokenized_sen_list[sen_idx].index(wlp_ent_type_list[sen_idx])
                          if tokenized_sen_list[sen_idx].index(wlp_ent_type_list[sen_idx]) < args.max_len
                          else args.max_len - 1
                          for sen_idx in range(len(tokenized_sen_list))]
    assert len(input_ids) == len(attention_masks) and len(labels) == len(start_tkn_idx_list)
    inputs = torch.tensor(input_ids)
    masks = torch.tensor(attention_masks)
    labels = torch.tensor(labels)
    start_idx = torch.tensor(start_tkn_idx_list)
    print("--- %s seconds ---" % (time.time() - start_time))
    return inputs, masks, labels, start_idx, tokenized_sen_list, wlp_ent_type_list, doc_name_list, ent_info_list

def prepare_ep_inference_data(tl_data, tokenizer, args):
    start_time = time.time()
    wlp_ent_type_list = []
    tl_ent_type_list = []
    process_sen_list = []
    doc_name_list = []
    sen_idx_list = []
    ent_info_list = []
    for file_idx in range(len(tl_data)):
        doc_name = tl_data[file_idx]['doc_key']
        # print(file_idx, doc_name)
        doc_data = tl_data[file_idx]
        doc_sen_len_list = [len(item) for item in doc_data['sentences_tokenized']]
        for sen_idx, (tl_sen, tl_ner, wlp_ner, wlp_ner_pred) in enumerate(zip(doc_data['sentences_tokenized'], \
                                                                              doc_data['tl_ner_tokenized'], \
                                                                              doc_data['wlp_ner_tokenized'], \
                                                                              doc_data['wlp_ner_pred_tokenized'])):
            pre_doc_len = sum(doc_sen_len_list[:sen_idx])
            wlp_ner_pred_remove_offset = [handle_doc_offset_ner(item, pre_doc_len, remove=True) for item in
                                          wlp_ner_pred]
            assert len(tl_ner) == len(wlp_ner)
            for wlp_ent, wlp_ent_old in zip(wlp_ner_pred_remove_offset, wlp_ner_pred):
                ent_start, ent_end, wlp_label_pred = wlp_ent
                ent_start_old, ent_end_old, wlp_label_pred_old = wlp_ent_old
                tl_label = 'ignored'
                if wlp_label_pred != 'Action':
                    continue
                one_ent_list = [('ent_id', ' '.join(tl_sen[ent_start:ent_end + 1]), \
                                 (ent_start, ent_end + 1), wlp_label_pred, '')]
                process_sen = build_entity_typing_mention(tl_sen, one_ent_list)
                wlp_ent_type_list.append("[ENT-" + wlp_label_pred + "-START]")
                tl_ent_type_list.append(tl_label)
                process_sen_list.append("[CLS] " + ' '.join(process_sen) + " [SEP]")
                doc_name_list.append(doc_name)
                sen_idx_list.append(sen_idx)
                ent_info_list.append((ent_start_old, ent_end_old, ent_start, ent_end, sen_idx, tl_label))
    tokenized_sen_list = [sent.split(' ') for sent in process_sen_list]

    # Get the input_ids and labels
    input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_sen_list],
                              maxlen=args.max_len, value=tokenizer.pad_token_id, dtype="long", truncating="post", padding="post")
    attention_masks = [[float(i != tokenizer.pad_token_id) for i in ii] for ii in input_ids]
    labels = [LABEL2ID[l] for l in tl_ent_type_list]
    start_tkn_idx_list = [tokenized_sen_list[sen_idx].index(wlp_ent_type_list[sen_idx])
                          if tokenized_sen_list[sen_idx].index(wlp_ent_type_list[sen_idx]) < args.max_len
                          else args.max_len - 1
                          for sen_idx in range(len(tokenized_sen_list))]
    assert len(input_ids) == len(attention_masks) and len(labels) == len(start_tkn_idx_list)
    inputs = torch.tensor(input_ids)
    masks = torch.tensor(attention_masks)
    labels = torch.tensor(labels)
    start_idx = torch.tensor(start_tkn_idx_list)
    print("--- %s seconds ---" % (time.time() - start_time))
    return inputs, masks, labels, start_idx, tokenized_sen_list, wlp_ent_type_list, doc_name_list, ent_info_list

authors: ["bflashcp3f@gmail.com"] | author_id: bflashcp3f@gmail.com
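A toy check of score() above: with one correct guess out of two non-'ignored' predictions and two non-'ignored' gold labels, micro precision, recall, and F1 all come out to 0.5 (a sketch assuming pred_ground is importable):

from pred_ground import score

gold = ['rg', 'loc', 'ignored']
pred = ['rg', 'm', 'ignored']
print(score(gold, pred))  # (0.5, 0.5, 0.5)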
blob_id: 2885cbbc471c1e76a88a5bcff98bb1cffcdb2e35 | directory_id: 25564424dd568cd0755110b9875cecc700c480bc | content_id: 66444575724de4797c117d833cb466008022264c
path: /MyTutor/Tutorial/migrations/0009_merge_20171121_1755.py | repo_name: wyh5625/Tutoria | branch_name: refs/heads/master | snapshot_id: f8908addbdba63e6ba89549fe5ddf918ce68c54d | revision_id: 9d0d19e03860b9f49fb1bdc64cb936ca8f5c734a
detected_licenses: [] | license_type: no_license | visit_date: 2021-08-19T12:27:27.326509 | revision_date: 2017-11-26T07:51:56 | committer_date: 2017-11-26T07:51:56
github_id: 107,742,862 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 343 | extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-21 09:55
from __future__ import unicode_literals

from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('Tutorial', '0008_auto_20171121_1504'),
        ('Tutorial', '0007_tutorialsession_price'),
    ]

    operations = [
    ]

authors: ["wyh562527789@gmail.com"] | author_id: wyh562527789@gmail.com

blob_id: c08ba960a32c6f92ef5c4adac83005a866608f3c | directory_id: 532905538460b1e8ea6d456a7d12321bba503ea0 | content_id: 6027fef26f4a2868c4e3c498eb5afab488e235b2
path: /lib/BlobDetector.py | repo_name: HANprojectsENG/MicroPlateImager | branch_name: refs/heads/master | snapshot_id: 8b45d7bfaeffbaa5baaae11a4597e1002c0afb1a | revision_id: efa133d34ec7b4734d1f118d586daaa5155b5d01
detected_licenses: [] | license_type: no_license | visit_date: 2020-12-28T04:23:39.142057 | revision_date: 2020-05-26T15:46:45 | committer_date: 2020-05-26T15:46:45
github_id: 238,180,568 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,740 | extension: py
content:
"""@package docstring
Detect blobs in the image
TODO:
how to pass roi's and imageQuality? via getter or (result) signal?
Test and improve local blob snr
"""
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from PySide2.QtCore import *
import cv2
import inspect
import traceback
from lib.manipulator import Manipulator
import matplotlib.pyplot as plt
class BlobDetector(Manipulator):
"""Object detector
detects blobs
\param image (the enhanced and segmented image)
\return image (annotated)
"""
def __init__(self, *args, **kwargs):
"""The constructor."""
super().__init__("blob detector")
# Blob area filtering parameters minBlobArea
self.minBlobArea = kwargs['minBlobArea'] if 'minBlobArea' in kwargs else 10
self.maxBlobArea = kwargs['maxBlobArea'] if 'maxBlobArea' in kwargs else 500
# adaptiveThresholdInvertBinary
self.invBin = kwargs['invBin'] if 'invBin' in kwargs else True
# adaptiveThresholdOffset
self.offset = kwargs['offset'] if 'offset' in kwargs else 0
# adaptiveThresholdBlocksize
self.blocksize = kwargs['blocksize'] if 'blocksize' in kwargs else 3
# Plotting
self.plot = kwargs['plot'] if 'plot' in kwargs else False
if self.plot:
cv2.namedWindow(self.name)
plt.show(block=False)
"""TODO: Add var rects -> detected blobs/rectangles"""
self.blobs = list
def __del__(self):
"""The deconstructor."""
None
def start(self, Image, ROIs):
"""Image processing function.
\param image (the enhanced and segmented image)
\return image (the annotated image )
local variable is the list of detected blobs with the following feature columns:
[bb_left,bb_top,bb_width,bb_height, cc_area, sharpness, SNR]
Sharpness is variation of the Laplacian (introduced by Pech-Pacheco
"Diatom autofocusing in brightfield microscopy: a comparative study."
"""
try:
self.startTimer()
self.image = Image
self.ROIs = ROIs
# Iterate ROis
for ROI in ROIs:
# slice image, assuming ROI:(left,top,width,height)
ROI_image = self.image[ROI[1]:ROI[1]+ROI[3],ROI[0]:ROI[0]+ROI[2]]
# Binarize and find blobs
BWImage = cv2.adaptiveThreshold(ROI_image, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
self.invBin,
self.blocksize,
self.offset)
# ConnectedComponentsWithStats output: number of labels, label matrix, stats(left,top,width,height), area
blobFeatures = cv2.connectedComponentsWithStats(BWImage, 8, cv2.CV_32S)
# Get blob RoI and area
blobFeatures = blobFeatures[2][1:] # skipping background (label 0)
# Filter by blob area
self.blobs = blobFeatures[
np.where( (blobFeatures[:, cv2.CC_STAT_AREA] > self.minBlobArea) &
(blobFeatures[:, cv2.CC_STAT_AREA] < self.maxBlobArea) ) ]
# Increase array size
self.blobs = np.concatenate([self.blobs,
np.zeros((self.blobs.shape[0],2), dtype=int)],
axis=1)
# Annotate blobs and compute additional features
for blob in self.blobs:
tl = (blob[0], blob[1])
br = (blob[0] + blob[2], blob[1] + blob[3])
# Compute some metrics of individual blobs
tempImage = self.image[tl[1]:br[1], tl[0]:br[0]]
I_0 = 255.0 - np.min(tempImage) # peak foreground intensity estimate
I_b = 255.0 - np.max(tempImage) # background intensity
# Add local sharpness column
blob[5] = int(cv2.Laplacian(tempImage, cv2.CV_64F).var())
# Add local SNR column
blob[6] = int((I_0-I_b)/np.sqrt(I_b)) if I_b>0 else 0
# Shift coordinates wrt ROI
blob[0] += ROI[0]
blob[1] += ROI[1]
# Mark in image
if self.plot:
None
#cv2.rectangle(ROI_image, tl, br, (0, 0, 0), 1)
#cv2.putText(ROI_image, str(blob[5]), br, cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,0), 1, cv2.LINE_AA)
# Plot last ROI
if self.plot:
cv2.imshow(self.name, BWImage)
# Finalize
self.stopTimer()
self.signals.finished.emit()
except Exception as err:
exc = traceback.format_exception(type(err), err, err.__traceback__, chain=False)
self.signals.error.emit(exc)
self.signals.message.emit('E: {} exception: {}'.format(self.name, err))
return self.image
@Slot(float)
def setOffset(self, val):
if -10.0 <= val <= 10.0:
self.offset = val
else:
raise ValueError('offset')
@Slot(int)
def setBlockSize(self, val):
if (3 <= val <= 21) and (val & 1) == 1:
self.blocksize = val
else:
raise ValueError('blocksize')

authors: ["gertvanlagen99@hotmail.com"] | author_id: gertvanlagen99@hotmail.com
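A standalone sketch of the detector's core (adaptive threshold, connected components, area filter), assuming only OpenCV and NumPy; the synthetic image and thresholds are illustrative:

import cv2
import numpy as np

img = np.full((64, 64), 255, np.uint8)
cv2.circle(img, (32, 32), 5, 0, -1)  # one dark blob on a light field
bw = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY_INV, 11, 2)
stats = cv2.connectedComponentsWithStats(bw, 8, cv2.CV_32S)[2][1:]  # skip background
blobs = stats[(stats[:, cv2.CC_STAT_AREA] > 10) & (stats[:, cv2.CC_STAT_AREA] < 500)]
print(blobs)  # rows: left, top, width, height, area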
blob_id: d757c9a19d8d3adf5642e99b229eba903b9d8a89 | directory_id: b49ece85d473947bb6f47c6120538bc8374934d5 | content_id: fa75ade896e1d171d8ea4f9cab9a13150f35efa0
path: /myfirstprogects/views.py | repo_name: Ish2504/myfirstproject-root | branch_name: refs/heads/main | snapshot_id: 138f868c745d8a129e121227333edab626ecc38e | revision_id: 69861d70f566cec5bec3de1b65340cb4ea4440d2
detected_licenses: [] | license_type: no_license | visit_date: 2023-06-23T17:24:09.949504 | revision_date: 2021-07-24T19:48:11 | committer_date: 2021-07-24T19:48:11
github_id: 389,180,552 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 217 | extension: py
content:
from django.http import HttpResponse
from django.shortcuts import render

def about(request):
    return HttpResponse('This is about page')

def home(request):
    return render(request, 'home.html', {'greeting': 'Hello!'})

authors: ["comp-g@yandex.ru"] | author_id: comp-g@yandex.ru

blob_id: 545228fdf04416c3f4249be97d0719a04c08e210 | directory_id: 131fbf5d17e595519b0e2b5078932d4861b4a4f1 | content_id: 90d9b0a8a86efb532784f5ee9692b69c0049218f
path: /keywordfinder/forms.py | repo_name: RahulPGS/highbreedtask | branch_name: refs/heads/master | snapshot_id: b560ff32cb8a7cb84f0e2f944cfe2bfdf9420765 | revision_id: c02f5f19e94c8555aa56f7c06fb814b415257330
detected_licenses: [] | license_type: no_license | visit_date: 2023-05-09T23:04:05.426819 | revision_date: 2021-06-07T07:31:42 | committer_date: 2021-06-07T07:31:42
github_id: 374,317,074 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 167 | extension: py
content:
from .models import URLKeywords
from django import forms

class URLKeywordform(forms.ModelForm):
    class Meta:
        model = URLKeywords
        fields = ['url']

authors: ["S160142@rguktsklm.ac.in"] | author_id: S160142@rguktsklm.ac.in

blob_id: 73c63e80f0b6d6cd0dc924cbf442c0ffd0fb0ab9 | directory_id: 45f9013735913414b95f35a4081c37998b6a8de9 | content_id: 855e913d944cc627ee80bdec52f288b2991ea5fd
path: /samples/openapi3/client/petstore/python/petstore_api/model/quadrilateral.py | repo_name: dukeraphaelng/openapi-generator | branch_name: refs/heads/master | snapshot_id: 58585af051e939c0e9f3b5f19c2704439c70bc4d | revision_id: f5c49609d2e5c4c2bdf53aeef71347c6a1b3d29c
detected_licenses: ["Apache-2.0"] | license_type: permissive | visit_date: 2023-02-22T09:56:56.951394 | revision_date: 2021-01-19T05:16:20 | committer_date: 2021-01-19T05:16:20
github_id: 330,874,250 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: Apache-2.0 | gha_event_created_at: 2021-01-19T06:10:00 | gha_created_at: 2021-01-19T05:30:21 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,750 | extension: py
content:
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from petstore_api.model.complex_quadrilateral import ComplexQuadrilateral
from petstore_api.model.simple_quadrilateral import SimpleQuadrilateral
globals()['ComplexQuadrilateral'] = ComplexQuadrilateral
globals()['SimpleQuadrilateral'] = SimpleQuadrilateral
class Quadrilateral(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'shape_type': (str,), # noqa: E501
'quadrilateral_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'ComplexQuadrilateral': ComplexQuadrilateral,
'SimpleQuadrilateral': SimpleQuadrilateral,
}
if not val:
return None
return {'quadrilateral_type': val}
attribute_map = {
'shape_type': 'shapeType', # noqa: E501
'quadrilateral_type': 'quadrilateralType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, quadrilateral_type, *args, **kwargs): # noqa: E501
"""Quadrilateral - a model defined in OpenAPI
Args:
quadrilateral_type (str):
Keyword Args:
shape_type (str): defaults to nulltype.Null # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
shape_type = kwargs.get('shape_type', nulltype.Null)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'shape_type': shape_type,
'quadrilateral_type': quadrilateral_type,
}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
ComplexQuadrilateral,
SimpleQuadrilateral,
],
}

authors: ["noreply@github.com"] | author_id: dukeraphaelng.noreply@github.com

blob_id: c6dd5a39cb39904882c39318303ba5ec4e8e101e | directory_id: 0139bdde50d922893e718221a69e1ca4cb89757d | content_id: 728d1ae61f4134e81acd61f8d0256de6e361d130
path: /SendEmail_py/test.py | repo_name: nuaays/Miscellaneous_Scripts | branch_name: refs/heads/master | snapshot_id: 79adc5d4a639f1c95d5206447593f89a813d2e06 | revision_id: 803a3b30e8848bbcbce58eb12f9b25a12060a437
detected_licenses: [] | license_type: no_license | visit_date: 2021-01-10T05:46:24.227613 | revision_date: 2017-08-04T02:30:18 | committer_date: 2017-08-04T02:30:18
github_id: 48,648,489 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 134 | extension: py
content:
#coding:utf-8
import SendEmail

# Sends a test message; the third argument, "Email标题", is Chinese for "email subject"
SendEmail.send_mail("nuaays@qq.com", ["nuaays@gmail.com"], "Email标题", "This is the Mail Body")

authors: ["nuaays@gmail.com"] | author_id: nuaays@gmail.com

blob_id: 0b6385d8c07a79f3c00f429fc0b39f0bc787e6b7 | directory_id: bdf370c0bc4e93a156087b7f86bb46a47120a435 | content_id: 9e054ee0ddff56f79325e361b3768b5bded9a532
path: /src/shifa/signup/signup.py | repo_name: dethaa/Clinican | branch_name: refs/heads/master | snapshot_id: ae30d441058bb0834d738a417c3bc885f4794e25 | revision_id: 5c665761ed7deb84a137f5325afdeb892b99c753
detected_licenses: [] | license_type: no_license | visit_date: 2023-06-17T02:58:48.484066 | revision_date: 2021-07-08T05:17:59 | committer_date: 2021-07-08T05:17:59
github_id: 384,008,526 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,205 | extension: py
content:
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition, NoTransition
import mysql.connector
import hashlib

Builder.load_file('shifa/signup/signup.kv')

mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="132435",  # Fill this in with your own password (translated from Indonesian)
    database="RPL"
)

class SignupWindow(BoxLayout):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def validate_account(self):
        nama = self.ids.name_field.text
        email = self.ids.email_field.text
        notelp = self.ids.phone_field.text
        username = self.ids.username_field.text
        password = self.ids.pass_field.text
        password = hashlib.sha256(password.encode()).hexdigest()
        print(password)

        # Fetch account with same username
        result = self.fetch_account(username)
        result_email = self.fetch_account_email(email)

        # Update message box
        message_box = self.ids.message
        if (username == '' or password == '' or nama == ''
                or email == '' or notelp == ''):
            message_box.text = '[color=#FF0000]Please fill in all boxes[/color]'
        elif (len(result) != 0):
            message_box.text = '[color=#FF0000]Username taken! Please choose another one[/color]'
        elif (len(result_email) != 0):
            message_box.text = '[color=#FF0000]Email already registered![/color]'
        elif (not self.is_valid_phone(notelp)):
            message_box.text = '[color=#FF0000]Invalid phone number! Only numbers allowed[/color]'
        else:
            # generate new id
            id = self.create_id()
            # hash password
            # password = hashlib.sha256(password.encode()).hexdigest()
            # insert new account
            val = (id, nama, email, notelp, username, password)
            self.insert_account(val)
            message_box.text = '[color=#26AE4C]Sign up successful! You can now sign in[/color]'

    def fetch_account(self, _username):
        val = (_username.rstrip())
        mycursor = mydb.cursor()
        query = "SELECT * FROM Customer WHERE username='{0}'".format(val)
        print(query)
        mycursor.execute(query)
        myresult = mycursor.fetchall()
        print(myresult)
        return myresult

    def fetch_account_email(self, _email):
        val = (_email.rstrip())
        mycursor = mydb.cursor()
        query = "SELECT * FROM Customer WHERE email='{0}'".format(val)
        print(query)
        mycursor.execute(query)
        myresult = mycursor.fetchall()
        print(myresult)
        return myresult

    def is_valid_phone(self, _phone_number):
        for char in _phone_number:
            if (ord(char) < 48 or ord(char) > 57):
                return False
        return True

    def create_id(self):
        mycursor = mydb.cursor()
        query = "SELECT MAX(idakun) FROM Customer"
        mycursor.execute(query)
        myresult = mycursor.fetchall()
        print(myresult)
        return int(myresult[0][0]) + 1

    def insert_account(self, val):
        mycursor = mydb.cursor()
        query = "INSERT INTO Customer VALUES ({0}, '{1}', '{2}', '{3}', '{4}', '{5}')"\
            .format(val[0], val[1], val[2], val[3], val[4], val[5])
        print(query)
        mycursor.execute(query)
        mydb.commit()
        print("successfully inserted record")

    def to_signin(self):
        self.parent.parent.transition = SlideTransition()
        self.parent.parent.switch_to(self.parent.parent.parent.ids.scrn_si, direction='right')
        self.reset_signup()

    def reset_signup(self):
        self.ids.name_field.text = ''
        self.ids.email_field.text = ''
        self.ids.phone_field.text = ''
        self.ids.username_field.text = ''
        self.ids.pass_field.text = ''
        self.ids.message.text = ''

class SignupApp(App):
    def build(self):
        Window.size = (1280, 720)
        return SignupWindow()

if __name__ == '__main__':
    SignupApp().run()

authors: ["sharonmarbun12@gmail.com"] | author_id: sharonmarbun12@gmail.com
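The queries above interpolate user input with str.format, which is open to SQL injection. A safer variant of fetch_account() using mysql.connector's parameter binding, as a sketch rather than the project's code:

def fetch_account(self, _username):
    mycursor = mydb.cursor()
    # %s placeholders let the driver escape the value instead of str.format
    mycursor.execute("SELECT * FROM Customer WHERE username = %s", (_username.rstrip(),))
    return mycursor.fetchall()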
blob_id: 0feafb3f894fca267b28bebdfaa75a2bfe6558cf | directory_id: 529189166b8b979f50c41695db4ae094d28998fd | content_id: ab98ea27a329f2f2d87ff904e6f2f41ed101b394
path: /src/main/utils/comparable.py | repo_name: suzuki-kei/python-myapp | branch_name: refs/heads/main | snapshot_id: e4bbc859af7cc6d23ef67bb4b68fc5c9ddb5ecad | revision_id: f99e43506c52061323f86d8deabbd2689d5edc19
detected_licenses: [] | license_type: no_license | visit_date: 2023-08-31T03:27:31.103690 | revision_date: 2021-11-13T12:13:42 | committer_date: 2021-11-13T12:13:42
github_id: 137,279,852 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2023-09-10T02:25:23 | gha_created_at: 2018-06-13T22:50:55 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,887 | extension: py
content:
class Comparable(type):
    """
    Metaclass that auto-generates the rich comparison operators from a
    compare() method that returns a negative, zero, or positive value.
    """
    def __new__(self, name, bases, namespace, **kwargs):
        Comparable.define_compare_methods(namespace)
        return super(Comparable, self).__new__(self, name, bases, namespace, **kwargs)

    @staticmethod
    def define_compare_methods(namespace):
        namespace["__eq__"] = lambda self, other: self.compare(other) == 0
        namespace["__ne__"] = lambda self, other: self.compare(other) != 0
        namespace["__lt__"] = lambda self, other: self.compare(other) < 0
        namespace["__le__"] = lambda self, other: self.compare(other) <= 0
        namespace["__gt__"] = lambda self, other: self.compare(other) > 0
        namespace["__ge__"] = lambda self, other: self.compare(other) >= 0

def comparable(target="compare"):
    """
    Decorator that auto-generates the comparison operators from a
    comparison method.
    """
    def define_compare_methods(target_class, compare):
        target_class.__eq__ = lambda self, other: compare(self, other) == 0
        target_class.__ne__ = lambda self, other: compare(self, other) != 0
        target_class.__lt__ = lambda self, other: compare(self, other) < 0
        target_class.__le__ = lambda self, other: compare(self, other) <= 0
        target_class.__gt__ = lambda self, other: compare(self, other) > 0
        target_class.__ge__ = lambda self, other: compare(self, other) >= 0

    if isinstance(target, str):
        def wrapper(target_class):
            compare = target_class.__dict__[target]
            define_compare_methods(target_class, compare)
            return target_class
        return wrapper
    else:
        compare = lambda self, other: self.compare(other)
        define_compare_methods(target, compare)
        return target

authors: ["todokimasen@gmail.com"] | author_id: todokimasen@gmail.com
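Usage sketch of the decorator above: compare() returns a negative, zero, or positive number, and all six operators are derived from it (the Version class is a made-up example):

@comparable()
class Version:
    def __init__(self, number):
        self.number = number

    def compare(self, other):
        return self.number - other.number

assert Version(1) < Version(2)
assert Version(3) == Version(3)
assert Version(5) >= Version(4)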
blob_id: 3f26efd33da5940d90d8e1ebf80e3f504640ec53 | directory_id: 263e20c0d977a6ba56269ee266c7096b882000cd | content_id: f02ed1f79063fbefe82c0e4520506c0022158f3a
path: /tests/test_product_model.py | repo_name: marcossilvaxx/teste_backend | branch_name: refs/heads/master | snapshot_id: ddb268aca62ea5faac0301bfc5a005fde4fe9c85 | revision_id: 7555ab1939c9886727bacabc66fda829a319d80d
detected_licenses: [] | license_type: no_license | visit_date: 2022-11-22T04:15:12.096304 | revision_date: 2020-07-25T04:39:41 | committer_date: 2020-07-25T04:39:41
github_id: 282,368,871 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,904 | extension: py
content:
from unittest import TestCase

from app import app
from app.models.Product import Product

class TestProductModel(TestCase):
    '''
    Testing Product model
    '''
    def setUp(self):
        app.testing = True

    def test_create_product_basic(self):
        try:
            product = Product("arroz", 5.30, 3.48)
        except:
            self.fail("Exception was not expected.")

    def test_create_product_none_arguments(self):
        try:
            product = Product(None, 5.30, 3.48)
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product("arroz", None, 3.48)
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product("arroz", 5.30, None)
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product(None, None, None)
            self.fail("Exception was expected.")
        except:
            pass

    def test_create_product_empty_string_arguments(self):
        try:
            product = Product("", 5.30, 3.48)
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product("arroz", "", 3.48)
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product("arroz", 5.30, "")
            self.fail("Exception was expected.")
        except:
            pass
        try:
            product = Product("", "", "")
            self.fail("Exception was expected.")
        except:
            pass

    def test_product_string_representation(self):
        product = Product("arroz", 10.23, 9.33)
        self.assertEqual("< Product : arroz >", product.__repr__())
|
[
"vinicius_marcosmartins@hotmail.com"
] |
vinicius_marcosmartins@hotmail.com
|
a562e14310acdd7d983727fb7f4adac44605916d
|
287c7cd11f458c6e26fc7c1f64ecc310472e2809
|
/bookmark/urls.py
|
f326bb3c5156bcdd93efd6b0514b41a078cfdb4c
|
[] |
no_license
|
JUKOOK/django_study
|
b3bfe252791913d11a6395b5a467c4e5fe77b353
|
0767aceb85dc270f54611f4b123a8fd0f2913b17
|
refs/heads/master
| 2022-10-12T21:25:20.275753
| 2018-03-15T17:33:38
| 2018-03-15T17:33:38
| 120,982,904
| 0
| 1
| null | 2022-10-09T07:28:48
| 2018-02-10T04:09:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
# bookmark/urls
from django.conf.urls import url
from django.views.generic import ListView, DetailView
# from .views import BookmarkLV, BookmarkDV
from .models import Bookmark
from .views import *  # view classes for creating, updating, and deleting bookmark posts within the site
urlpatterns = [
# Class-based view
# url(r'^list/', BookmarkLV.as_view(), name="bookmark_list_view"),
# url(r'^detail/(?P<pk>\d+)/$', BookmarkDV.as_view(), name="bookmark_detail_view"),
# Class-based views built on django.views.generic
url(r'^$', ListView.as_view(model=Bookmark), name="index"), # list_view
url(r'^detail/(?P<pk>\d+)/$', DetailView.as_view(model=Bookmark), name="detail_view"),
# Add a bookmark post; list, update, and delete the records the user created
# /add/
url(r'^add/$', BookmarkCreateView.as_view(), name="add_bookmark"),
# /change
url(r'^change/$', BookmarkChangeListView.as_view(), name="changeable_bookmark"),
# /99/update
url(r'^(?P<pk>[0-9]+)/update/$', BookmarkUpdateView.as_view(), name="update_bookmark"),
# /99/delete/
url(r'^(?P<pk>[0-9]+)/delete/$', BookmarkDeleteView.as_view(), name="delete_bookmark"),
]
|
[
"wnrnrdl@gmail.com"
] |
wnrnrdl@gmail.com
|
1441470cd360c73a6547a2f2a220a704c85a8235
|
1ab7b3f2aa63de8488ce7c466a67d367771aa1f2
|
/Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/OpenGL/EGL/debug.py
|
dd519e6dbe0ccfa9fb226e2757a7a64fd815c2cc
|
[
"MIT"
] |
permissive
|
icl-rocketry/Avionics
|
9d39aeb11aba11115826fd73357b415026a7adad
|
95b7a061eabd6f2b607fba79e007186030f02720
|
refs/heads/master
| 2022-07-30T07:54:10.642930
| 2022-07-10T12:19:10
| 2022-07-10T12:19:10
| 216,184,670
| 9
| 1
|
MIT
| 2022-06-27T10:17:06
| 2019-10-19T09:57:07
|
C++
|
UTF-8
|
Python
| false
| false
| 7,538
|
py
|
"""Debug utilities for EGL operations"""
from OpenGL.EGL import *
import itertools
def eglErrorName(value):
"""Returns error constant if known, otherwise returns value"""
return KNOWN_ERRORS.get(value, value)
KNOWN_ERRORS = {
EGL_SUCCESS: EGL_SUCCESS,
EGL_NOT_INITIALIZED: EGL_NOT_INITIALIZED,
EGL_BAD_ACCESS: EGL_BAD_ACCESS,
EGL_BAD_ALLOC: EGL_BAD_ALLOC,
EGL_BAD_ATTRIBUTE: EGL_BAD_ATTRIBUTE,
EGL_BAD_CONTEXT: EGL_BAD_CONTEXT,
EGL_BAD_CONFIG: EGL_BAD_CONFIG,
EGL_BAD_CURRENT_SURFACE: EGL_BAD_CURRENT_SURFACE,
EGL_BAD_DISPLAY: EGL_BAD_DISPLAY,
EGL_BAD_SURFACE: EGL_BAD_SURFACE,
EGL_BAD_MATCH: EGL_BAD_MATCH,
EGL_BAD_PARAMETER: EGL_BAD_PARAMETER,
EGL_BAD_NATIVE_PIXMAP: EGL_BAD_NATIVE_PIXMAP,
EGL_BAD_NATIVE_WINDOW: EGL_BAD_NATIVE_WINDOW,
EGL_CONTEXT_LOST: EGL_CONTEXT_LOST,
}
def write_ppm(buf, filename):
"""Write height * width * 3-component buffer as ppm to filename
This lets us write a simple image format without
using any libraries that can be viewed on most
linux workstations.
"""
with open(filename, "w") as f:
h, w, c = buf.shape
print("P3", file=f)
print("# ascii ppm file created by pyopengl", file=f)
print("%i %i" % (w, h), file=f)
print("255", file=f)
for y in range(h - 1, -1, -1):
for x in range(w):
pixel = buf[y, x]
l = " %3d %3d %3d" % (pixel[0], pixel[1], pixel[2])
f.write(l)
f.write("\n")
def debug_config(display, config):
"""Get debug display for the given configuration"""
result = {}
value = EGLint()
for attr in CONFIG_ATTRS:
if not eglGetConfigAttrib(display, config, attr, value):
log.warning("Failed to get attribute %s from config", attr)
continue
if attr in BITMASK_FIELDS:
attr_value = {}
for subattr in BITMASK_FIELDS[attr]:
if value.value & subattr:
attr_value[subattr.name] = True
else:
attr_value = value.value
result[attr.name] = attr_value
return result
def debug_configs(display, configs=None, max_count=256):
"""Present a formatted list of configs for the display"""
if configs is None:
configs = (EGLConfig * max_count)()
num_configs = EGLint()
eglGetConfigs(display, configs, max_count, num_configs)
if not num_configs.value:
return []
configs = configs[: num_configs.value]
debug_configs = [debug_config(display, cfg) for cfg in configs]
return debug_configs
SURFACE_TYPE_BITS = [
EGL_MULTISAMPLE_RESOLVE_BOX_BIT,
EGL_PBUFFER_BIT,
EGL_PIXMAP_BIT,
EGL_SWAP_BEHAVIOR_PRESERVED_BIT,
EGL_VG_ALPHA_FORMAT_PRE_BIT,
EGL_VG_COLORSPACE_LINEAR_BIT,
EGL_WINDOW_BIT,
]
RENDERABLE_TYPE_BITS = [
EGL_OPENGL_BIT,
EGL_OPENGL_ES_BIT,
EGL_OPENGL_ES2_BIT,
EGL_OPENGL_ES3_BIT,
EGL_OPENVG_BIT,
]
CAVEAT_BITS = [
EGL_NONE,
EGL_SLOW_CONFIG,
EGL_NON_CONFORMANT_CONFIG,
]
TRANSPARENT_BITS = [
EGL_NONE,
EGL_TRANSPARENT_RGB,
]
CONFIG_ATTRS = [
EGL_CONFIG_ID,
EGL_RED_SIZE,
EGL_GREEN_SIZE,
EGL_BLUE_SIZE,
EGL_DEPTH_SIZE,
EGL_ALPHA_SIZE,
EGL_ALPHA_MASK_SIZE,
EGL_BUFFER_SIZE,
EGL_STENCIL_SIZE,
EGL_BIND_TO_TEXTURE_RGB,
EGL_BIND_TO_TEXTURE_RGBA,
EGL_COLOR_BUFFER_TYPE,
EGL_CONFIG_CAVEAT,
EGL_CONFORMANT,
EGL_LEVEL,
EGL_LUMINANCE_SIZE,
EGL_MAX_PBUFFER_WIDTH,
EGL_MAX_PBUFFER_HEIGHT,
EGL_MAX_PBUFFER_PIXELS,
EGL_MIN_SWAP_INTERVAL,
EGL_MAX_SWAP_INTERVAL,
EGL_NATIVE_RENDERABLE,
EGL_NATIVE_VISUAL_ID,
EGL_NATIVE_VISUAL_TYPE,
EGL_RENDERABLE_TYPE,
EGL_SAMPLE_BUFFERS,
EGL_SAMPLES,
EGL_SURFACE_TYPE,
EGL_TRANSPARENT_TYPE,
EGL_TRANSPARENT_RED_VALUE,
EGL_TRANSPARENT_GREEN_VALUE,
EGL_TRANSPARENT_BLUE_VALUE,
]
BITMASK_FIELDS = dict(
[
(EGL_SURFACE_TYPE, SURFACE_TYPE_BITS),
(EGL_RENDERABLE_TYPE, RENDERABLE_TYPE_BITS),
(EGL_CONFORMANT, RENDERABLE_TYPE_BITS),
(EGL_CONFIG_CAVEAT, CAVEAT_BITS),
(EGL_TRANSPARENT_TYPE, TRANSPARENT_BITS),
]
)
def bit_renderer(bit):
def render(value):
if bit.name in value:
return " Y"
else:
return " ."
return render
CONFIG_FORMAT = [
(EGL_CONFIG_ID, "0x%x", "id", "cfg"),
(EGL_BUFFER_SIZE, "%i", "sz", "bf"),
(EGL_LEVEL, "%i", "l", "lv"),
(EGL_RED_SIZE, "%i", "r", "cbuf"),
(EGL_GREEN_SIZE, "%i", "g", "cbuf"),
(EGL_BLUE_SIZE, "%i", "b", "cbuf"),
(EGL_ALPHA_SIZE, "%i", "a", "cbuf"),
(EGL_DEPTH_SIZE, "%i", "th", "dp"),
(EGL_STENCIL_SIZE, "%i", "t", "s"),
(EGL_SAMPLES, "%i", "ns", "mult"),
(EGL_SAMPLE_BUFFERS, "%i", "bu", "mult"),
(EGL_NATIVE_VISUAL_ID, "0x%x", "id", "visual"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_BIT), "gl", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES_BIT), "es", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES2_BIT), "e2", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENGL_ES3_BIT), "e3", "render"),
(EGL_RENDERABLE_TYPE, bit_renderer(EGL_OPENVG_BIT), "vg", "render"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_WINDOW_BIT), "wn", "surface"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_PBUFFER_BIT), "pb", "surface"),
(EGL_SURFACE_TYPE, bit_renderer(EGL_PIXMAP_BIT), "px", "surface"),
]
def format_debug_configs(debug_configs, formats=CONFIG_FORMAT):
"""Format config for compact debugging display
Produces a config summary display for a set of
debug_configs as a text-mode table.
Uses `formats` (default `CONFIG_FORMAT`) to determine
which fields are extracted and how they are formatted
    along with the column/subcolumn set to be rendered in
    the overall header.
    Returns a formatted ASCII table for display in debug
    logs or utilities.
"""
columns = []
for (key, format, subcol, col) in formats:
column = []
max_width = 0
for row in debug_configs:
if isinstance(row, EGLConfig):
raise TypeError(row, "Call debug_config(display,config)")
try:
value = row[key.name]
except KeyError:
formatted = "_"
else:
if isinstance(format, str):
formatted = format % (value)
else:
formatted = format(value)
max_width = max((len(formatted), max_width))
column.append(formatted)
columns.append(
{
"rows": column,
"key": key,
"format": format,
"subcol": subcol,
"col": col,
"width": max_width,
}
)
headers = []
subheaders = []
rows = [headers, subheaders]
last_column = None
last_column_width = 0
for header, subcols in itertools.groupby(columns, lambda x: x["col"]):
subcols = list(subcols)
width = sum([col["width"] for col in subcols]) + (len(subcols) - 1)
headers.append(header.center(width, ".")[:width])
for column in columns:
subheaders.append(column["subcol"].rjust(column["width"])[: column["width"]])
rows.extend(
zip(*[[v.rjust(col["width"], " ") for v in col["rows"]] for col in columns])
)
return "\n".join([" ".join(row) for row in rows])
|
[
"kd619@ic.ac.uk"
] |
kd619@ic.ac.uk
|
5ae6e7a503085d85bd504f8a21c42c68e270e4c6
|
fd529ba6ade52cd2a3dab94da01252d7ea90398d
|
/testlolakfmalwe/4dtest.py
|
d5c3e5b1245b28c2a86b3ec545320774a92f267f
|
[] |
no_license
|
fjfhfjfjgishbrk/AE401-Python
|
4a984deb0281542c205d72695285b35c7413338f
|
ee80fa4588b127cff2402fd81e732ede28a66411
|
refs/heads/master
| 2022-06-13T13:49:39.875567
| 2022-05-28T12:40:51
| 2022-05-28T12:40:51
| 251,178,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
# Experimental 4D script: r1..r4 rotate u and v onto coordinate axes, rf
# applies a half-turn in that frame, and rr4..rr1 undo the alignment,
# apparently so that points can be rotated relative to the u-v plane.
import numpy as np

def pmul(rarr, p):
    """Apply the 4x4 matrices in rarr to point p, in list order (each left-multiplies)."""
    temp = p.copy()
    for i in rarr:
        temp = np.matmul(i, temp)
    return temp
point = [3, 5, 7, 1]
u = [-5, -1, -3, -5]
v = [2, 4, 6, 8]
m = v[1] * u[0] - u[1] * v[0]
n = - v[1] * u[2] + u[1] * v[2]
print(m, n)
cp1 = n/np.sqrt(m**2 + n**2)
sp1 = -m/np.sqrt(m**2 + n**2)
r1 = [[cp1, 0, -sp1, 0],
[0, 1, 0, 0],
[sp1, 0, cp1, 0],
[0, 0, 0, 1]]
rr1 = [[cp1, 0, sp1, 0],
[0, 1, 0, 0],
[-sp1, 0, cp1, 0],
[0, 0, 0, 1]]
u1 = np.matmul(r1, u)
v1 = np.matmul(r1, v)
print(u1[1] * v1[0], (v[0]*u[1]*n+v[2]*u[1]*m)/np.sqrt(m**2 + n**2))
print(v1)
print(u[1]*(v[0]*cp1-v[2]*sp1), v[1]*(u[0]*cp1-u[2]*sp1))
cp2 = v1[1] / np.sqrt(v1[0]**2 + v1[1]**2)
sp2 = v1[0] / np.sqrt(v1[0]**2 + v1[1]**2)
r2 = [[cp2, -sp2, 0, 0],
[sp2, cp2, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
rr2 = [[cp2, sp2, 0, 0],
[-sp2, cp2, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
u2 = np.matmul(r2, u1)
v2 = np.matmul(r2, v1)
p = u2[2] * v2[1] - v2[2] * u2[1]
q = - u2[2] * v2[3] + v2[2] * u2[3]
cp3 = q / np.sqrt(p**2 + q**2)
sp3 = -p / np.sqrt(p**2 + q**2)
r3 = [[1, 0, 0, 0],
[0, cp3, 0, -sp3],
[0, 0, 1, 0],
[0, sp3, 0, cp3]]
rr3 = [[1, 0, 0, 0],
[0, cp3, 0, sp3],
[0, 0, 1, 0],
[0, -sp3, 0, cp3]]
u3 = np.matmul(r3, u2)
v3 = np.matmul(r3, v2)
cp4 = v3[2] / np.sqrt(v3[1] ** 2 + v3[2] ** 2)
sp4 = v3[1] / np.sqrt(v3[1] ** 2 + v3[2] ** 2)
r4 = [[1, 0, 0, 0],
[0, cp4, -sp4, 0],
[0, sp4, cp4, 0],
[0, 0, 0, 1]]
rr4 = [[1, 0, 0, 0],
[0, cp4, sp4, 0],
[0, -sp4, cp4, 0],
[0, 0, 0, 1]]
u4 = np.matmul(r4, u3)
v4 = np.matmul(r4, v3)
cs = np.cos(np.pi)
ss = np.sin(np.pi)
rf = [[cs, -ss, 0, 0],
[ss, cs, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
rp = np.add(u, v)
ps = np.subtract(point, rp)
plb = np.linalg.norm(ps)
us = np.subtract(u, rp)
ul = np.linalg.norm(us)
angb = np.dot(ps, us) / (plb * ul)
pointa = pmul([r1, r2, r3, r4, rf, rr4, rr3, rr2, rr1], point)
pointb = pmul([r1, r2, r3, r4, rf, rr4, rr3, rr2, rr1], pointa)
ua = pmul([r1, r2, r3, r4, rf, rr4, rr3, rr2, rr1], u)
psa = np.subtract(pointa, rp)
pla = np.linalg.norm(psa)
anga = np.dot(psa, us) / (pla * ul)
print(point, pointa, pointb)
print(angb, anga)
print(u, ua.round(7))
|
[
"59891511+fjfhfjfjgishbrk@users.noreply.github.com"
] |
59891511+fjfhfjfjgishbrk@users.noreply.github.com
|
45247a50d77675b36835255f5e2fc022163f6969
|
18b4522a48830a251853189659099a4d7a03ce16
|
/task1.py
|
347b05cfd3698d309b0585c5240fd0e76ad3871a
|
[] |
no_license
|
amet-vikram13/Machine-Learning-Project
|
61b4edc9520ef190ca7fc5cd50c88cc7ee4512af
|
c232f7995f873291b68fd2bf2891d95ebe5dbf4c
|
refs/heads/master
| 2022-04-20T01:48:02.861542
| 2020-04-22T20:52:11
| 2020-04-22T20:52:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,case=1):
self.__version__ = "0.2.0"
print("CartPoleEnv - Version {}, Noise case: {}".format(self.__version__,case))
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.4
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5
self.polemass_length = (self.masspole * self.length)
self._seed()
self.force_mag = 10.0
#self.force_mag = 10.0*(1+self.np_random.uniform(low=-0.10, high=0.10))
self.tau = 0.02 # seconds between state updates
self.frictioncart = 5e-4 # AA Added cart friction
self.frictionpole = 2e-6 # AA Added cart friction
self.gravity_eps = 0.99 # Random scaling for gravity
self.frictioncart_eps = 0.99 # Random scaling for friction
self.frictionpole_eps = 0.99 # Random scaling for friction
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max])
self.action_space = spaces.Discrete(2) # AA Set discrete states back to 2
self.observation_space = spaces.Box(-high, high)
self.viewer = None
self.state = None
self.steps_beyond_done = None
def _seed(self, seed=None): # Set appropriate seed value
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag if action==1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta - self.frictioncart * (4 + self.frictioncart_eps*np.random.randn()) *np.sign(x_dot)) / self.total_mass # AA Added cart friction
thetaacc = (self.gravity * (4 + self.gravity_eps*np.random.randn()) * sintheta - costheta* temp - self.frictionpole * (4 + self.frictionpole_eps*np.random.randn()) *theta_dot/self.polemass_length) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass)) # AA Added pole friction
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
noise = 0
#noise = self.np_random.uniform(low=-0.10, high=0.10)
x = (x + self.tau * x_dot)
x_dot = (x_dot + self.tau * xacc)
theta = (theta + self.tau * theta_dot)*(1 + noise)
theta_dot = (theta_dot + self.tau * thetaacc)
self.state = (x,x_dot,theta,theta_dot)
done = x < -self.x_threshold \
or x > self.x_threshold \
or theta < -self.theta_threshold_radians \
or theta > self.theta_threshold_radians
done = bool(done)
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warning("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
return np.array(self.state)
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = self.x_threshold*2
scale = screen_width/world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * 1.0
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pole.set_color(.8,.6,.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5,.5,.8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0,carty), (screen_width,carty))
self.track.set_color(0,0,0)
self.viewer.add_geom(self.track)
if self.state is None: return None
x = self.state
cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array = mode=='rgb_array')
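# A minimal smoke-test sketch (not part of the original file). This standalone
# class keeps gym's old underscored API, so we call _reset/_step directly:
#
#     env = CartPoleEnv()
#     obs = env._reset()
#     for _ in range(10):
#         obs, reward, done, _info = env._step(env.action_space.sample())
#         if done:
#             break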
|
[
"amet97vikram@gmail.com"
] |
amet97vikram@gmail.com
|
98f4f3692153eadf94883f4f1231b5b43c968d97
|
18bc0c799f18de93d2ad982113e44e97eb266fdd
|
/RSAD/com/vo/AreaVO.py
|
a22efd90f6f9a8df33e6e54dbb01f3027732e3ca
|
[] |
no_license
|
Mysterious-Harsh/RSAD
|
a3dd0754eb279847cb6646bf9b5625ae6ec2d183
|
ada9017318ce7340beb79c44a4b7bfe22c2af2f2
|
refs/heads/main
| 2023-05-12T04:06:30.219484
| 2021-05-23T16:26:24
| 2021-05-23T16:26:24
| 369,973,737
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
from RSAD import db
class AreaVO(db.Model):
__tablename__ = 'areamaster'
areaId = db.Column('areaId', db.Integer, primary_key=True, autoincrement=True)
areaName = db.Column('areaName', db.String(100))
areaPincode = db.Column('areaPincode', db.String(100))
def as_dict(self):
return {
'areaId': self.areaId,
'areaName': self.areaName,
'areaPincode': self.areaPincode
}
db.create_all()
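# A minimal usage sketch (inside a Flask request/app context; the route
# wiring is an assumption, not part of this module):
#
#     areas = [area.as_dict() for area in AreaVO.query.all()]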
|
[
"harssh.s.patel@gmail.com"
] |
harssh.s.patel@gmail.com
|
1f8c8b2f3254374969079c53913a5c4411edde82
|
ddcbaa2262fb5e631caec4d6b81e355b6ba8a49c
|
/splinter-demo.py
|
283858dfceede45c3e662c70eb0b348b06a60be5
|
[] |
no_license
|
intergrate-dev/python-demo
|
97802949311679afa6786dac483a3f556fecf7af
|
b6ce6a775d9644cd58aa975a36323550d472db68
|
refs/heads/master
| 2020-07-03T14:27:48.962506
| 2019-08-12T13:31:33
| 2019-08-12T13:31:33
| 201,935,385
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import time
from splinter import Browser
def splinter(url):
    browser = Browser()
    # open the 126 mail login page
    browser.visit(url)
    # wait for the web elements to load
    time.sleep(5)
    # fill in account and password
    browser.find_by_id('idInput').fill('xxxxxx')
    browser.find_by_id('pwdInput').fill('xxxxx')
    # click the login button
    browser.find_by_id('loginBtn').click()
    time.sleep(8)
    # close the browser window
    browser.quit()
# https://www.2cto.com/kf/201704/622848.html
if __name__ == '__main__':
    websize3 = 'http://www.126.com'
    splinter(websize3)
|
[
"2491042435@qq.com"
] |
2491042435@qq.com
|
4b7d8aafab6795c6e32d5d999e7d59360cd86f79
|
6472c4553c49a8c05103355ff53b1cbb7f025e8f
|
/pava/implementation/natives/java/nio/ByteOrder.py
|
0705f5375318079e3ee9dfafe50da86c3310f2b5
|
[
"MIT"
] |
permissive
|
laffra/pava
|
0b012e27c207a3e0f3ca772667b0c32168fe3123
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
refs/heads/master
| 2021-01-23T04:23:22.887146
| 2020-12-21T23:14:09
| 2020-12-21T23:14:09
| 86,191,143
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def add_native_methods(clazz):
def nativeOrder____():
raise NotImplementedError()
clazz.nativeOrder____ = staticmethod(nativeOrder____)
|
[
"iV29VQzQVT11"
] |
iV29VQzQVT11
|
28c77d329eb3e74d291468dd566ac572fb45d428
|
337cc6c6d2bfd44ac18b0559ec791847eaf65bd1
|
/attention.py
|
3bbc17813d464fd71bb1e0f77355a504ebd94feb
|
[] |
no_license
|
susiwen8/tf-keras-attention-layer
|
c45ffc84927bcc6226d4a04ce3c6c4fba7a02671
|
72038268b0b49dec858597948f58b2fd7fca79d7
|
refs/heads/master
| 2020-03-27T21:34:36.895294
| 2018-09-03T05:59:20
| 2018-09-03T05:59:20
| 147,158,070
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
import tensorflow as tf
from tensorflow.keras.layers import Layer
class Attention(Layer):
"""Multi-headed attention layer."""
def __init__(self, hidden_size,
num_heads = 3,
attention_dropout=.1,
trainable=True,
name='Attention'):
if hidden_size % num_heads != 0:
raise ValueError("Hidden size must be evenly divisible by the number of heads.")
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.trainable = trainable
        self.attention_dropout = attention_dropout
        # Separate projections for queries, keys, values, and the output;
        # reusing a single Dense layer here would make q, k, and v identical.
        self.q_dense = tf.layers.Dense(self.hidden_size, use_bias=False)
        self.k_dense = tf.layers.Dense(self.hidden_size, use_bias=False)
        self.v_dense = tf.layers.Dense(self.hidden_size, use_bias=False)
        self.output_dense = tf.layers.Dense(self.hidden_size, use_bias=False)
        super(Attention, self).__init__(name=name)
def split_heads(self, x):
"""Split x into different heads, and transpose the resulting value.
The tensor is transposed to insure the inner dimensions hold the correct
values during the matrix multiplication.
Args:
x: A tensor with shape [batch_size, length, hidden_size]
Returns:
A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
"""
with tf.name_scope("split_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
# Calculate depth of last dimension after it has been split.
depth = (self.hidden_size // self.num_heads)
# Split the last dimension
x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
# Transpose the result
return tf.transpose(x, [0, 2, 1, 3])
def combine_heads(self, x):
"""Combine tensor that has been split.
Args:
x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]
Returns:
A tensor with shape [batch_size, length, hidden_size]
"""
with tf.name_scope("combine_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[2]
x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth]
return tf.reshape(x, [batch_size, length, self.hidden_size])
def call(self, inputs):
"""Apply attention mechanism to inputs.
Args:
inputs: a tensor with shape [batch_size, length_x, hidden_size]
Returns:
Attention layer output with shape [batch_size, length_x, hidden_size]
"""
        # Linearly project the queries, keys, and values with tf.layers.Dense,
        # each with its own weights (as in Google's reference implementation).
        q = self.q_dense(inputs)
        k = self.k_dense(inputs)
        v = self.v_dense(inputs)
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
# Scale q to prevent the dot product between q and k from growing too large.
depth = (self.hidden_size // self.num_heads)
q *= depth ** -0.5
logits = tf.matmul(q, k, transpose_b=True)
# logits += self.bias
weights = tf.nn.softmax(logits, name="attention_weights")
if self.trainable:
weights = tf.nn.dropout(weights, 1.0 - self.attention_dropout)
attention_output = tf.matmul(weights, v)
attention_output = self.combine_heads(attention_output)
        attention_output = self.output_dense(attention_output)
return attention_output
def compute_output_shape(self, input_shape):
return tf.TensorShape(input_shape)
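# A minimal graph-mode smoke test (TF1-style, matching the tf.layers usage
# above; the shapes are illustrative):
#
#     x = tf.placeholder(tf.float32, [None, 16, 64])  # [batch, length, hidden]
#     attn = Attention(hidden_size=64, num_heads=4)
#     y = attn(x)  # [batch, 16, 64]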
|
[
"susiwen8@163.com"
] |
susiwen8@163.com
|
bf50493554e8a7fddbfd2cb49c08bc19942c3287
|
793b74314238724962ead84f05888d205bede08d
|
/index.py
|
4cba2033c01a42c4fda25bbf64ee3ea08e319cc1
|
[] |
no_license
|
amish-goyal/LanguageID
|
5955e5fdb980d9db541ea96bb2e21499532e071a
|
8d0bac8e9a090e014f32bc76cdae3e9038188d9c
|
refs/heads/master
| 2016-09-11T02:53:10.157149
| 2015-06-08T06:02:13
| 2015-06-08T06:02:13
| 37,046,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
"""
This script creates text files from chunks of XML documents.
The text files are stored in a directory hierarchy as follows:
TARGETDIR
-language1
-textfile1
-textfile2
- ...
-language2
-textfile1
-textfile2
- ...
The script assumes the XML documents to be in the following hierarchy:
ROOTDIR
-language1
-file1
-file2
- ...
-language2
-file1
-file2
- ...
"""
from config import *
import os
import xml.etree.ElementTree as ET
CHUNKSIZE = 50
TOTALDOCS = 10
def generateXML_Filepaths(rootdir):
"""Generate filepaths of all XML files present in the root folder.
Arguments:
rootdir - name of the root directory (string)
"""
for subdir, dirs, files in os.walk(rootdir):
for filename in files:
if filename.endswith('.xml'):
filepath = subdir + '/' + filename
yield filepath
def parseXML(filepath):
"""Return concatenated string of all the text in the XML document.
Arguments:
filepath - the complete filepath of the XML file (string)
"""
try:
tree = ET.parse(filepath)
root = tree.getroot()
text = ''
for paragraph in root.iter(tag = 'p'):
            text += ' ' + (paragraph.text or '')  # some <p> elements may have no text
return text
except:
return ""
def generateDocs(lang, chunkSize, totalDocs):
"""Generate text files from the XML documents
Arguments:
lang - The language of the text content
chunkSize - Total XML files used for one document
totalDocs - Total number of documents to be generated
"""
docCount = 0
chunkCount = 0
text = ''
print "Language: ", lang
for filepath in generateXML_Filepaths(ROOTDIR+lang):
text += ' ' + parseXML(filepath)
chunkCount += 1
if chunkCount == chunkSize:
print "Creating document ",docCount
docCount += 1
with open(TARGETDIR+lang+'/'+lang+'-'+str(docCount)+'.txt','w') as filename:
filename.write(text.encode('utf-8'))
text = ''
chunkCount = 0
if docCount == totalDocs:
break
if __name__ == "__main__":
for lang in NEW_LANGS:
generateDocs(lang,CHUNKSIZE,TOTALDOCS)
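# The config module is not shown in this snippet; a minimal sketch of what it
# is assumed to provide:
#
#     ROOTDIR = 'corpus/'          # XML input hierarchy
#     TARGETDIR = 'documents/'     # text output hierarchy
#     NEW_LANGS = ['english', 'hindi']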
|
[
"amish1804@gmail.com"
] |
amish1804@gmail.com
|
d1cb18ffc13f0eada00419376bcaa748ede307f6
|
c91e1be16c43d7e461fd75bb268fa86aa231a997
|
/06/assembler/assemble
|
9b3daa5e2ffbb63f25c3555296fc690fdb9cf81d
|
[] |
no_license
|
benwilhelm/nand2tetris
|
a89c52b89b72b4d0a9152ac62be0a6ecf279f66b
|
291765a7cbb98053644f6226abcc76f4356d9a6d
|
refs/heads/master
| 2021-09-03T03:16:18.776137
| 2018-01-05T04:33:30
| 2018-01-05T04:33:30
| 110,318,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
#!/usr/bin/env python
import sys
import parser
import code
import symboltable
with open(sys.argv[1], 'r') as f:
    lines = f.readlines()
# remove comments and whitespace
commands = [parser.extractCommand(line) for line in lines]
# filter out lines that are not commands
commands = [command for command in commands if command != None]
# build symbol table
symbols = symboltable.buildTable(commands)
# remove lines that are just labels
commands = [command for command in commands if parser.commandType(command) != 'L_COMMAND']
# convert symbol commands to their line numbers or RAM addresses
commands = [symboltable.convertSymbol(command) for command in commands]
for command in commands:
    print(parser.parseCommand(command))
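# Usage sketch (assumed invocation; parser, code, and symboltable are sibling
# modules in this repo):
#
#     ./assemble Max.asm > Max.hack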
|
[
"ben@doublebeamdesign.com"
] |
ben@doublebeamdesign.com
|
|
d23a710daa0970e79663b65be0009ab46dc7b607
|
93a9c36e85bd753608516efe581edf96bbdc3580
|
/user/migrations/0002_auto_20190115_1838.py
|
aaec583aff5cb88eaad4c7d00cfc0d67e5237dca
|
[] |
no_license
|
zhouf1234/django_obj
|
bef6a13fc3d183070725fcb937da7a0c4a688e1c
|
bb10edc03dfdbe692b6293ffc3e1d33a374604cf
|
refs/heads/master
| 2020-05-05T03:05:24.838804
| 2019-04-05T10:18:27
| 2019-04-05T10:18:27
| 179,660,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# Generated by Django 2.1.3 on 2019-01-15 18:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='users',
old_name='nikename',
new_name='nickname',
),
]
|
[
"="
] |
=
|
ac97bd0d029519758be787a33fc1fa1192491180
|
cd99b28dd25b894e4a9d13e06ac7e8c077760b45
|
/server/parameter.py
|
89dee922b59a2f24d0cb64b8232e23814b384798
|
[] |
no_license
|
nlfox/hp
|
3288d4f9f2db86bc91eb0418721b56ca870d9b30
|
65802502452e00ebf71f9fd78aa7dcefbeca548b
|
refs/heads/master
| 2021-08-26T08:36:03.442921
| 2017-11-22T15:17:13
| 2017-11-22T15:17:13
| 107,770,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import json
class Parameter(object):
def __init__(self, jsonData):
self.obj = json.loads(jsonData)
self.program = self.obj["name"]
self.obj = self.obj["data"]
def getParam(self):
param = self.program + " "
        for i, v in self.obj.items():  # .items() works on both Python 2 and 3
param += "-" + str(i) + " " + str(v) + " "
return param
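# A minimal usage sketch (hypothetical payload):
#
#     p = Parameter('{"name": "solver", "data": {"n": 10, "tol": 0.1}}')
#     p.getParam()   # -> "solver -n 10 -tol 0.1 "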
|
[
"nlfox@msn.cn"
] |
nlfox@msn.cn
|
04639bfbcc22066b7701423101ce96385654d8df
|
9c53d9a199e7a64ff3041b4da3683d5122c53aca
|
/build/first_pkg/catkin_generated/pkg.develspace.context.pc.py
|
e18218a80176b9fd8834151103b4c5323b7008a3
|
[] |
no_license
|
NoaOr/robotics-ex2
|
bdd7cd0669158e3412e922921a0cf684785f23f2
|
1ca2d15d4b45e7db37f6c6b185b9e59fbf537928
|
refs/heads/master
| 2020-04-08T13:41:21.462335
| 2018-11-27T21:37:08
| 2018-11-27T21:37:08
| 159,402,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "first_pkg"
PROJECT_SPACE_DIR = "/home/noa/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"noaor7@gmail.com"
] |
noaor7@gmail.com
|
6e76664daad277a2abc5ab8a61795d3ad133efaa
|
c039f37a0f215efa7388b52ad1707c06aad5fd7d
|
/5-lstmsig/multirun/queuing.py
|
2e519983638181c2ffb6c8c6dd214a0476d483b0
|
[] |
no_license
|
vishalbelsare/phd-code
|
41ac4bceb21a8f6f3da17a4e3340b618c04d3420
|
f5aa3e18a30e9567bc0583ef89a274ef42795d2a
|
refs/heads/master
| 2021-06-12T09:18:26.685488
| 2019-04-21T21:56:19
| 2019-04-21T21:56:19
| 163,163,793
| 0
| 0
| null | 2021-04-04T18:10:46
| 2018-12-26T09:46:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from azure.servicebus import ServiceBusService, Message, Queue
import itertools
primaryKey="SOME_PRIMARY_KEY"
bus_service=ServiceBusService(
service_namespace="jezsbus",
shared_access_key_name="RootManageSharedAccessKey",
shared_access_key_value=primaryKey)
_name='taskqueue'
def queue_length():
return bus_service.get_queue(queue_name=_name).message_count
def empty_queue():
    while True:
msg=bus_service.receive_queue_message(queue_name=_name, peek_lock=False, timeout=0)
if msg.body is None:
if 0 == queue_length():
return
raise RuntimeError("failed to empty queue")
def create_queue():
bus_service.create_queue(_name)
#fill_queue and get_from_queue require something like a ParamSet:
#must provide from_string_index and size.
class ParameterSet:
def __init__(self, possibilities):
self.possibilities=possibilities
self.all_combinations=[list(i) for i in itertools.product(*possibilities)]
self.size = len(self.all_combinations)
def from_int_index(self,i):
return self.all_combinations[i]
def from_string_index(self,i):
return self.from_int_index(int(i))
class ExplicitParameterSet:
def __init__(self, wanted_combinations):
self.wanted_combinations = wanted_combinations
self.size = len(self.wanted_combinations)
def from_int_index(self,i):
return self.wanted_combinations[i]
def from_string_index(self,i):
return self.from_int_index(int(i))
def _fill_queue(maximum):
for i in range(maximum):
bus_service.send_queue_message(_name, Message(str(i)))
def fill_queue(parameters):
empty_queue()
_fill_queue(parameters.size)
#returns None when nothing to do
def get_from_queue(parameters):
msg = bus_service.receive_queue_message(queue_name=_name, peek_lock=False, timeout=0)
if msg.body is None:
return None
return parameters.from_string_index(msg.body)
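# A minimal usage sketch (hypothetical parameter grid; run_experiment is a
# placeholder for the caller's work function):
#
#     params = ParameterSet([[0.1, 0.01], ['adam', 'sgd']])
#     fill_queue(params)
#     combo = get_from_queue(params)
#     while combo is not None:
#         run_experiment(combo)
#         combo = get_from_queue(params)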
|
[
"bottler@users.noreply.github.com"
] |
bottler@users.noreply.github.com
|
5da9a6a2bba2798788ddb402cfd55e5f8751aefb
|
0d559c6336703a35b23a586bf130fd2d2fab0f26
|
/Game.py
|
56088bb5120bd0eeddc675472ed4bc6a9de6d284
|
[] |
no_license
|
yentingw/public-goods-game
|
dfda9e8fa9700f718eb8500acd29556506dd4950
|
95ccfa8dc97fef66376bd735601e6271f2ebf373
|
refs/heads/master
| 2020-09-16T10:55:43.699110
| 2019-11-24T13:13:57
| 2019-11-24T13:13:57
| 223,748,201
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,493
|
py
|
import argparse
import datetime
import logging
import logging.config
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import yaml
import csv
from JointQAgent import JointQAgent as jointA
from QAgent import QAgent as qA
from plots import Plotter
__game_config = {}
__logger = None
class Game(object):
def __init__(self, agent_config, game_config):
self.agent_type_list = game_config['agent_type_list']
self.episodes = game_config['episodes'] + 1
self.population_size = game_config['population_size']
self.group_size = game_config['group_size']
self.group_number = self.population_size / self.group_size
self.rounds = game_config['rounds']
self.F = game_config['F']
self.M = game_config['M']
self.cost = game_config['cost'] / (self.F + 1)
self.agents = {}
self.dir = game_config['output_directory']
self.logger = game_config['logger']
self.plotter = game_config['plotter']
self.timestamp = game_config['timestamp']
self.csv = game_config['csv']
self.plot = game_config['plot']
self.grand_table = [[0, 0] for i in range(self.population_size)]
self.heatmap_q_ls = []
self.heatmap_j_ls = []
self.plot_q_ls = [0 for i in range(self.rounds)]
self.plot_j_ls = [0 for i in range(self.rounds)]
        self.df_q = pd.DataFrame(columns=['Round', 'C%'])
        self.df_j = pd.DataFrame(columns=['Round', 'C%'])
        # per-episode cooperation samples appended by joint_q_agent_play
        self.df_episode_j = pd.DataFrame(columns=['Episode', 'Round', 'C%'])
for a in self.agent_type_list:
for i in range(self.population_size):
if a == 'QAgent':
if 'QAgent' in self.agents:
self.agents['QAgent'].append(qA(agent_config, i, self.rounds))
else:
self.agents['QAgent'] = [qA(agent_config, i, self.rounds)]
elif a == 'JointQAgent':
if 'JointQAgent' in self.agents:
self.agents['JointQAgent'].append(jointA(agent_config, i, self.rounds))
else:
self.agents['JointQAgent'] = [jointA(agent_config, i, self.rounds)]
def shuffle_group(self, agents):
np.random.shuffle(agents)
group_num = self.population_size / self.group_size
list_arrays = np.array_split(agents, group_num)
return [grp.tolist() for grp in list_arrays]
def play_game(self, episodes):
self.logger.info(f"START: {self.timestamp}")
for e in range(1, episodes+1):
if 'QAgent' in self.agents:
self.q_agent_play(e, episodes)
if 'JointQAgent' in self.agents:
self.joint_q_agent_play(e, episodes)
self.logger.info("Calculations finished. Begin Dumping...")
if self.csv:
self.logger.info("Writing to CSV")
if 'QAgent' in self.agents:
self.load_to_heatmap_csv(self.heatmap_q_ls, "Q_Agent")
self.load_to_plot_csv(self.plot_q_ls, self.df_q, "Q_Agent")
if 'JointQAgent' in self.agents:
self.load_to_heatmap_csv(self.heatmap_j_ls, "J_Agent")
self.load_to_plot_csv(self.plot_j_ls, self.df_j, "J_Agent")
if self.plot:
self.logger.info("Begin plotting")
if 'QAgent' in self.agents and 'JointQAgent' in self.agents:
self.plotter.QJ_plot(self.df_q, self.df_j)
# if 'QAgent' in self.agents:
# self.plotter.plotAverAgent(df_q_info_q, "Q-Learning Agent")
# if 'JointQAgent' in self.agents:
# self.plotter.plotAverAgent(df_q_info_j, "Joint Action Q-Learning QAgent")
self.logger.info(f"FINISH: {datetime.datetime.utcnow().isoformat()}")
def q_agent_play(self, episode, episodes):
groups = self.shuffle_group(self.agents['QAgent'])
self.logger.debug(f"Number of q groups: {len(groups)};\t agents: {[agent.id for grps in groups for agent in grps]}")
group_num = 0
for grp in groups:
group_num += 1
for r in range(self.rounds):
actions = []
for agent in grp:
agent_action = agent.choose_action(r, episode / episodes)
actions.append(agent_action)
self.logger.debug(f"episode: {episode}, round: {r}, group_num: {group_num}, agent_id: {agent.id}, agent_type: {agent.__class__.__name__}, action: {agent_action}")
c_count = self.count_c(actions) / self.group_size
self.plot_q_ls[r] += c_count
if (episode == episodes - 1) and (r == self.rounds - 1):
self.heatmap_q_ls.append(c_count)
rewards = self.calculate_reward(actions)
            for (agent, reward) in zip(grp, rewards):  # pair rewards with the agents of the current group
agent.update_reward(r, reward)
def joint_q_agent_play(self, episode, episodes):
j_groups = self.shuffle_group(self.agents['JointQAgent'])
self.logger.debug(f"Number of Joint Q agent groups: {len(j_groups)};\t agents: {[agent.id for grps in j_groups for agent in grps]}")
group_num = 0
for j_group in j_groups:
group_num += 1
history_id = [agent.id for agent in j_group]
history = []
for i in history_id:
history.append(self.grand_table[i])
for r in range(self.rounds):
actions = []
actions_tuple = []
for agent in j_group:
opp_list = create_opp_list(j_group, agent)
agent_action = agent.choose_action(r, history, opp_list, episode / episodes, history_id)
actions.append(agent_action)
actions_tuple.append((agent.id, agent_action))
self.logger.debug(f"episode: {episode}, round: {r}, group_num: {group_num}, agent_id: {agent.id}, agent_type: {agent.__class__.__name__}, action: {agent_action}, opp_list: {opp_list}")
c_count = self.count_c(actions) / self.group_size
self.plot_j_ls[r] += c_count
if (episode == episodes - 1) and (r == self.rounds - 1):
self.heatmap_j_ls.append(c_count)
if (episode % 10 == 0):
self.df_episode_j = self.df_episode_j.append({'Episode': episode, 'Round': r, 'C%': c_count}, ignore_index=True)
rewards = self.calculate_reward(actions)
for i in range(len(history)):
history[i][actions[i]] += 1
for (agent, reward) in zip(j_group, rewards):
agent.update_reward(r, reward, actions_tuple)
self.logger.debug(f"history_id: {history_id}, table: {self.grand_table}")
def count_c(self, actions):
count = 0
for i in actions:
if i == 0:
count += 1
return count
def load_to_heatmap_csv(self, heatmap_ls, agent_type):
c_count = 0
for i in heatmap_ls:
c_count += i
c_count /= self.group_number
ls = [self.population_size, self.F / 5, self.M / 5, c_count]
heatmap_path = f"outputs/heatmap_" + agent_type + ".csv"
with open(heatmap_path, 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(ls)
f.close()
def load_to_plot_csv(self, plot_ls, df, agent_type):
for i in range(self.rounds):
repeat = self.group_number * self.episodes
plot_ls[i] /= repeat
column1 = [i for i in range(self.rounds)]
column2 = plot_ls
a = np.array([column1, column2])
a = a.T
for i in a:
df = df.append({'Round': i[0], 'C%': i[1]}, ignore_index=True)
path = f"{self.dir}/plot_c%_" + agent_type + ".csv"
df.to_csv(path)
if agent_type == "Q_Agent":
self.df_q = df
if agent_type == "J_Agent":
self.df_j = df
#self.plotter.q_plot(df)
def calculate_reward(self, actions):
""" reward formula """
num_c = 0
for a in actions:
if a == 0:
num_c += 1
sigma = 0
delta_count = num_c - self.M
if delta_count < 0:
sigma = 0
else:
sigma = 1
d_reward = (num_c * self.F / self.group_size * self.cost) * sigma
c_reward = d_reward - self.cost
return [(c_reward, d_reward)[a] for a in actions]
def create_opp_list(agents, a):
opp_list = []
for agent in agents:
if agent != a:
opp_list.append(str(agent.getId()))
return opp_list
def toBinary(plannings):
return [int(i) for i in bin(plannings)[2:]]
def print_agents(agents, output_dir):
    # Standalone helper: takes the output directory explicitly, since there
    # is no self at module level.
    df_q_info = pd.DataFrame(columns=['round', 'agent_type', 'agent_id', 'acc_reward', 'q_value'])
    df_j_info = pd.DataFrame(columns=['round', 'agent_type', 'agent_id', 'acc_reward', 'q_value'])
    for agent in agents:
        if agent.__class__.__name__ == 'QAgent':
            df_q_info = df_q_info.append(agent.get_info())
        elif agent.__class__.__name__ == 'JointQAgent':
            df_j_info = df_j_info.append(agent.get_info())
    df_q_info.to_csv('{}/q_info.csv'.format(output_dir), index=False)
    df_j_info.to_csv('{}/j_info.csv'.format(output_dir), index=False)
def load_ini_config():
with open("game.yaml", 'r') as stream:
try:
global __game_config
__game_config = yaml.safe_load(stream)
except yaml.YAMLError as err:
raise(err)
def setup_directory(cfg):
"""
Create target Directory if it doesn't exist
"""
directory = f"{cfg.get('output_directory', 'outputs')}-F{cfg['F']}-M{cfg['M']}-P{cfg['population_size']}"
if not os.path.exists(directory):
os.mkdir(directory)
else:
directory = f"{directory}-{cfg['timestamp']}".replace(':', '-')
os.mkdir(directory)
return directory
def main():
""" main functions """
global __logger
load_ini_config()
agent_config = __game_config['agents']
game_config = __game_config['game']
with open('logging.yaml', 'r') as f:
logging.config.dictConfig(yaml.safe_load(f.read()))
__logger = logging.getLogger(__name__)
game_config['logger'] = logging.getLogger('game')
agent_config['logger'] = logging.getLogger('agent')
agent_config['episodes'] = game_config['episodes']
# Optionally Get F, M, and Population Overrides from CLI
parser = argparse.ArgumentParser(description='F,M,Population Overrides')
parser.add_argument('--F',
type=int,
default=game_config['F'])
parser.add_argument('--M',
type=int,
default=game_config['M'])
parser.add_argument('--P',
type=int,
default=game_config['population_size'])
args = parser.parse_args()
game_config['F'] = args.F
game_config['M'] = args.M
game_config['population_size'] = args.P
__logger.info(args)
game_config['timestamp'] = datetime.datetime.utcnow().isoformat()
game_config['output_directory'] = setup_directory(game_config)
game_config['plotter'] = Plotter(agent_config, game_config)
game = Game(agent_config, game_config)
game.play_game(game_config['episodes'])
if __name__== "__main__":
main()
|
[
"tingwang1223@gmail.com"
] |
tingwang1223@gmail.com
|
47754719a7927be9ffcd7eb7237789b985e80405
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/tpu/datasets 2.py
|
e975daf9f4e1326f82e35a1c71ecdf1c30bd154b
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:7884cb6a0cfa6e71f17a315d69eafbd78c0d930943f672ec88676d9ce3f90718
size 7899
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
b9d3c202609dd2fda40b9a9958ebbb400e37b431
|
c097a8dc88f856c651fdaa23a088f42967f54b07
|
/att_table/views.py
|
8252162ada2ad2e5132298adf06cb6d20af2145f
|
[] |
no_license
|
Yahyaabualhaj/django_react_integration_ex
|
9ee644294d4f079fc3ba699836c0752e32dffdbd
|
d9e78fc3d92e683362cc7fbf687a0431cf6025bb
|
refs/heads/master
| 2020-04-30T17:15:07.473089
| 2019-03-24T11:46:42
| 2019-03-24T11:46:42
| 176,973,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
from django.core import serializers
from django.shortcuts import render
from django.views.generic import (ListView)
from att_table.models import Vacation
class TablesList(ListView):
    model = Vacation
    context_object_name = 'lists'
    # queryset = Vacation.objects.all()
    template_name = 'att_table/react_tem.html'
    def get_context_data(self, **kwargs):
        # get_queryset must return a queryset, not an HttpResponse, or the
        # ListView machinery breaks; serialize to JSON in the context instead.
        context = super(TablesList, self).get_context_data(**kwargs)
        context['table'] = serializers.serialize('json', self.object_list)
        return context
def table(request):
vacation_data = Vacation.objects.all()
vacation_data_json = serializers.serialize('json', vacation_data)
return render(request,
"att_table/react_tem.html",
context={'table': vacation_data_json})
|
[
"noreply@github.com"
] |
Yahyaabualhaj.noreply@github.com
|
05ab8ad13167a301e5a49c1f9cad91ff928a7012
|
5d557b9e823a3461e832e7113b559b6fa623fbfd
|
/main/Final/Modular/Object_detection_picamera.py
|
03f85dbbd5194692b9b36d0bf8b2a8c648cbd510
|
[] |
no_license
|
vieri2006/SmartPlasticRecycler
|
fae8a6223daba6bdd01a5e9630a365e3b76f4e4b
|
04af1252bc6f0d74d0535e70af8cdc248a4ac649
|
refs/heads/master
| 2022-11-08T19:17:16.958560
| 2020-06-27T10:19:22
| 2020-06-27T10:19:22
| 275,344,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,000
|
py
|
######## Picamera Object Detection Using Tensorflow Classifier #########
#
# Author: Evan Juras
# Date: 4/15/18
# Description:
# This program uses a TensorFlow classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on a Picamera feed.
# It draws boxes and scores around the objects of interest in each frame from
# the Picamera. It also can be used with a webcam by adding "--usbcam"
# when executing this script from the terminal.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import tensorflow as tf
import argparse
import sys
# Set up camera constants
IM_WIDTH = 1280
IM_HEIGHT = 720
#IM_WIDTH = 640 Use smaller resolution for
#IM_HEIGHT = 480 slightly faster framerate
# Select camera type (if user enters --usbcam when calling this script,
# a USB webcam will be used)
camera_type = 'picamera'
parser = argparse.ArgumentParser()
parser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',
action='store_true')
args = parser.parse_args()
if args.usbcam:
camera_type = 'usb'
# This is needed since the working directory is the object_detection folder.
sys.path.append('..')
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')
# Number of classes the object detector can identify
NUM_CLASSES = 90
## Load the label map.
# Label maps map indices to category names, so that when the convolution
# network predicts `5`, we know that this corresponds to `airplane`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
font = cv2.FONT_HERSHEY_SIMPLEX
# Initialize camera and perform object detection.
# The camera has to be set up and used differently depending on if it's a
# Picamera or USB webcam.
# I know this is ugly, but I basically copy+pasted the code for the object
# detection loop twice, and made one work for Picamera and the other work
# for USB.
### Picamera ###
if camera_type == 'picamera':
# Initialize Picamera and grab reference to the raw capture
camera = PiCamera()
camera.resolution = (IM_WIDTH,IM_HEIGHT)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))
rawCapture.truncate(0)
for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
t1 = cv2.getTickCount()
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
frame = np.copy(frame1.array)
frame.setflags(write=1)
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
print(np.squeeze(boxes))
        # Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.40)
cv2.putText(frame,"FPS: {0:.2f}".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc = 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
rawCapture.truncate(0)
camera.close()
### USB webcam ###
elif camera_type == 'usb':
# Initialize USB webcam feed
camera = cv2.VideoCapture(0)
ret = camera.set(3,IM_WIDTH)
ret = camera.set(4,IM_HEIGHT)
while(True):
t1 = cv2.getTickCount()
# Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
ret, frame = camera.read()
frame_expanded = np.expand_dims(frame, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
        # Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.85)
cv2.putText(frame,"FPS: {0:.2f}".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc = 1/time1
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
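# Usage sketch (as the header describes): run on a Picamera by default, or
# pass --usbcam to use a USB webcam:
#
#     python Object_detection_picamera.py
#     python Object_detection_picamera.py --usbcam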
|
[
"noreply@github.com"
] |
vieri2006.noreply@github.com
|
31ea606b8a5805025bd159185a00aba285323a07
|
c3624cdd68bbefc419a4e67ed370a022d090e796
|
/audio_common/sound_play/scripts/soundplay_node.py
|
936938440a73a76564a9bf5da56c2ea73b8bd881
|
[] |
no_license
|
hubeihubei/Robot_waiter
|
37df356975b5ec812e86b11a780eae5c45db0c9e
|
e805c0bebb2d5d360039f85cdf4ed94271aeaf8d
|
refs/heads/master
| 2021-01-09T20:19:51.860246
| 2019-08-09T10:24:35
| 2019-08-09T10:24:35
| 63,237,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,687
|
py
|
#!/usr/bin/env python
#***********************************************************
#* Software License Agreement (BSD License)
#*
#* Copyright (c) 2009, Willow Garage, Inc.
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions
#* are met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#* * Redistributions in binary form must reproduce the above
#* copyright notice, this list of conditions and the following
#* disclaimer in the documentation and/or other materials provided
#* with the distribution.
#* * Neither the name of the Willow Garage nor the names of its
#* contributors may be used to endorse or promote products derived
#* from this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#***********************************************************
# Author: Blaise Gassend
import roslib
import rospy
import threading
import os
import logging
import sys
import traceback
import tempfile
from diagnostic_msgs.msg import DiagnosticStatus, KeyValue, DiagnosticArray
from sound_play.msg import SoundRequest, SoundRequestAction, SoundRequestResult, SoundRequestFeedback
import actionlib
from std_msgs.msg import String
try:
import pygst
pygst.require('0.10')
import gst
import gobject
except:
str="""
**************************************************************
Error opening pygst. Is gstreamer installed? (sudo apt-get install python-gst0.10
**************************************************************
"""
rospy.logfatal(str)
print str
exit(1)
def sleep(t):
try:
rospy.sleep(t)
except:
pass
class soundtype:
STOPPED = 0
LOOPING = 1
COUNTING = 2
def __init__(self, file, volume = 1.0):
self.lock = threading.RLock()
self.state = self.STOPPED
self.sound = gst.element_factory_make("playbin","player")
if (":" in file):
uri = file
elif os.path.isfile(file):
uri = "file://" + os.path.abspath(file)
else:
rospy.logerr('Error: URI is invalid: %s'%file)
self.uri = uri
self.volume = volume
self.sound.set_property('uri', uri)
self.sound.set_property("volume",volume)
self.staleness = 1
self.file = file
self.bus = self.sound.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message", self.on_stream_end)
def on_stream_end(self, bus, message):
if message.type == gst.MESSAGE_EOS:
self.state = self.STOPPED
def __del__(self):
# stop our GST object so that it gets garbage-collected
self.stop()
def update(self):
self.bus.poll(gst.MESSAGE_ERROR, 10)
def loop(self):
self.lock.acquire()
try:
self.staleness = 0
if self.state == self.COUNTING:
self.stop()
if self.state == self.STOPPED:
self.sound.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, 0)
self.sound.set_state(gst.STATE_PLAYING)
self.state = self.LOOPING
finally:
self.lock.release()
def stop(self):
if self.state != self.STOPPED:
self.lock.acquire()
try:
self.sound.set_state(gst.STATE_NULL)
self.state = self.STOPPED
finally:
self.lock.release()
def single(self):
self.lock.acquire()
try:
rospy.logdebug("Playing %s"%self.uri)
self.staleness = 0
if self.state == self.LOOPING:
self.stop()
self.sound.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, 0)
self.sound.set_state(gst.STATE_PLAYING)
self.state = self.COUNTING
finally:
self.lock.release()
def command(self, cmd):
if cmd == SoundRequest.PLAY_STOP:
self.stop()
elif cmd == SoundRequest.PLAY_ONCE:
self.single()
elif cmd == SoundRequest.PLAY_START:
self.loop()
def get_staleness(self):
self.lock.acquire()
position = 0
duration = 0
try:
position = self.sound.query_position(gst.FORMAT_TIME)[0]
duration = self.sound.query_duration(gst.FORMAT_TIME)[0]
except Exception, e:
position = 0
duration = 0
finally:
self.lock.release()
if position != duration:
self.staleness = 0
else:
self.staleness = self.staleness + 1
return self.staleness
def get_playing(self):
return self.state == self.COUNTING
class soundplay:
_feedback = SoundRequestFeedback()
_result = SoundRequestResult()
def stopdict(self,dict):
for sound in dict.values():
sound.stop()
def stopall(self):
self.stopdict(self.builtinsounds)
self.stopdict(self.filesounds)
self.stopdict(self.voicesounds)
def select_sound(self, data):
if data.sound == SoundRequest.PLAY_FILE:
if not data.arg2:
if not data.arg in self.filesounds.keys():
rospy.logdebug('command for uncached wave: "%s"'%data.arg)
try:
self.filesounds[data.arg] = soundtype(data.arg)
except:
rospy.logerr('Error setting up to play "%s". Does this file exist on the machine on which sound_play is running?'%data.arg)
return
else:
rospy.logdebug('command for cached wave: "%s"'%data.arg)
sound = self.filesounds[data.arg]
else:
absfilename = os.path.join(roslib.packages.get_pkg_dir(data.arg2), data.arg)
if not absfilename in self.filesounds.keys():
rospy.logdebug('command for uncached wave: "%s"'%absfilename)
try:
self.filesounds[absfilename] = soundtype(absfilename)
except:
rospy.logerr('Error setting up to play "%s" from package "%s". Does this file exist on the machine on which sound_play is running?'%(data.arg, data.arg2))
return
else:
rospy.logdebug('command for cached wave: "%s"'%absfilename)
sound = self.filesounds[absfilename]
elif data.sound == SoundRequest.SAY:
if not data.arg in self.voicesounds.keys():
rospy.logdebug('command for uncached text: "%s"' % data.arg)
txtfile = tempfile.NamedTemporaryFile(prefix='sound_play', suffix='.txt')
(wavfile,wavfilename) = tempfile.mkstemp(prefix='sound_play', suffix='.wav')
txtfilename=txtfile.name
os.close(wavfile)
voice = data.arg2
try:
txtfile.write(data.arg)
txtfile.flush()
os.system("text2wave -eval '("+voice+")' "+txtfilename+" -o "+wavfilename)
try:
if os.stat(wavfilename).st_size == 0:
raise OSError # So we hit the same catch block
except OSError:
rospy.logerr('Sound synthesis failed. Is festival installed? Is a festival voice installed? Try running "rosdep satisfy sound_play|sh". Refer to http://wiki.ros.org/sound_play/Troubleshooting')
return
self.voicesounds[data.arg] = soundtype(wavfilename)
finally:
txtfile.close()
else:
rospy.logdebug('command for cached text: "%s"'%data.arg)
sound = self.voicesounds[data.arg]
else:
rospy.logdebug('command for builtin wave: %i'%data.sound)
if not data.sound in self.builtinsounds:
params = self.builtinsoundparams[data.sound]
self.builtinsounds[data.sound] = soundtype(params[0], params[1])
sound = self.builtinsounds[data.sound]
if sound.staleness != 0 and data.command != SoundRequest.PLAY_STOP:
# This sound isn't counted in active_sounds
rospy.logdebug("activating %i %s"%(data.sound,data.arg))
self.active_sounds = self.active_sounds + 1
sound.staleness = 0
# if self.active_sounds > self.num_channels:
# mixer.set_num_channels(self.active_sounds)
# self.num_channels = self.active_sounds
return sound
def callback(self,data):
if not self.initialized:
return
self.mutex.acquire()
# Force only one sound at a time
self.stopall()
try:
if data.sound == SoundRequest.ALL and data.command == SoundRequest.PLAY_STOP:
self.stopall()
else:
sound = self.select_sound(data)
sound.command(data.command)
except Exception, e:
rospy.logerr('Exception in callback: %s'%str(e))
rospy.loginfo(traceback.format_exc())
finally:
self.mutex.release()
rospy.logdebug("done callback")
# Purge sounds that haven't been played in a while.
def cleanupdict(self, dict):
purgelist = []
for (key,sound) in dict.iteritems():
try:
staleness = sound.get_staleness()
except Exception, e:
rospy.logerr('Exception in cleanupdict for sound (%s): %s'%(str(key),str(e)))
staleness = 100 # Something is wrong. Let's purge and try again.
#print "%s %i"%(key, staleness)
if staleness >= 10:
purgelist.append(key)
if staleness == 0: # Sound is playing
self.active_sounds = self.active_sounds + 1
for key in purgelist:
rospy.logdebug('Purging %s from cache'%key)
dict[key].stop() # clean up resources
del dict[key]
def cleanup(self):
self.mutex.acquire()
try:
self.active_sounds = 0
self.cleanupdict(self.filesounds)
self.cleanupdict(self.voicesounds)
self.cleanupdict(self.builtinsounds)
except:
rospy.loginfo('Exception in cleanup: %s'%sys.exc_info()[0])
finally:
self.mutex.release()
def diagnostics(self, state):
try:
da = DiagnosticArray()
ds = DiagnosticStatus()
ds.name = rospy.get_caller_id().lstrip('/') + ": Node State"
if state == 0:
ds.level = DiagnosticStatus.OK
ds.message = "%i sounds playing"%self.active_sounds
ds.values.append(KeyValue("Active sounds", str(self.active_sounds)))
ds.values.append(KeyValue("Allocated sound channels", str(self.num_channels)))
ds.values.append(KeyValue("Buffered builtin sounds", str(len(self.builtinsounds))))
ds.values.append(KeyValue("Buffered wave sounds", str(len(self.filesounds))))
ds.values.append(KeyValue("Buffered voice sounds", str(len(self.voicesounds))))
elif state == 1:
ds.level = DiagnosticStatus.WARN
ds.message = "Sound device not open yet."
else:
ds.level = DiagnosticStatus.ERROR
ds.message = "Can't open sound device. See http://wiki.ros.org/sound_play/Troubleshooting"
da.status.append(ds)
da.header.stamp = rospy.get_rostime()
self.diagnostic_pub.publish(da)
except Exception, e:
rospy.loginfo('Exception in diagnostics: %s'%str(e))
def execute_cb(self, data):
data = data.sound_request
if not self.initialized:
return
self.mutex.acquire()
# Force only one sound at a time
self.stopall()
try:
if data.sound == SoundRequest.ALL and data.command == SoundRequest.PLAY_STOP:
self.stopall()
else:
sound = self.select_sound(data)
sound.command(data.command)
r = rospy.Rate(1)
start_time = rospy.get_rostime()
success = True
while sound.get_playing():
sound.update()
if self._as.is_preempt_requested():
rospy.loginfo('sound_play action: Preempted')
sound.stop()
self._as.set_preempted()
success = False
break
self._feedback.playing = sound.get_playing()
self._feedback.stamp = rospy.get_rostime() - start_time
self._as.publish_feedback(self._feedback)
r.sleep()
if success:
self._result.playing = self._feedback.playing
self._result.stamp = self._feedback.stamp
rospy.loginfo('sound_play action: Succeeded')
self._as.set_succeeded(self._result)
except Exception, e:
rospy.logerr('Exception in actionlib callback: %s'%str(e))
rospy.loginfo(traceback.format_exc())
finally:
self.mutex.release()
rospy.logdebug("done actionlib callback")
def __init__(self):
rospy.init_node('sound_play')
self.diagnostic_pub = rospy.Publisher("/diagnostics", DiagnosticArray, queue_size=1)
rootdir = os.path.join(roslib.packages.get_pkg_dir('sound_play'),'sounds')
self.builtinsoundparams = {
SoundRequest.BACKINGUP : (os.path.join(rootdir, 'BACKINGUP.ogg'), 0.1),
SoundRequest.NEEDS_UNPLUGGING : (os.path.join(rootdir, 'NEEDS_UNPLUGGING.ogg'), 1),
SoundRequest.NEEDS_PLUGGING : (os.path.join(rootdir, 'NEEDS_PLUGGING.ogg'), 1),
SoundRequest.NEEDS_UNPLUGGING_BADLY : (os.path.join(rootdir, 'NEEDS_UNPLUGGING_BADLY.ogg'), 1),
SoundRequest.NEEDS_PLUGGING_BADLY : (os.path.join(rootdir, 'NEEDS_PLUGGING_BADLY.ogg'), 1),
}
self.no_error = True
self.initialized = False
self.active_sounds = 0
self.mutex = threading.Lock()
sub = rospy.Subscriber("robotsound", SoundRequest, self.callback)
self._as = actionlib.SimpleActionServer('sound_play', SoundRequestAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
self.mutex.acquire()
self.sleep(0.5) # For ros startup race condition
self.diagnostics(1)
while not rospy.is_shutdown():
while not rospy.is_shutdown():
self.init_vars()
self.no_error = True
self.initialized = True
self.mutex.release()
try:
self.idle_loop()
# Returns after inactive period to test device availability
#print "Exiting idle"
except:
rospy.loginfo('Exception in idle_loop: %s'%sys.exc_info()[0])
finally:
self.mutex.acquire()
self.diagnostics(2)
self.mutex.release()
def init_vars(self):
self.num_channels = 10
self.builtinsounds = {}
self.filesounds = {}
self.voicesounds = {}
self.hotlist = []
if not self.initialized:
rospy.loginfo('sound_play node is ready to play sound')
def sleep(self, duration):
try:
rospy.sleep(duration)
except rospy.exceptions.ROSInterruptException:
pass
def idle_loop(self):
self.last_activity_time = rospy.get_time()
while (rospy.get_time() - self.last_activity_time < 10 or
len(self.builtinsounds) + len(self.voicesounds) + len(self.filesounds) > 0) \
and not rospy.is_shutdown():
#print "idle_loop"
self.diagnostics(0)
self.sleep(1)
self.cleanup()
#print "idle_exiting"
def callback(data):
    rospy.loginfo(rospy.get_caller_id() + ": I heard %s", data.data)
def listener():
rospy.init_node('listener', anonymous=True)
rospy.Subscriber("chatter", String, callback)
rospy.spin()
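# This listener mirrors the standard rospy subscriber tutorial. As a rough
# manual check (assuming a running roscore), one can publish to the topic
# from a shell with:
#   rostopic pub /chatter std_msgs/String "data: 'hello'"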
if __name__ == '__main__':
soundplay()
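    # Note: soundplay() blocks in its __init__ loop until shutdown, so the
    # listener() call below is effectively unreachable as written.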
listener()
|
[
"2752233840@qq.com"
] |
2752233840@qq.com
|
044a6ce242357a8fa5278a930a9a768cde9209e7
|
9a68aafce22f8087bfad4e0449495d5287bc8ebc
|
/bigdata/hw3/hw3c.py
|
3eff32401c9d242abe210784b1470938c652492a
|
[
"MIT"
] |
permissive
|
vador/coursera-homework
|
5f0904c9781526fa0765d3c17e1bea1f3cefd120
|
e94ee76f910fe6a5b1602252fc1869575bd876fe
|
refs/heads/master
| 2021-01-17T23:23:13.123216
| 2013-10-05T20:20:20
| 2013-10-05T20:20:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,515
|
py
|
# -*- coding: utf-8 -*-
import mincemeat
import glob
from stopwords import *
datasource = dict()
key = 0
for fname in glob.glob('hw3data/*'):
with open(fname) as f:
content = f.readlines()
for line in content:
datasource[key] = line
key = key + 1
def mapfn(k, v):
    import string
    import logging  # mapfn is shipped to mincemeat workers; import locally so it is self-contained
allStopWords={'a':1, 'about':1, 'above':1, 'after':1, 'again':1, 'against':1, 'all':1, 'am':1, 'an':1, 'and':1, 'any':1, 'are':1, 'arent':1, 'as':1, 'at':1, 'be':1, 'because':1, 'been':1, 'before':1, 'being':1, 'below':1, 'between':1, 'both':1, 'but':1, 'by':1, 'cant':1, 'cannot':1, 'could':1, 'couldnt':1, 'did':1, 'didnt':1, 'do':1, 'does':1, 'doesnt':1, 'doing':1, 'dont':1, 'down':1, 'during':1, 'each':1, 'few':1, 'for':1, 'from':1, 'further':1, 'had':1, 'hadnt':1, 'has':1, 'hasnt':1, 'have':1, 'havent':1, 'having':1, 'he':1, 'hed':1, 'hell':1, 'hes':1, 'her':1, 'here':1, 'heres':1, 'hers':1, 'herself':1, 'him':1, 'himself':1, 'his':1, 'how':1, 'hows':1, 'i':1, 'id':1, 'ill':1, 'im':1, 'ive':1, 'if':1, 'in':1, 'into':1, 'is':1, 'isnt':1, 'it':1, 'its':1, 'its':1, 'itself':1, 'lets':1, 'me':1, 'more':1, 'most':1, 'mustnt':1, 'my':1, 'myself':1, 'no':1, 'nor':1, 'not':1, 'of':1, 'off':1, 'on':1, 'once':1, 'only':1, 'or':1, 'other':1, 'ought':1, 'our':1, 'ours ':1, 'ourselves':1, 'out':1, 'over':1, 'own':1, 'same':1, 'shant':1, 'she':1, 'shed':1, 'shell':1, 'shes':1, 'should':1, 'shouldnt':1, 'so':1, 'some':1, 'such':1, 'than':1, 'that':1, 'thats':1, 'the':1, 'their':1, 'theirs':1, 'them':1, 'themselves':1, 'then':1, 'there':1, 'theres':1, 'these':1, 'they':1, 'theyd':1, 'theyll':1, 'theyre':1, 'theyve':1, 'this':1, 'those':1, 'through':1, 'to':1, 'too':1, 'under':1, 'until':1, 'up':1, 'very':1, 'was':1, 'wasnt':1, 'we':1, 'wed':1, 'well':1, 'were':1, 'weve':1, 'were':1, 'werent':1, 'what':1, 'whats':1, 'when':1, 'whens':1, 'where':1, 'wheres':1, 'which':1, 'while':1, 'who':1, 'whos':1, 'whom':1, 'why':1, 'whys':1, 'with':1, 'wont':1, 'would':1, 'wouldnt':1, 'you':1, 'youd':1, 'youll':1, 'youre':1, 'youve':1, 'your':1, 'yours':1, 'yourself':1, 'yourselves':1}
info = v.split(':::')
id = info[0]
authorlist = info[1]
title = info[2]
title = title.lower()
    title = title.replace('-', ' ')  # keep the result; the original call discarded it
    title = title.translate(None, string.punctuation)
    words = title.split()
    logging.info("words: %s" % (words,))
wordsStopped = []
for w in words:
if w not in allStopWords:
wordsStopped.append(w)
logging.info("wordsstopped: %s" % (wordsStopped,))
for author in authorlist.split('::'):
logging.info("author: %s" % (author,))
for w in wordsStopped:
yield author, w
def reducefn(k, vs):
logging.info("vs received: %s" % (vs))
kvs = dict()
#vvs = [item for sublist in vs for item in sublist]
for w in vs:
if w in kvs:
kvs[w] = kvs[w] + 1
else:
kvs[w] = 1
return kvs
def obsolete():
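    # Dead code kept for reference: an earlier version of reducefn that also
    # sorted the words by count. Note that it refers to `vs`, which is not
    # defined in this scope, so it would fail if actually called.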
kvs = dict()
for w in vs:
if w in kvs:
kvs[w] = kvs[w] + 1
else:
kvs[w] = 1
wordlist = sorted(kvs, key=lambda key: kvs[key], reverse=True)
countlist = list()
for w in wordlist:
countlist.append(kvs[w])
result = (wordlist, countlist)
return result
s = mincemeat.Server()
s.datasource = datasource
s.mapfn = mapfn
s.reducefn = reducefn
results = s.run_server(password="changeme")
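# `results` maps each author to a dict of word counts, e.g. (hypothetical data):
#     {'john doe': {'learning': 3, 'networks': 2}, ...}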
#for w in results:
# print w, results[w]
#print results
|
[
"vador@grabeuh.com"
] |
vador@grabeuh.com
|
585ca217e56ae4d3a6ae723a1d01f0076b99a9a3
|
e861bd6ff0e2ebeb95e1c4feb6a8ba8df069c588
|
/backend/board/admin.py
|
7418f5bdeb9fdc7d3baf639f377bc09ffe583e7f
|
[
"MIT"
] |
permissive
|
lgvaioli/kanbanlache
|
6924c7b1e9d70d434a3f9accb26e36037ceddfa6
|
08e5473946eb67f61acbf799ebc8c555d31d5ec1
|
refs/heads/master
| 2023-02-19T21:06:47.865463
| 2021-01-16T16:13:57
| 2021-01-16T16:13:57
| 276,716,456
| 0
| 0
|
MIT
| 2021-01-16T16:13:59
| 2020-07-02T18:15:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
from django.contrib import admin
from .models import Board, Section, Task
admin.site.register(Board)
admin.site.register(Section)
admin.site.register(Task)
|
[
"laureano3400@gmail.com"
] |
laureano3400@gmail.com
|
7f4eb287098db534b7d8e7201b840a58822f9e26
|
95155d0722a0bf57b7d50e03bfe2e95552b8f152
|
/meating_planer/settings.py
|
bca192dd626bef07013a5f2fa0c448144ad9476d
|
[] |
no_license
|
amiruddinsaifi/django.github.io
|
80aa5004271a7892f54ab3acaa7ec0222bf26008
|
1a36a243f5c821184ecbac3ebb29529f9e9e2d25
|
refs/heads/master
| 2022-04-24T01:13:52.459451
| 2020-04-26T03:48:06
| 2020-04-26T03:48:06
| 258,932,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
"""
Django settings for meating_planer project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nidey45$c+%$y(@r6pcc52d&40u+gu_f12*jlyzs)uf-q61u5s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meating_planer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meating_planer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
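# A hypothetical PostgreSQL configuration would swap in something like the
# following (the engine path is real; the credentials are placeholders):
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'meating_planer',
#         'USER': 'dbuser',
#         'PASSWORD': 'dbpassword',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }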
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"amiruddinsaifi017@gmail.com"
] |
amiruddinsaifi017@gmail.com
|
86bc9833945f1dc98c5dfc6fc8b6ecd1c82ad884
|
a9c7fb130854beb86568ad082ccb1a05ef0ef6a6
|
/Advocate/migrations/0019_auto_20160904_1920.py
|
49cfcf33fe5192c1bf5b2412736ae30cbfde3dc6
|
[] |
no_license
|
HNMN3/Advocate_Diary_New
|
deb85164ebebc1c1593434c89c96ea4f522bcb2b
|
a5338a344c64e4efb7668b2c70756d380e01cff1
|
refs/heads/master
| 2021-06-08T11:48:39.456657
| 2016-09-25T07:20:26
| 2016-09-25T07:20:26
| 69,147,386
| 0
| 1
| null | 2021-03-23T12:34:51
| 2016-09-25T06:19:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 851
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-04 13:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Advocate', '0018_auto_20160904_1917'),
]
operations = [
migrations.RemoveField(
model_name='case',
name='case_stage',
),
migrations.RemoveField(
model_name='case',
name='case_type',
),
migrations.RemoveField(
model_name='case',
name='court_of',
),
migrations.RemoveField(
model_name='casehistory',
name='case',
),
migrations.DeleteModel(
name='Case',
),
migrations.DeleteModel(
name='CaseHistory',
),
]
|
[
"hnmn3.nitin@gmail.com"
] |
hnmn3.nitin@gmail.com
|
aaf8624e9438adbb1bb0ab3cf3146fa9f9a3ff19
|
b657a5c8f2aec7f042bbb01918daaac0f6d66f61
|
/Assignment3/P4_A3.py
|
20fa3c4e00a2c675e5fd7ec6357ed17574d3601c
|
[] |
no_license
|
Tushargohel/Assignment_py
|
de363a634c2ae551c23b6318d2b664e5d63405ba
|
1ef9b4f454a025ee5b7aae462e0182de4059bdc7
|
refs/heads/master
| 2021-01-19T12:31:06.729673
| 2015-04-24T07:06:14
| 2015-04-24T07:06:14
| 32,247,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
list1 = [a for a in range(-10, 10)]
print(list1)
print("maximum num is :-", max(list1))
print("minimum num is :-", min(list1))
print("sum of list elements is :-", sum(list1))
|
[
"tushargohel25@gmail.com"
] |
tushargohel25@gmail.com
|
a1883cd02094762301a57b90b0b4ccc9c937c274
|
7955cc1c3153982977774cf4ea035f9afc831d5d
|
/start_point/db/run_sql.py
|
6e4934b64e2ccee1c6d3ce8e117e3f6e441ce2ff
|
[] |
no_license
|
SinnottTM/codeclan_w4_d3_sql_RESTful_routes
|
506a1102eb007d471fc522038678edd155cf2730
|
b66d53f8f69bce5cb57c98005f49fa97fbcc397a
|
refs/heads/main
| 2023-03-25T11:46:10.885485
| 2021-03-18T01:14:04
| 2021-03-18T01:14:04
| 348,899,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
# run_sql establishes the connection with the database: it connects, executes a
# single statement, commits, and cleans up the connection even when errors occur.
import psycopg2
import psycopg2.extras as ext
def run_sql(sql, values = None):
conn = None
results = []
try:
conn=psycopg2.connect("dbname='task_manager'")
cur = conn.cursor(cursor_factory=ext.DictCursor)
cur.execute(sql, values)
conn.commit()
results = cur.fetchall()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return results
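# A minimal usage sketch (assuming the 'task_manager' database has a `tasks` table):
#
#     rows = run_sql("SELECT * FROM tasks WHERE id = %s", [1])
#
# Note that for statements that return no rows (e.g. a plain INSERT or DELETE),
# cur.fetchall() raises an error that is swallowed by the except block above,
# so run_sql simply returns the empty results list in that case.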
|
[
"tim.sinnott86@gmail.com"
] |
tim.sinnott86@gmail.com
|
a3b1f3ae9127584f887fe77bf1beecd4298e3577
|
8ae7adaa9402155278a7cea09435ab9c7fc96700
|
/projet/the_eight_queens_puzzle.py
|
035dc624893697eb54ec58877f7bfd9d489debfa
|
[] |
no_license
|
FirelightFlagboy/OPC-python
|
cafc69d2b74d9c8187ad7fa51d599a99e7914460
|
0fb074ba5421870ebb36a2b6f5eb9107fd02f296
|
refs/heads/master
| 2020-03-19T04:27:34.905183
| 2018-06-02T15:48:29
| 2018-06-02T15:48:29
| 135,830,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
# -*-coding:utf-8 -*
import os
from array import array
nb_sol = 0
def ft_add_glob():
global nb_sol
nb_sol += 1
def ft_print_sol_fast(tab):
src = str()
for i in range(0,8):
src += str(tab[i] + 1) + " "
print(src)
def ft_print_sol_fancy(tab):
src = "._._._._._._._._.\n"
for line in range(0, 8):
src += "|"
for row in range(0, 8):
if row == tab[line]:
src += "X"
else:
src += " "
src += "|"
src += "\n._._._._._._._._.\n"
print(src)
def ft_is_free(tab, line, row):
    # A square is free if no queen already placed on a previous line shares
    # its column or one of its diagonals. (The original version repeated the
    # same check eight times per line via a redundant inner loop.)
    for li in range(0, line):
        r = tab[li]
        if row == r or abs(line - li) == abs(row - r):
            return False
    return True
def ft_place_queens(tab, line, dysplay):
row = 0
if line >= 8:
if dysplay == 1:
ft_print_sol_fancy(tab)
elif dysplay == 2:
ft_print_sol_fast(tab)
ft_add_glob()
else:
for row in range(0, 8):
if ft_is_free(tab, line, row):
tab[line] = row
ft_place_queens(tab, line + 1, dysplay)
tab[line] = 0
def ft_cmp(s1, s2):
l1 = len(s1)
l2 = len(s2)
i = 0
while i < l1 and i < l2 and s1[i] == s2[i]:
i += 1
    if i >= l1 and i >= l2:
        return 0
    elif i >= l1 and i < l2:
        return -ord(s2[i])
    elif i < l1 and i >= l2:
        return ord(s1[i])
    else:
        return ord(s1[i]) - ord(s2[i])  # first differing character decides
def main():
"""lancer la fonction qui vas chercher
les solutions"""
print("====Dysplay Method====")
print("-1 fancy/slow")
print("-2 fast/speed")
print("======================")
error = 1
    while error == 1:
        error = 0
        choice = input("choose an option:\n>>>")
        try:
            choice = int(choice)
        except (TypeError, ValueError, NameError) as e:
            print("cannot convert \"{}\" to an integer".format(choice))
            error = 1
dysplay = choice
tab = array('i')
for i in range(0,8):
tab.append(0)
ft_place_queens(tab,0, dysplay)
print("nb de solution :", nb_sol)
os.system("pause")
if __name__ == '__main__':
"""permet de lancer le programme principale"""
main()
|
[
"firelight.flagboy@gmail.com"
] |
firelight.flagboy@gmail.com
|
09eed3dbf259eeefd3093603ca08efeaff52c8b8
|
f3d8e1351e52526959e2d44d72fd716924f1751d
|
/problems/406_queue_reconstruction_by_height.py
|
81b529c40406d2fbab1e058a00ed3324524932e3
|
[] |
no_license
|
xueyuanl/leetcode-py
|
c27a4faff5b9040d57cf864d3a11f1683d8182e3
|
03d3e34522c8c819388634ab4b63077da864a4e1
|
refs/heads/master
| 2021-07-14T23:40:32.913822
| 2021-07-14T13:43:19
| 2021-07-14T13:43:19
| 206,973,737
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
class Solution(object):
"""
best explanation: https://leetcode.com/problems/queue-reconstruction-by-height/discuss/167308/Python-solution
"""
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people_sorted = sorted(people, key=lambda x: (-x[0], x[1]))
res = []
for p in people_sorted:
res.insert(p[1], p)
return res
if __name__ == '__main__':
pass
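    # A quick sanity check using the example from the problem statement:
    # [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]] reconstructs to
    # [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]].
    people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
    print(Solution().reconstructQueue(people))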
|
[
"15186846+xueyuanl@users.noreply.github.com"
] |
15186846+xueyuanl@users.noreply.github.com
|
ce8b754ec0abd08554af81fdd5d0f1b868c711bd
|
bb38f63620d00b9b9d21e6f52f8e541440f660c1
|
/functions/artificial_generation/dvf_generation.py
|
9fb8898679c1e9ea0d35306396f46d26b834302b
|
[
"Apache-2.0"
] |
permissive
|
hsokooti/RegNet
|
5f7ba712fb3997e3b9af73966f0c06f0f41797a3
|
28a8b6132677bb58e9fc811c0dd15d78913c7e86
|
refs/heads/master
| 2023-03-22T14:44:14.630764
| 2021-03-17T22:51:34
| 2021-03-17T22:51:34
| 116,586,770
| 211
| 63
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,843
|
py
|
from . import utils as ag_utils
import copy
from joblib import Parallel, delayed
import logging
import multiprocessing
import numpy as np
import os
import SimpleITK as sitk
import scipy.ndimage as ndimage
import functions.image.image_processing as ip
import functions.setting.setting_utils as su
def zero(im_input_sitk):
size_im = im_input_sitk.GetSize()[::-1]
dvf = np.zeros(size_im+(3,))
return dvf
def single_freq(setting, im_info, stage, im_input_sitk, gonna_generate_next_im=False):
im_info_su = {'data': im_info['data'], 'deform_exp': im_info['deform_exp'], 'type_im': im_info['type_im'],
'cn': im_info['cn'], 'dsmooth': im_info['dsmooth'], 'stage': stage, 'padto': im_info['padto']}
seed_number = ag_utils.seed_number_by_im_info(im_info, 'single_freq',
stage=stage, gonna_generate_next_im=gonna_generate_next_im)
deform_number = im_info['deform_number']
if gonna_generate_next_im:
max_deform = setting['deform_exp'][im_info['deform_exp']]['NextIm_MaxDeform']
dim_im = 3 # The deformation of the NextIm is always 3D
seed_number = seed_number + 1
grid_border_to_zero = setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_SetGridBorderToZero'][0]
grid_spacing = setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_BSplineGridSpacing'][0]
grid_smoothing_sigma = [i/stage for i in setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_GridSmoothingSigma'][0]]
bspline_transform_address = su.address_generator(setting, 'NextBSplineTransform', **im_info_su)
bspline_im_address = su.address_generator(setting, 'NextBSplineTransformIm', **im_info_su)
else:
max_deform = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_MaxDeformRatio'][deform_number]
dim_im = 3
grid_border_to_zero = setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_SetGridBorderToZero'][deform_number]
grid_spacing = setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_BSplineGridSpacing'][deform_number]
grid_smoothing_sigma = [i/stage for i in setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_GridSmoothingSigma'][deform_number]]
bspline_transform_address = su.address_generator(setting, 'BSplineTransform', **im_info_su)
bspline_im_address = su.address_generator(setting, 'BSplineTransformIm', **im_info_su)
random_state = np.random.RandomState(seed_number)
if setting['DVFPad_S'+str(stage)] > 0:
# im_input is already zeropadded in this case
padded_mm = setting['DVFPad_S'+str(stage)] * im_input_sitk.GetSpacing()[0]
grid_border_to_zero = (grid_border_to_zero + np.ceil(np.repeat(padded_mm, int(dim_im)) / grid_spacing)).astype(np.int)
if len(np.unique(im_input_sitk.GetSpacing())) > 1:
raise ValueError('dvf_generation: padding is only implemented for isotropic voxel size. current voxel size = [{}, {}, {}]'.format(
im_input_sitk.GetSpacing()[0], im_input_sitk.GetSpacing()[1], im_input_sitk.GetSpacing()[2]))
bcoeff = bspline_coeff(im_input_sitk, max_deform, grid_border_to_zero, grid_smoothing_sigma,
grid_spacing, random_state, dim_im, artificial_generation='single_frequency')
if setting['WriteBSplineTransform']:
sitk.WriteTransform(bcoeff, bspline_transform_address)
bspline_im_sitk_tuple = bcoeff.GetCoefficientImages()
    bspline_im = np.concatenate((np.expand_dims(sitk.GetArrayFromImage(bspline_im_sitk_tuple[0]), axis=-1),
                                 np.expand_dims(sitk.GetArrayFromImage(bspline_im_sitk_tuple[1]), axis=-1),
                                 np.expand_dims(sitk.GetArrayFromImage(bspline_im_sitk_tuple[2]), axis=-1)),
                                axis=-1)  # use component [2] for z; the original repeated [1] and dropped z
bspline_spacing = bspline_im_sitk_tuple[0].GetSpacing()
    bspline_origin = [list(bspline_im_sitk_tuple[0].GetOrigin())[i] + list(im_input_sitk.GetOrigin())[i] for i in range(3)]
    bspline_direction = im_input_sitk.GetDirection()
    bspline_im_sitk = ip.array_to_sitk(bspline_im, origin=bspline_origin, spacing=bspline_spacing, direction=bspline_direction, is_vector=True)
sitk.WriteImage(bspline_im_sitk, bspline_im_address)
dvf_filter = sitk.TransformToDisplacementFieldFilter()
dvf_filter.SetSize(im_input_sitk.GetSize())
dvf_sitk = dvf_filter.Execute(bcoeff)
dvf = sitk.GetArrayFromImage(dvf_sitk)
mask_to_zero = setting['deform_exp'][im_info['deform_exp']]['MaskToZero']
if mask_to_zero is not None and not gonna_generate_next_im:
sigma = setting['deform_exp'][im_info['deform_exp']]['SingleFrequency_BackgroundSmoothingSigma'][deform_number]
dvf = do_mask_to_zero_gaussian(setting, im_info_su, dvf, mask_to_zero, stage, max_deform, sigma)
if setting['deform_exp'][im_info['deform_exp']]['DVFNormalization']:
dvf = normalize_dvf(dvf, max_deform)
return dvf
def respiratory_motion(setting, im_info, stage, moving_image_mode='Exhale'):
"""
Respiratory motion consists of four deformations: [2009 Hub A stochastic approach to estimate the uncertainty]
1) Extension of the Chest in the Transversal Plane with scale of s0
2) Decompression of the Lung in Cranio-Caudal Direction with maximum of t0
3) Random Deformation
4) Tissue Sliding Between Lung and Rib Cage (not implemented yet)
:param setting:
:param im_info:
:param stage:
:param moving_image_mode: 'Exhale' : mode_coeff = 1, 'Inhale': mode_coeff = -1
dvf[:, :, :, 2] = mode_coeff * dvf_craniocaudal
dvf[:, :, :, 1] = mode_coeff * dvf_anteroposterior
:return:
"""
im_info_su = {'data': im_info['data'], 'deform_exp': im_info['deform_exp'], 'type_im': im_info['type_im'],
'cn': im_info['cn'], 'dsmooth': im_info['dsmooth'], 'stage': stage, 'padto': im_info['padto']}
seed_number = ag_utils.seed_number_by_im_info(im_info, 'respiratory_motion', stage=stage)
random_state = np.random.RandomState(seed_number)
deform_number = im_info['deform_number']
t0_max = setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_t0'][deform_number]
s0_max = setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_s0'][deform_number]
max_deform = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_MaxDeformRatio'][deform_number]
max_deform_single_freq = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_SingleFrequency_MaxDeformRatio'][deform_number]
grid_border_to_zero = setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_SetGridBorderToZero'][deform_number]
grid_spacing = setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_BSplineGridSpacing'][deform_number]
grid_smoothing_sigma = [i / stage for i in setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_GridSmoothingSigma'][deform_number]]
t0 = random_state.uniform(0.8 * t0_max, 1.1 * t0_max)
s0 = random_state.uniform(0.8 * s0_max, 1.1 * s0_max)
if moving_image_mode == 'Inhale':
mode_coeff = -1
else:
mode_coeff = 1
im_sitk = sitk.ReadImage(su.address_generator(setting, 'Im', **im_info_su))
lung_im = sitk.GetArrayFromImage(sitk.ReadImage(su.address_generator(setting, 'Lung', **im_info_su))).astype(np.bool)
i_lung = np.where(lung_im)
diaphragm_slice = np.min(i_lung[0])
anteroposterior_dim = 1
shift_of_center_scale = random_state.uniform(2, 12) # in voxel
center_scale = np.round(np.max(i_lung[anteroposterior_dim]) - shift_of_center_scale / stage) # 10 mm above the maximum lung. will be approximately close to vertebra
# sliding motion
# mask_rib = im > 300
# r = 3
# struct = np.ones([2*r+1, 2*r+1, 2*r+1], dtype=np.bool)
# mask_rib_close = ndimage.morphology.binary_closing(mask_rib, structure=struct)
# slc = 50
# import matplotlib.pyplot as plt
# plt.figure(); plt.imshow(im[slc, :, :], cmap='gray')
# plt.figure(); plt.imshow(mask_rib[slc, :, :])
# plt.figure(); plt.imshow(lung_im[slc, :, :])
# plt.figure(); plt.imshow(mask_rib_close[slc, :, :])
logging.debug('Diaphragm slice is ' + str(diaphragm_slice))
indices = [None] * 3
indices[0], indices[1], indices[2] = [i * stage for i in np.meshgrid(np.arange(0, np.shape(lung_im)[0]),
np.arange(0, np.shape(lung_im)[1]),
np.arange(0, np.shape(lung_im)[2]),
indexing='ij')]
scale_transversal_plane = np.ones(np.shape(lung_im)[0])
dvf_anteroposterior = np.zeros(np.shape(lung_im))
dvf_craniocaudal = np.zeros(np.shape(lung_im))
lung_extension = (np.max(i_lung[0]) - diaphragm_slice) / 2
alpha = 1.3 / lung_extension
for z in range(np.shape(scale_transversal_plane)[0]):
if z < diaphragm_slice:
scale_transversal_plane[z] = 1 + s0
dvf_craniocaudal[z, :, :] = t0
elif diaphragm_slice <= z < diaphragm_slice + lung_extension:
scale_transversal_plane[z] = 1 + s0 * (1 - np.log(1 + (z - diaphragm_slice) * alpha) / np.log(1 + lung_extension * alpha))
dvf_craniocaudal[z, :, :] = t0 * (1 - np.log(1 + (z - diaphragm_slice) * alpha) / np.log(1 + lung_extension * alpha))
else:
scale_transversal_plane[z] = 1
dvf_craniocaudal[z, :, :] = 0
dvf_anteroposterior[z, :, :] = (indices[anteroposterior_dim][z, :, :] - center_scale) * (scale_transversal_plane[z] - 1)
dvf = np.zeros(list(np.shape(lung_im))+[3])
dvf[:, :, :, 2] = mode_coeff * dvf_craniocaudal
dvf[:, :, :, 1] = -mode_coeff * dvf_anteroposterior
bcoeff = bspline_coeff(im_sitk, max_deform_single_freq, grid_border_to_zero, grid_smoothing_sigma,
grid_spacing, random_state, dim_im=3, artificial_generation='respiratory_motion')
dvf_single_freq_filter = sitk.TransformToDisplacementFieldFilter()
dvf_single_freq_filter.SetSize(im_sitk.GetSize())
dvf_single_freq_sitk = dvf_single_freq_filter.Execute(bcoeff)
dvf_single_freq = sitk.GetArrayFromImage(dvf_single_freq_sitk)
if setting['deform_exp'][im_info['deform_exp']]['DVFNormalization']:
dvf_single_freq = normalize_dvf(dvf_single_freq, max_deform)
dvf_single_freq[:, :, :, 2] = dvf_single_freq[:, :, :, 2] * 0.3 # make the dvf in the slice direction smaller
dvf = dvf + dvf_single_freq
mask_to_zero = setting['deform_exp'][im_info['deform_exp']]['MaskToZero']
if mask_to_zero is not None:
sigma = setting['deform_exp'][im_info['deform_exp']]['RespiratoryMotion_BackgroundSmoothingSigma'][deform_number]
dvf = do_mask_to_zero_gaussian(setting, im_info_su, dvf, mask_to_zero, stage, max_deform, sigma)
else:
raise ValueError('In the current implementation, respiratory_motion is not valid without mask_to_zero')
if setting['deform_exp'][im_info['deform_exp']]['DVFNormalization']:
dvf = normalize_dvf(dvf, max_deform * 1.2)
return dvf
def mixed_freq(setting, im_info, stage):
im_info_su = {'data': im_info['data'], 'deform_exp': im_info['deform_exp'], 'type_im': im_info['type_im'],
'cn': im_info['cn'], 'dsmooth': im_info['dsmooth'], 'stage': stage, 'padto': im_info['padto']}
seed_number = ag_utils.seed_number_by_im_info(im_info, 'mixed_freq', stage=stage)
random_state = np.random.RandomState(seed_number)
deform_number = im_info['deform_number']
max_deform = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_MaxDeformRatio'][deform_number]
grid_smoothing_sigma = [i/stage for i in setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_GridSmoothingSigma'][deform_number]]
grid_border_to_zero = setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_SetGridBorderToZero'][deform_number]
grid_spacing = setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_BSplineGridSpacing'][deform_number] # Approximately
number_dilation = setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_Np'][deform_number]
im_canny_address = su.address_generator(setting, 'ImCanny', **im_info_su)
im_sitk = sitk.ReadImage(su.address_generator(setting, 'Im', **im_info_su))
if os.path.isfile(im_canny_address):
im_canny_sitk = sitk.ReadImage(im_canny_address)
else:
im_canny_sitk = sitk.CannyEdgeDetection(sitk.Cast(im_sitk, sitk.sitkFloat32),
lowerThreshold=setting['deform_exp'][im_info['deform_exp']]['Canny_LowerThreshold'],
upperThreshold=setting['deform_exp'][im_info['deform_exp']]['Canny_UpperThreshold'])
sitk.WriteImage(sitk.Cast(im_canny_sitk, sitk.sitkInt8), im_canny_address)
lung_im = sitk.GetArrayFromImage(sitk.ReadImage(su.address_generator(setting, 'Lung', **im_info_su))).astype(np.bool)
im_canny = sitk.GetArrayFromImage(im_canny_sitk)
# erosion with ndimage is 5 times faster than SimpleITK
lung_dilated = ndimage.binary_dilation(lung_im)
available_region = np.logical_and(lung_dilated, im_canny)
available_region = np.tile(np.expand_dims(available_region, axis=-1), 3)
dilated_edge = np.copy(available_region)
itr_edge = 0
i_edge = [None]*3
select_voxel = [None]*3
block_low = [None]*3
block_high = [None]*3
for dim in range(3):
i_edge[dim] = np.where(available_region[:, :, :, dim] > 0)
# Previously, we only selected voxels on the edges (CannyEdgeDetection), but now we use all voxels.
if (len(i_edge[0][0]) == 0) or (len(i_edge[1][0]) == 0) or (len(i_edge[2][0]) == 0):
logging.debug('dvf_generation: We are out of points. Plz change the threshold value of Canny method!!!!! ') # Old method. only edges!
while (len(i_edge[0][0]) > 4) and (len(i_edge[1][0]) > 4) and (len(i_edge[2][0]) > 4) and (itr_edge < number_dilation):
# i_edge will change at the end of this while loop!
no_more_dilatation_in_this_region = False
for dim in range(3):
select_voxel[dim] = int(random_state.randint(0, len(i_edge[dim][0]) - 1, 1, dtype=np.int64))
block_low[dim], block_high[dim] = center_to_block(setting,
center=np.array([i_edge[dim][0][select_voxel[dim]],
i_edge[dim][1][select_voxel[dim]],
i_edge[dim][2][select_voxel[dim]]]),
radius=round(setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_BlockRadius']/stage),
im_ref=im_sitk)
if itr_edge == 0:
struct = np.ones((3, 3, 3), dtype=bool)
for dim in range(3):
dilated_edge[:, :, :, dim] = ndimage.binary_dilation(dilated_edge[:, :, :, dim], structure=struct)
elif itr_edge < np.round(10*number_dilation/12): # We like to include zero deformation in our training set.
no_more_dilatation_in_this_region = True
for dim in range(3):
dilated_edge[block_low[dim][0]:block_high[dim][0],
block_low[dim][1]:block_high[dim][1],
block_low[dim][2]:block_high[dim][2], dim] = False
elif itr_edge < np.round(11*number_dilation/12):
struct = ndimage.generate_binary_structure(3, 2)
for dim in range(3):
mask_for_edge_dilation = np.zeros(np.shape(dilated_edge[:, :, :, dim]), dtype=bool)
mask_for_edge_dilation[block_low[dim][0]:block_high[dim][0], block_low[dim][1]:block_high[dim][1], block_low[dim][2]:block_high[dim][2]] = True
dilated_edge[:, :, :, dim] = ndimage.binary_dilation(dilated_edge[:, :, :, dim], structure=struct, mask=mask_for_edge_dilation)
if (itr_edge % 2) == 0:
no_more_dilatation_in_this_region = True
elif itr_edge < number_dilation:
struct = np.zeros((9, 9, 9), dtype=bool)
if (itr_edge % 3) == 0:
struct[0:5, :, :] = True
if (itr_edge % 3) == 1:
struct[:, 0:5, :] = True
if (itr_edge % 3) == 2:
struct[:, :, 0:5] = True
for dim in range(3):
mask_for_edge_dilation = np.zeros(np.shape(dilated_edge[:, :, :, dim]), dtype=bool)
mask_for_edge_dilation[block_low[dim][0]:block_high[dim][0], block_low[dim][1]:block_high[dim][1], block_low[dim][2]:block_high[dim][2]] = True
dilated_edge[:, :, :, dim] = ndimage.binary_dilation(dilated_edge[:, :, :, dim], structure=struct, mask=mask_for_edge_dilation)
if random_state.uniform() > 0.3:
no_more_dilatation_in_this_region = True
if no_more_dilatation_in_this_region:
available_region[block_low[dim][0]:block_high[dim][0], block_low[dim][1]:block_high[dim][1], block_low[dim][2]:block_high[dim][2], dim] = False
if itr_edge >= np.round(10*number_dilation/12):
for dim in range(3):
i_edge[dim] = np.where(available_region[:, :, :, dim] > 0)
itr_edge += 1
bcoeff = bspline_coeff(im_sitk, max_deform, grid_border_to_zero, grid_smoothing_sigma,
grid_spacing, random_state, dim_im=3, artificial_generation='mixed_frequency')
dvf_filter = sitk.TransformToDisplacementFieldFilter()
dvf_filter.SetSize(im_sitk.GetSize())
smoothed_values_sitk = dvf_filter.Execute(bcoeff)
smoothed_values = sitk.GetArrayFromImage(smoothed_values_sitk)
dvf = (dilated_edge.astype(np.float64) * smoothed_values).astype(np.float64)
if setting['DVFPad_S'+str(stage)] > 0:
pad = setting['DVFPad_S'+str(stage)]
dvf = np.pad(dvf, ((pad, pad), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=(0,))
sigma_range = setting['deform_exp'][im_info['deform_exp']]['MixedFrequency_SigmaRange'][deform_number]
sigma = random_state.uniform(low=sigma_range[0] / stage,
high=sigma_range[1] / stage,
size=3)
dvf = smooth_dvf(dvf, sigma_blur=sigma, parallel_processing=setting['ParallelSearching'])
if setting['deform_exp'][im_info['deform_exp']]['DVFNormalization']:
dvf = normalize_dvf(dvf, max_deform)
return dvf
def translation(setting, im_info, stage, im_input_sitk):
seed_number = ag_utils.seed_number_by_im_info(im_info, 'translation', stage=stage)
random_state = np.random.RandomState(seed_number)
deform_number = im_info['deform_number']
max_deform = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['Translation_MaxDeformRatio'][deform_number]
dim_im = setting['Dim']
translation_transform = sitk.TranslationTransform(dim_im)
translation_magnitude = np.zeros(3)
for dim in range(dim_im):
if random_state.random_sample() > 0.8:
translation_magnitude[dim] = 0
else:
translation_magnitude[dim] = random_state.uniform(-max_deform, max_deform)
translation_transform.SetParameters(translation_magnitude)
dvf_filter = sitk.TransformToDisplacementFieldFilter()
dvf_filter.SetSize(im_input_sitk.GetSize())
dvf_sitk = dvf_filter.Execute(translation_transform)
dvf = sitk.GetArrayFromImage(dvf_sitk)
return dvf
def bspline_coeff(im_input_sitk, max_deform, grid_border_to_zero, grid_smoothing_sigma, grid_spacing, random_state, dim_im, artificial_generation=None):
number_of_grids = list(np.round(np.array(im_input_sitk.GetSize()) * np.array(im_input_sitk.GetSpacing()) / grid_spacing))
number_of_grids = [int(i) for i in number_of_grids] # This is a bit funny, it has to be int (and not even np.int)
# BCoeff = sitk.BSplineTransformInitializer(ImInput, numberOfGrids, order=3)
# problem with the offset
bcoeff = sitk.BSplineTransformInitializer(sitk.Image(im_input_sitk.GetSize(), sitk.sitkInt8), number_of_grids, order=3)
bcoeff_parameters = random_state.uniform(-max_deform*4, max_deform*4, len(bcoeff.GetParameters()))
# we choose numbers to be in range of MaxDeform, please note that there are two smoothing steps after this initialization.
# So numbers will be much smaller.
grid_side = bcoeff.GetTransformDomainMeshSize()
if dim_im == 3:
bcoeff_smoothed_dim = [None] * 3
for dim in range(3):
bcoeff_dim = np.reshape(np.split(bcoeff_parameters, 3)[dim], [grid_side[2]+3, grid_side[1]+3, grid_side[0]+3])
# number of coefficients in grid is increased with 3 in simpleITK.
if np.any(grid_border_to_zero):
# in two steps, the marginal coefficient of the grids are set to zero:
# 1. before smoothing the grid with gridBorderToZero+1 2. after smoothing the grid with gridBorderToZero
non_zero_mask = np.zeros(np.shape(bcoeff_dim))
non_zero_mask[grid_border_to_zero[0] + 1:-grid_border_to_zero[0] - 1, grid_border_to_zero[1] + 1:-grid_border_to_zero[1] - 1,
grid_border_to_zero[2] + 1:-grid_border_to_zero[2] - 1] = 1
bcoeff_dim = bcoeff_dim * non_zero_mask
bcoeff_smoothed_dim[dim] = ndimage.filters.gaussian_filter(bcoeff_dim, grid_smoothing_sigma[dim])
if np.any(grid_border_to_zero):
non_zero_mask = np.zeros(np.shape(bcoeff_dim))
non_zero_mask[grid_border_to_zero[0]:-grid_border_to_zero[0], grid_border_to_zero[1]:-grid_border_to_zero[1],
grid_border_to_zero[2]:-grid_border_to_zero[2]] = 1
bcoeff_smoothed_dim[dim] = bcoeff_smoothed_dim[dim] * non_zero_mask
bcoeff_parameters_smooth = np.hstack((np.reshape(bcoeff_smoothed_dim[0], -1),
np.reshape(bcoeff_smoothed_dim[1], -1),
np.reshape(bcoeff_smoothed_dim[2], -1)))
else:
raise ValueError('not implemented for 2D')
if artificial_generation in ['single_frequency', 'respiratory_motion']:
bcoeff_parameters_smooth_normalize = normalize_dvf(bcoeff_parameters_smooth, max_deform * 1.7)
elif artificial_generation == 'mixed_frequency':
bcoeff_parameters_smooth_normalize = normalize_dvf(bcoeff_parameters_smooth, max_deform * 2, min_deform=max_deform)
else:
raise ValueError("artificial_generation should be in ['single_frequency', 'mixed_frequency', 'respiratory_motion']")
bcoeff.SetParameters(bcoeff_parameters_smooth_normalize)
return bcoeff
def smooth_dvf(dvf, dim_im=3, sigma_blur=None, parallel_processing=True):
dvf_smooth = np.empty(np.shape(dvf))
if parallel_processing:
num_cores = multiprocessing.cpu_count() - 2
if dim_im == 3:
# The following line is not working in Windows
[dvf_smooth[:, :, :, 0], dvf_smooth[:, :, :, 1], dvf_smooth[:, :, :, 2]] = \
Parallel(n_jobs=num_cores)(delayed(smooth_gaussian)(dvf=dvf[:, :, :, i], sigma=sigma_blur[i]) for i in range(np.shape(dvf)[3]))
if dim_im == 2:
[dvf_smooth[:, :, :, 0], dvf_smooth[:, :, :, 1]] = \
Parallel(n_jobs=num_cores)(delayed(smooth_gaussian)(dvf=dvf[:, :, :, i], sigma=sigma_blur[i]) for i in range(np.shape(dvf)[3]))
dvf_smooth[:, :, :, 2] = dvf[:, :, :, 2]
else:
for dim in range(dim_im):
dvf_smooth[:, :, :, dim] = smooth_gaussian(dvf[:, :, :, dim], sigma_blur[dim])
return dvf_smooth
def normalize_dvf(dvf, max_deform, min_deform=None):
max_dvf = max(abs(np.max(dvf)), abs(np.min(dvf)))
if max_dvf > max_deform:
dvf = dvf * max_deform / max_dvf
if min_deform is not None:
if max_dvf < min_deform:
dvf = dvf * min_deform / max_dvf
return dvf
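# For example, a field whose largest absolute displacement is 10 mm, normalized
# with max_deform=5, is scaled by 0.5 everywhere, so the largest displacement
# becomes exactly 5 mm; fields already within the bound are left untouched.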
def smooth_gaussian(dvf, sigma):
return ndimage.filters.gaussian_filter(dvf, sigma=sigma)
def center_to_block(setting, center=None, radius=10, im_ref=None):
block_low = center - radius
block_high = center + radius
if setting['Dim'] == 2:
block_low[0] = center[0] - 1
block_high[0] = center[0] + 2
for dim in range(3):
if block_low[dim] < 0:
block_low[dim] = 0
if block_high[dim] > im_ref.GetSize()[-1-dim]:
block_high[dim] = im_ref.GetSize()[-1-dim]
return block_low, block_high
def do_mask_to_zero_gaussian(setting, im_info_su, dvf, mask_to_zero, stage, max_deform, sigma):
mask_address = su.address_generator(setting, mask_to_zero, **im_info_su)
mask_im = sitk.GetArrayFromImage(sitk.ReadImage(mask_address))
dvf = dvf * np.repeat(np.expand_dims(mask_im, axis=3), np.shape(dvf)[3], axis=3)
    # In stage 4 the sigma should be smaller, but at the same time the max_deform in stage 4
    # is 20, which leads to a negative Jacobian. There is no problem for other sigma values in the code.
    sigma = sigma / stage * max_deform / 7
    sigma = np.tile(sigma, 3)
dvf = smooth_dvf(dvf, sigma_blur=sigma, parallel_processing=setting['ParallelSearching'])
return dvf
def background_to_zero_linear(setting, im_info_su, gonna_generate_next_im=False):
if gonna_generate_next_im:
im_info_su_orig = copy.deepcopy(im_info_su)
im_info_su_orig['dsmooth'] = 0
torso_address = su.address_generator(setting, 'Torso', **im_info_su_orig)
else:
torso_address = su.address_generator(setting, 'Torso', **im_info_su)
torso_im = sitk.GetArrayFromImage(sitk.ReadImage(torso_address))
torso_distance = ndimage.morphology.distance_transform_edt(1 - torso_im, sampling=setting['VoxelSize'])
mask_to_zero = torso_im.copy().astype(np.float)
    background_ind = torso_im == 0  # boolean mask; no list wrapper needed for indexing
mask_to_zero[background_ind] = (1 / torso_distance[background_ind])
mask_to_zero[mask_to_zero < 0.05] = 0
return mask_to_zero
def translation_with_bspline_grid(setting, im_input_sitk, im_info=None):
seed_number = ag_utils.seed_number_by_im_info(im_info, 'translation')
random_state = np.random.RandomState(seed_number)
deform_number = im_info['deform_number']
max_deform = setting['deform_exp'][im_info['deform_exp']]['MaxDeform'] * \
setting['deform_exp'][im_info['deform_exp']]['Translation_MaxDeformRatio'][deform_number]
dim_im = setting['Dim']
grid_border_to_zero = setting['deform_exp'][im_info['deform_exp']]['setGridBorderToZero_translation'][deform_number]
grid_spacing = setting['deform_exp'][im_info['deform_exp']]['BsplineGridSpacing_translation'][deform_number]
if setting['DVFPad_S1'] > 0:
# ImInput is already zeropadded in this case
padded_mm = setting['DVFPad_S1'] * im_input_sitk.GetSpacing()[0]
        grid_border_to_zero = (grid_border_to_zero + np.ceil(np.repeat(padded_mm, int(dim_im)) / grid_spacing)).astype(np.int)  # dim_im is an int here (cf. single_freq)
if len(np.unique(im_input_sitk.GetSpacing())) > 1:
raise ValueError('dvf_generation: padding is only implemented for isotropic voxel size. current voxel size = [{}, {}, {}]'.format(
im_input_sitk.GetSpacing()[0], im_input_sitk.GetSpacing()[1], im_input_sitk.GetSpacing()[2]))
number_of_grids = list(np.round(np.array(im_input_sitk.GetSize()) * np.array(im_input_sitk.GetSpacing()) / grid_spacing))
number_of_grids = [int(i) for i in number_of_grids] # it has to be int (and not even np.int)
# BCoeff = sitk.BSplineTransformInitializer(ImInput, numberOfGrids, order=3)
# problem with the offset
bcoeff = sitk.BSplineTransformInitializer(sitk.Image(im_input_sitk.GetSize(), sitk.sitkInt8), number_of_grids, order=3)
grid_side = bcoeff.GetTransformDomainMeshSize()
if dim_im == 3:
bcoeff_smoothed_dim = [None] * 3
translation_magnitude = [None] * 3
for dim in range(3):
if random_state.random_sample() > 0.8:
translation_magnitude[dim] = 0
else:
translation_magnitude[dim] = random_state.uniform(-max_deform, max_deform)
if dim == 2:
if translation_magnitude[2] < max_deform * 2 / 3:
if translation_magnitude[1] < max_deform * 2 / 3:
if translation_magnitude[0] < max_deform * 2 / 3:
translation_magnitude[2] = random_state.uniform(max_deform * 2 / 3, max_deform)
sign_of_magnitude = random_state.random_sample()
if sign_of_magnitude > 0.5:
translation_magnitude[2] = - translation_magnitude[2]
bcoeff_dim = np.ones([grid_side[2] + 3, grid_side[1] + 3, grid_side[0] + 3]) * translation_magnitude[dim]
# number of coefficients in grid is increased with 3 in simpleITK.
if np.any(grid_border_to_zero):
non_zero_mask = np.zeros(np.shape(bcoeff_dim))
non_zero_mask[grid_border_to_zero[0]:-grid_border_to_zero[0], grid_border_to_zero[1]:-grid_border_to_zero[1],
grid_border_to_zero[2]:-grid_border_to_zero[2]] = 1
bcoeff_dim = bcoeff_dim * non_zero_mask
bcoeff_smoothed_dim[dim] = bcoeff_dim
bcoeff_parameters_smooth = np.hstack((np.reshape(bcoeff_smoothed_dim[0], -1),
np.reshape(bcoeff_smoothed_dim[1], -1),
np.reshape(bcoeff_smoothed_dim[2], -1)))
else:
raise ValueError('not implemented for 2D')
bcoeff.SetParameters(bcoeff_parameters_smooth)
dvf_filter = sitk.TransformToDisplacementFieldFilter()
dvf_filter.SetSize(im_input_sitk.GetSize())
dvf_sitk = dvf_filter.Execute(bcoeff)
dvf = sitk.GetArrayFromImage(dvf_sitk)
if setting['deform_exp'][im_info['deform_exp']]['DVFNormalization']:
dvf = normalize_dvf(dvf, max_deform)
return dvf
|
[
"h.sokooti@gmail.com"
] |
h.sokooti@gmail.com
|
16020a9467be3e9c290b0cd196a1c48194e540c5
|
3450624ffe4e7aa187d3834ed6c5a503049d6e58
|
/vcuhunt/env/Lib/site-packages/pytest_django/fixtures.py
|
b2cc82580af41d20e698b46a1c63cbb4d5a6c73c
|
[
"MIT"
] |
permissive
|
wdrescher/vcu-scavenger-hunt
|
018c53419de7a1fee95cabce89905119ae76d330
|
98dd642e2a8a648055bf726950741928d4ba2a96
|
refs/heads/master
| 2022-12-09T19:19:55.907388
| 2020-04-12T17:35:23
| 2020-04-12T17:35:23
| 236,537,824
| 0
| 0
|
MIT
| 2022-10-30T16:02:52
| 2020-01-27T16:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 14,814
|
py
|
"""All pytest-django fixtures"""
from __future__ import with_statement
import os
import warnings
from contextlib import contextmanager
from functools import partial
import pytest
from . import live_server_helper
from .django_compat import is_django_unittest
from .lazy_django import skip_if_no_django
__all__ = [
"django_db_setup",
"db",
"transactional_db",
"django_db_reset_sequences",
"admin_user",
"django_user_model",
"django_username_field",
"client",
"admin_client",
"rf",
"settings",
"live_server",
"_live_server_helper",
"django_assert_num_queries",
"django_assert_max_num_queries",
]
@pytest.fixture(scope="session")
def django_db_modify_db_settings_tox_suffix():
skip_if_no_django()
tox_environment = os.getenv("TOX_PARALLEL_ENV")
if tox_environment:
# Put a suffix like _py27-django21 on tox workers
_set_suffix_to_test_databases(suffix=tox_environment)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_xdist_suffix(request):
skip_if_no_django()
xdist_suffix = getattr(request.config, "slaveinput", {}).get("slaveid")
if xdist_suffix:
# Put a suffix like _gw0, _gw1 etc on xdist processes
_set_suffix_to_test_databases(suffix=xdist_suffix)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_parallel_suffix(
django_db_modify_db_settings_tox_suffix,
django_db_modify_db_settings_xdist_suffix,
):
skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_modify_db_settings(django_db_modify_db_settings_parallel_suffix):
skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_use_migrations(request):
return not request.config.getvalue("nomigrations")
@pytest.fixture(scope="session")
def django_db_keepdb(request):
return request.config.getvalue("reuse_db")
@pytest.fixture(scope="session")
def django_db_createdb(request):
return request.config.getvalue("create_db")
@pytest.fixture(scope="session")
def django_db_setup(
request,
django_test_environment,
django_db_blocker,
django_db_use_migrations,
django_db_keepdb,
django_db_createdb,
django_db_modify_db_settings,
):
"""Top level fixture to ensure test databases are available"""
from .compat import setup_databases, teardown_databases
setup_databases_args = {}
if not django_db_use_migrations:
_disable_native_migrations()
if django_db_keepdb and not django_db_createdb:
setup_databases_args["keepdb"] = True
with django_db_blocker.unblock():
db_cfg = setup_databases(
verbosity=request.config.option.verbose,
interactive=False,
**setup_databases_args
)
def teardown_database():
with django_db_blocker.unblock():
try:
teardown_databases(db_cfg, verbosity=request.config.option.verbose)
except Exception as exc:
request.node.warn(
pytest.PytestWarning(
"Error when trying to teardown test databases: %r" % exc
)
)
if not django_db_keepdb:
request.addfinalizer(teardown_database)
def _django_db_fixture_helper(
request, django_db_blocker, transactional=False, reset_sequences=False
):
if is_django_unittest(request):
return
if not transactional and "live_server" in request.fixturenames:
# Do nothing, we get called with transactional=True, too.
return
django_db_blocker.unblock()
request.addfinalizer(django_db_blocker.restore)
if transactional:
from django.test import TransactionTestCase as django_case
if reset_sequences:
class ResetSequenceTestCase(django_case):
reset_sequences = True
django_case = ResetSequenceTestCase
else:
from django.test import TestCase as django_case
test_case = django_case(methodName="__init__")
test_case._pre_setup()
request.addfinalizer(test_case._post_teardown)
def _disable_native_migrations():
from django.conf import settings
from django.core.management.commands import migrate
from .migrations import DisableMigrations
settings.MIGRATION_MODULES = DisableMigrations()
class MigrateSilentCommand(migrate.Command):
def handle(self, *args, **kwargs):
kwargs["verbosity"] = 0
return super(MigrateSilentCommand, self).handle(*args, **kwargs)
migrate.Command = MigrateSilentCommand
def _set_suffix_to_test_databases(suffix):
from django.conf import settings
for db_settings in settings.DATABASES.values():
test_name = db_settings.get("TEST", {}).get("NAME")
if not test_name:
if db_settings["ENGINE"] == "django.db.backends.sqlite3":
continue
test_name = "test_{}".format(db_settings["NAME"])
if test_name == ":memory:":
continue
db_settings.setdefault("TEST", {})
db_settings["TEST"]["NAME"] = "{}_{}".format(test_name, suffix)
# ############### User visible fixtures ################
@pytest.fixture(scope="function")
def db(request, django_db_setup, django_db_blocker):
"""Require a django test database.
This database will be setup with the default fixtures and will have
the transaction management disabled. At the end of the test the outer
transaction that wraps the test itself will be rolled back to undo any
changes to the database (in case the backend supports transactions).
This is more limited than the ``transactional_db`` resource but
faster.
If multiple database fixtures are requested, they take precedence
over each other in the following order (the last one wins): ``db``,
``transactional_db``, ``django_db_reset_sequences``.
"""
if "django_db_reset_sequences" in request.fixturenames:
request.getfixturevalue("django_db_reset_sequences")
if (
"transactional_db" in request.fixturenames
or "live_server" in request.fixturenames
):
request.getfixturevalue("transactional_db")
else:
_django_db_fixture_helper(request, django_db_blocker, transactional=False)
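# A minimal sketch of a test consuming this fixture (hypothetical test module,
# not part of pytest-django itself):
#
#     def test_create_user(db, django_user_model):
#         django_user_model.objects.create(username="someone")
#         assert django_user_model.objects.count() == 1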
@pytest.fixture(scope="function")
def transactional_db(request, django_db_setup, django_db_blocker):
"""Require a django test database with transaction support.
This will re-initialise the django database for each test and is
thus slower than the normal ``db`` fixture.
If you want to use the database with transactions you must request
this resource.
If multiple database fixtures are requested, they take precedence
over each other in the following order (the last one wins): ``db``,
``transactional_db``, ``django_db_reset_sequences``.
"""
if "django_db_reset_sequences" in request.fixturenames:
request.getfixturevalue("django_db_reset_sequences")
_django_db_fixture_helper(request, django_db_blocker, transactional=True)
@pytest.fixture(scope="function")
def django_db_reset_sequences(request, django_db_setup, django_db_blocker):
"""Require a transactional test database with sequence reset support.
This behaves like the ``transactional_db`` fixture, with the addition
of enforcing a reset of all auto increment sequences. If the enquiring
test relies on such values (e.g. ids as primary keys), you should
request this resource to ensure they are consistent across tests.
If multiple database fixtures are requested, they take precedence
over each other in the following order (the last one wins): ``db``,
``transactional_db``, ``django_db_reset_sequences``.
"""
_django_db_fixture_helper(
request, django_db_blocker, transactional=True, reset_sequences=True
)
@pytest.fixture()
def client():
"""A Django test client instance."""
skip_if_no_django()
from django.test.client import Client
return Client()
@pytest.fixture()
def django_user_model(db):
"""The class of Django's user model."""
from django.contrib.auth import get_user_model
return get_user_model()
@pytest.fixture()
def django_username_field(django_user_model):
"""The fieldname for the username used with Django's user model."""
return django_user_model.USERNAME_FIELD
@pytest.fixture()
def admin_user(db, django_user_model, django_username_field):
"""A Django admin user.
This uses an existing user with username "admin", or creates a new one with
password "password".
"""
UserModel = django_user_model
username_field = django_username_field
username = "admin@example.com" if username_field == "email" else "admin"
try:
user = UserModel._default_manager.get(**{username_field: username})
except UserModel.DoesNotExist:
extra_fields = {}
if username_field not in ("username", "email"):
extra_fields[username_field] = "admin"
user = UserModel._default_manager.create_superuser(
username, "admin@example.com", "password", **extra_fields
)
return user
@pytest.fixture()
def admin_client(db, admin_user):
"""A Django test client logged in as an admin user."""
from django.test.client import Client
client = Client()
client.login(username=admin_user.username, password="password")
return client
@pytest.fixture()
def rf():
"""RequestFactory instance"""
skip_if_no_django()
from django.test.client import RequestFactory
return RequestFactory()
class SettingsWrapper(object):
_to_restore = []
def __delattr__(self, attr):
from django.test import override_settings
override = override_settings()
override.enable()
from django.conf import settings
delattr(settings, attr)
self._to_restore.append(override)
def __setattr__(self, attr, value):
from django.test import override_settings
override = override_settings(**{attr: value})
override.enable()
self._to_restore.append(override)
def __getattr__(self, item):
from django.conf import settings
return getattr(settings, item)
def finalize(self):
for override in reversed(self._to_restore):
override.disable()
del self._to_restore[:]
@pytest.yield_fixture()
def settings():
"""A Django settings object which restores changes after the testrun"""
skip_if_no_django()
wrapper = SettingsWrapper()
yield wrapper
wrapper.finalize()
@pytest.fixture(scope="session")
def live_server(request):
"""Run a live Django server in the background during tests
The address the server is started from is taken from the
--liveserver command line option or if this is not provided from
the DJANGO_LIVE_TEST_SERVER_ADDRESS environment variable. If
neither is provided ``localhost:8081,8100-8200`` is used. See the
Django documentation for its full syntax.
NOTE: If the live server needs database access to handle a request
your test will have to request database access. Furthermore
when the tests want to see data added by the live-server (or
the other way around) transactional database access will be
needed as data inside a transaction is not shared between
the live server and test code.
Static assets will be automatically served when
``django.contrib.staticfiles`` is available in INSTALLED_APPS.
"""
skip_if_no_django()
import django
addr = request.config.getvalue("liveserver") or os.getenv(
"DJANGO_LIVE_TEST_SERVER_ADDRESS"
)
if addr and ":" in addr:
if django.VERSION >= (1, 11):
ports = addr.split(":")[1]
if "-" in ports or "," in ports:
warnings.warn(
"Specifying multiple live server ports is not supported "
"in Django 1.11. This will be an error in a future "
"pytest-django release."
)
if not addr:
if django.VERSION < (1, 11):
addr = "localhost:8081,8100-8200"
else:
addr = "localhost"
server = live_server_helper.LiveServer(addr)
request.addfinalizer(server.stop)
return server
@pytest.fixture(autouse=True, scope="function")
def _live_server_helper(request):
"""Helper to make live_server work, internal to pytest-django.
This helper will dynamically request the transactional_db fixture
for a test which uses the live_server fixture. This allows the
server and test to access the database without having to mark
this explicitly which is handy since it is usually required and
matches the Django behaviour.
The separate helper is required since live_server can not request
transactional_db directly since it is session scoped instead of
function-scoped.
It will also override settings only for the duration of the test.
"""
if "live_server" not in request.fixturenames:
return
request.getfixturevalue("transactional_db")
live_server = request.getfixturevalue("live_server")
live_server._live_server_modified_settings.enable()
request.addfinalizer(live_server._live_server_modified_settings.disable)
@contextmanager
def _assert_num_queries(config, num, exact=True, connection=None, info=None):
from django.test.utils import CaptureQueriesContext
if connection is None:
from django.db import connection
verbose = config.getoption("verbose") > 0
with CaptureQueriesContext(connection) as context:
yield context
num_performed = len(context)
if exact:
failed = num != num_performed
else:
failed = num_performed > num
if failed:
msg = "Expected to perform {} queries {}{}".format(
num,
"" if exact else "or less ",
"but {} done".format(
num_performed == 1 and "1 was" or "%d were" % (num_performed,)
),
)
if info:
msg += "\n{}".format(info)
if verbose:
sqls = (q["sql"] for q in context.captured_queries)
msg += "\n\nQueries:\n========\n\n%s" % "\n\n".join(sqls)
else:
msg += " (add -v option to show queries)"
pytest.fail(msg)
@pytest.fixture(scope="function")
def django_assert_num_queries(pytestconfig):
return partial(_assert_num_queries, pytestconfig)
@pytest.fixture(scope="function")
def django_assert_max_num_queries(pytestconfig):
return partial(_assert_num_queries, pytestconfig, exact=False)
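# A minimal usage sketch (a hypothetical test module, not part of this
# plugin file): tests request the fixtures defined above by name.
def test_create_user(db, django_user_model):
    user = django_user_model._default_manager.create_user("someone")
    assert user.pk is not None

def test_query_budget(db, django_user_model, django_assert_max_num_queries):
    # fails if listing users issues more than 2 queries
    with django_assert_max_num_queries(2):
        list(django_user_model._default_manager.all())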
|
[
"nguyenam8@vcu.edu"
] |
nguyenam8@vcu.edu
|
4370177278986c2bc431fa9871bc5e6dd9ab6f8f
|
483661fe2de1a4a25c01a89498d6524c7a3a138a
|
/Algorithms/Others/Factorial.py
|
a7205dcc994ad8d8240971a7639b2a3e243e4305
|
[] |
no_license
|
Ada-Lungu/Playing-with-Python
|
c012c7a480030c1ffc33c73881c128b334b252cd
|
12e81602ad05d43973e7fbb863f8ffa77daaf01f
|
refs/heads/master
| 2016-08-03T03:16:29.027688
| 2014-09-12T13:41:52
| 2014-09-12T13:41:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
def factorial(n):  # n! = n(n-1)! ; 0! = 1
    if n == 0:
        return 1
    else:
        # return the product directly: assigning to a local variable named
        # `factorial` would shadow the function and break the recursive call
        return n * factorial(n-1)
# this is a recursive function = a function that calls itself
def fibonacci(n):  # fibonacci(n) = fibonacci(n-1) + fibonacci(n-2)
    if n == 0 or n == 1:
        return 1
    # the recursive calls must use the function's name; the original assigned
    # to (and called) a misspelled local variable `fibonnaci`, which fails
    return fibonacci(n-1) + fibonacci(n-2)
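# Quick sanity checks (values follow from the definitions above):
print(factorial(5))   # 5! = 120
print(fibonacci(5))   # sequence 1, 1, 2, 3, 5, 8 -> 8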
|
[
"ada.lungu@gmail.com"
] |
ada.lungu@gmail.com
|
44044c891ec2b8e378f51acf7933a3a1bd3fe346
|
fe7600d6ed61107f8fcec298527515415712fc0b
|
/docs/conf.py
|
ea673e3a14034c0268db2d9ef5d9cda9ef3ca78d
|
[
"BSD-3-Clause"
] |
permissive
|
kevinjqiu/mockingjay
|
67b98e4e4c6aa3c96378c11226d1fccbb01b1fea
|
b97ee8ab9476165916305591b319d1a2e6af0635
|
refs/heads/master
| 2023-01-14T18:15:40.855856
| 2015-05-29T03:10:56
| 2015-05-29T03:10:56
| 36,202,496
| 1
| 0
|
BSD-3-Clause
| 2022-12-26T19:44:15
| 2015-05-25T01:17:05
|
Python
|
UTF-8
|
Python
| false
| false
| 8,425
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mockingjay documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import mockingjay
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mockingjay'
copyright = u'2015, Kevin J. Qiu'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mockingjay.__version__
# The full version, including alpha/beta/rc tags.
release = mockingjay.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mockingjaydoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'mockingjay.tex',
u'mockingjay Documentation',
u'Kevin J. Qiu', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mockingjay',
u'mockingjay Documentation',
[u'Kevin J. Qiu'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mockingjay',
u'mockingjay Documentation',
u'Kevin J. Qiu',
'mockingjay',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"kevin@freshbooks.com"
] |
kevin@freshbooks.com
|
cc8c1afd7c28f0d943eaf2700c34e719dd9003ea
|
85eff920f0f285abad84c2f6bcfd4f236f3976ab
|
/webservices/migrations/0099_engageboostadditionalglobalsettings.py
|
ff6fe266ae10d7c901a6d0d6e6ae3778ac320156
|
[] |
no_license
|
obxlifco/Web-Picking-App-GoGrocery
|
8cf5f7924005a19764e5c4722a47bfd963965f2e
|
6b084547bed2af43a67bada313d68e56f4228f96
|
refs/heads/main
| 2023-05-26T08:32:30.297317
| 2021-06-12T10:05:01
| 2021-06-12T10:05:01
| 315,206,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-05-31 13:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webservices', '0098_auto_20190530_1336'),
]
operations = [
migrations.CreateModel(
name='EngageboostAdditionalGlobalsettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('settings_key', models.TextField(blank=True, null=True)),
('settings_value', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(blank=True, null=True)),
('modified', models.DateTimeField(blank=True, null=True)),
('isblocked', models.CharField(choices=[('y', 'y'), ('n', 'n')], default='n', max_length=2, null=True)),
('isdeleted', models.CharField(choices=[('y', 'y'), ('n', 'n')], default='n', max_length=2, null=True)),
],
options={
'db_table': 'engageboost_additional_global_settings',
},
),
]
|
[
"mjamal@lifcoshop.net"
] |
mjamal@lifcoshop.net
|
14a993e142ab29bcd04556609ffce8bcb573b3bf
|
d3c4353cbca106ac3d5154a9b452d6bb90c0ae83
|
/server/models/multimedia_emotions.py
|
c150e6784f4688291637583c1f28617f825c6d70
|
[
"MIT"
] |
permissive
|
CatalystOfNostalgia/hoot
|
fe64fc97522da2b7657ad0ba0477eaf50f94e942
|
952781ebf9cafc036c68d1358f1921b7c9a94ab7
|
refs/heads/master
| 2021-01-15T08:04:54.009776
| 2016-04-29T03:40:44
| 2016-04-29T03:40:44
| 49,789,954
| 2
| 0
| null | 2016-04-28T19:46:06
| 2016-01-16T20:36:03
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Date
from sqlalchemy.dialects.mysql import DOUBLE
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MultimediaEmotions(Base):
__tablename__ = 'multimedia_emotions'
media_id = Column(Integer, primary_key=True)
emotion = Column(Integer, primary_key=True)
|
[
"anthonyrdario@gmail.com"
] |
anthonyrdario@gmail.com
|
1fd89802a93b674ca740f01993f8aa10797c78f9
|
32c4a3f8893a7fe9039ebfb2d98215e06203f8f2
|
/src/tensor/op/granularity/finesse.py
|
3b5dc2a03e16933ac02aa7819af67ab8091f81cb
|
[
"Apache-2.0"
] |
permissive
|
jedhsu/tensor
|
d27c8951aa32208e3c5bbcef0d0f2bae56f8a670
|
3b2fe21029fa7c50b034190e77d79d1a94ea5e8f
|
refs/heads/main
| 2023-07-06T11:35:36.307538
| 2021-08-07T20:11:19
| 2021-08-07T20:11:19
| 366,904,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
"""
*Finesse*
"""
__all__ = ["Finesse"]
class Finesse(
TensorOperator,
):
pass
|
[
"jed910@gmail.com"
] |
jed910@gmail.com
|
b1a356c2a2ca5d8c100de27922504b27df7ff7da
|
8a1145b1d89eacb92e57be747043ae7a6209ade7
|
/final.py
|
f24e0d3424bb5f5603352372bfebee3480c4a5c5
|
[] |
no_license
|
NavniGupta/ece650pythonexamples
|
21ae326d71dc52fb90db9dadef3584f681e04b58
|
aaac538eb8f319da639814b27820b8f244076d4a
|
refs/heads/master
| 2020-03-29T12:52:15.834803
| 2018-10-26T19:38:27
| 2018-10-26T19:38:27
| 149,924,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,194
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 13:35:41 2018
@author: Kunal Taneja
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 11:24:18 2018
e
@author: Kunal Taneja
"""
import sys
import re
import ast
from decimal import Decimal
class_list = dict()
def vertex_collection():
k=1
m=1
l=list(class_list.values())
vertices=list()
graph_vertices=list()
graph_edges=list()
graph_edgeset=set()
graph_set=set()
vertex_list=dict()
dict_vertex=list()
vertices_intersection=list()
y=list()
#last_element=list()
no_streets=len(l)
vertices=[None]*no_streets
#last_element=[None]*no_streets
for o in range (0,no_streets):
y=[i.split(', ', 1)[0] for i in l[o]]
vertices[o]=y
for i in range(0,no_streets):
#print "hi"
for j in range (i+1,no_streets):
w=len(vertices[i])
#print "hi"
v=len(vertices[j])
while k<w:
while m<v:
pair1=vertices[i][k-1]
pair2=vertices[i][k]
pair3=vertices[j][m-1]
pair4=vertices[j][m]
x1,y1 = ast.literal_eval(pair1)
x2,y2 = ast.literal_eval(pair2)
x3,y3 = ast.literal_eval(pair3)
x4,y4 = ast.literal_eval(pair4)
x1=float(x1)
x2=float(x2)
x3=float(x3)
x4=float(x4)
y1=float(y1)
y2=float(y2)
y3=float(y3)
y4=float(y4)
A1 = y2-y1
B1 = x1-x2
C1 = A1*(x1) + B1*(y1)
A2 = y4-y3
B2 = x3-x4
C2 = A2*(x3)+ B2*(y3)
determinant = A1*B2 - A2*B1
min_x1=min(x1,x2)
min_x2=min(x3,x4)
max_x1=max(x1,x2)
max_x2=max(x3,x4)
min_y1=min(y1,y2)
min_y2=min(y3,y4)
max_y1=max(y1,y2)
max_y2=max(y3,y4)
flag1=False
flag2=False
pair1="(" + str(x1) + "," +str(y1) + ")"
pair2="(" + str(x2) + "," +str(y2) + ")"
pair3="(" + str(x3) + "," +str(y3) + ")"
pair4="(" + str(x4) + "," +str(y4) + ")"
thislist = list((pair1,pair2,pair3,pair4))
# print thislist
if (determinant != 0):
X = Decimal((B2*C1 - B1*C2)/determinant)
X = round(X,2)
Y = Decimal((A1*C2 - A2*C1)/determinant)
Y = round(Y,2)
# print "X= " + str(X)
#print "Y= " + str(Y)
#print "min_x1= " + str(min_x1)
#print "max_x1= " + str(max_x1)
#print "min_y1= " + str(min_y1)
# print "max_y1= " + str(max_y1)
# print "min_x2= " + str(min_x2)
#print "max_x2= " + str(max_x2)
#print "min_y2= " + str(min_y2)
#print "max_y2= " + str(max_y2)
if (bool(X<=max_x1) & bool(X>=min_x1)):
#print ("im true for x1")
if (bool(Y<=max_y1) & bool(Y>=min_y1)):
#print ("hi im true for both x1,y1")
flag1=True
if (bool(X<=max_x2) & bool(X>=min_x2)):
#print ("im true for x2")
if (bool(Y<=max_y2) & bool(Y>=min_y2)):
# print ("hi im true for both x2,y2")
flag2=True
                        if flag1 and flag2:  # the intersection lies on both segments
#print "you got me right"
new_vertex="(" + str(X) + "," +str(Y) + ")"
graph_vertices.extend(thislist)
dict_vertex.append(new_vertex)
vertices_intersection.append(thislist)
intersection_points=new_vertex
graph_vertices.append(new_vertex)
graph_set=set(graph_vertices)
graph_vertices1=list(graph_set)
for z in range(0,len(graph_vertices1)):
vertex_list[z+1]=graph_vertices1[z]
else:
pass
m=m+1
k=k+1
m=1
k=1
print "V = {"
for x,y in vertex_list.items():
print x,": ",y
print "}"
for t in range(0, len(dict_vertex)):
intersection_pt=dict_vertex[t]
vertexlist=vertices_intersection[t]
[pair1, pair2, pair3, pair4]=vertexlist
for g in vertex_list:
if intersection_pt == vertex_list[g]:
edge_intersection=g
for s,e in vertex_list.items():
if e==pair1:
edge_pair1=s
#print "comparing pair 1" + e
#print s
for s,e in vertex_list.items():
if e==pair2:
edge_pair2=s
#print s
#print "comparing pair 2" + e
for s,e in vertex_list.items():
if e==pair3:
edge_pair3=s
# print s
# print "comparing pair 3" +e
for s,e in vertex_list.items():
if e==pair4:
edge_pair4=s
# print "comparing pair 4" + e
#print s
#print edge_intersection
# print intersection_points
edge1="<"+str(edge_intersection) + "," + str(edge_pair1) + ">"
#print edge1
edge2="<"+str(edge_intersection) + "," + str(edge_pair2) + ">"
#print edge2
edge3="<"+str(edge_intersection) + "," + str(edge_pair3) + ">"
#print edge3
edge4="<"+str(edge_intersection) + "," + str(edge_pair4) + ">"
#print edge4
edgelist = list((edge1,edge2,edge3,edge4))
graph_edges.extend(edgelist)
graph_edgeset=set(graph_edges)
graph_edges=list(graph_edgeset)
distinct=list()
for n in range(0,len(graph_edges)):
pair=graph_edges[n]
pair=re.sub('<','(',pair)
pair=re.sub('>',')',pair)
v1,w1 = ast.literal_eval(pair)
distinct.append(v1)
distinct_set=set(distinct)
distinct=list(distinct_set)
for n1 in range(1,len(distinct)):
edge_betweeen="<"+str(distinct[n1-1]) + "," + str(distinct[n1]) + ">"
graph_edges.append(edge_betweeen)
graph_set=set(graph_edges)
graph_edges=list(graph_set)
print "E ={"
for u in graph_edges:
print u
print "}"
def main():
while True:
command=raw_input()
if(command ==''):
break
elif(command[0]=='r'):
y=re.split(' +"|"|',command)
else:
y=re.split('" +| +"',command)
if(len(y)==1):
choice=y[0]
elif(len(y)==2):
choice=y[0]
street=y[1]
street=street.lower()
elif(len(y)==3):
choice=y[0]
street=y[1]
street=street.lower()
location=y[2]
else:
sys.stdout.write("Error: " + "Wrong selection of command")
continue
if choice == 'a':
location=re.sub(' +','',location)
location=re.sub('\)\(',') ( ',location)
location=re.sub('\( ','(',location)
location=location.split(' ')
class_list[street] = location
#database()
elif choice == 'c':
location=re.sub(' +','',location)
location=re.sub('\)\(',') ( ',location)
location=re.sub('\( ','(',location)
location=location.split(' ')
class_list[street] = location
#database()
elif choice == 'r':
try:
del class_list[street]
except KeyError:
sys.stderr.write("Error: " + street + " not available to delete")
#database()
elif choice == 'g':
vertex_collection()
else:
print 'Error: ' + 'Wrong choice try again'
if __name__ == '__main__':
main()
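# A worked mini-example of the intersection math used in vertex_collection()
# (assumed sample segments): (0,0)-(2,2) vs (0,2)-(2,0).
#   A1 = y2-y1 = 2,  B1 = x1-x2 = -2,  C1 = A1*x1 + B1*y1 = 0
#   A2 = y4-y3 = -2, B2 = x3-x4 = -2,  C2 = A2*x3 + B2*y3 = -4
#   determinant = A1*B2 - A2*B1 = -4 - 4 = -8
#   X = (B2*C1 - B1*C2)/determinant = (0 - 8)/(-8) = 1
#   Y = (A1*C2 - A2*C1)/determinant = (-8 - 0)/(-8) = 1
# The point (1,1) falls inside both segments' bounding boxes, so it is
# reported as a graph vertex.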
|
[
"noreply@github.com"
] |
NavniGupta.noreply@github.com
|
a57fd44c4e0572a4606a934c2b10d00002f17e65
|
ae737dd1a93b31673e26341606ca767ec00f32bf
|
/list-propinsi/list-per-propinsi-jambi.py
|
188016fe865a1d1490f5c306cf92ec007f02b1ed
|
[] |
no_license
|
tandhy/pilpres2014
|
ba14ad7cf13ecf154c59046d80a969bfddd734b5
|
8c7a12636470b46edd7167632affd1272721a1d1
|
refs/heads/master
| 2020-06-04T03:01:35.401826
| 2014-07-20T01:15:25
| 2014-07-20T01:15:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,496
|
py
|
'''
Author : Tandhy Simanjuntak / July 15th, 2014
Purpose : To download all C1 forms per province from pilpres2014.kpu.go.id
'''
# cURL operation
import pycurl
# handle buffer
from StringIO import StringIO
# encode given variable
from urllib import urlencode
# prettify output
import pprint
# handle directory operation
import os
# use beautifulSoup to read html
from bs4 import BeautifulSoup
# generate time and date and sleep interval
import time
# random is used to generate sleep intervals
import random
def formatKode(kode):
different = 5 - len(kode)
zero = ''
for i in range(0, different,1):
zero += '0'
return zero + kode
def formatKodeTPS(kode):
different = 3 - len(str(kode))
zero = ''
for i in range(0, different,1):
zero += '0'
return zero + str(kode)
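# Example of the padding helpers (assumed inputs): formatKode('123') -> '00123',
# formatKodeTPS(7) -> '007'. The scan filenames built further below follow
# '00' + formatKode(kelurahan_kode) + formatKodeTPS(tps_no) + page ('01'..'04')
# + '.jpg', i.e. 12 digits plus the extension.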
# set save path. you may change to your own folder. windows version
#savePath = "f:/workspace/pilpres2014"
# set save path. you may change to your own folder. mac version
savePath = "/Users/tandhy/pilpres2014/drive/kawalpemilu-C1"
# set this variable to show each C1 form filename after it is saved to disk
showSavedFilename = True
# variable
prop = []
kabupaten = []
kecamatan = []
kelurahan = []
# format file c1: tpsx-tpsKode-[12digit].jpg
# saved under these folder : Propinsi - Kabupaten - Kecamatan - Kelurahan
# remove hashtag to enable propinsi download.
# For example, if you want to download propinsi Bali, remove '#' from prop = {'kode': '53241', 'nama': 'BALI'}
#propinsi = [ {'kode': '15885', 'nama': 'JAMBI'},{'kode': '17404', 'nama': 'SUMATERA SELATAN'},{'kode': '20802', 'nama': 'BENGKULU'},{'kode': '22328', 'nama': 'LAMPUNG'},{'kode': '24993', 'nama': 'KEPULAUAN BANGKA BELITUNG'},{'kode': '25405', 'nama': 'KEPULAUAN RIAU'},{'kode': '25823', 'nama': 'DKI JAKARTA'},{'kode': '26141', 'nama': 'JAWA BARAT'},{'kode': '32676', 'nama': 'JAWA TENGAH'},{'kode': '41863', 'nama': 'DAERAH ISTIMEWA YOGYAKARTA'},{'kode': '42385', 'nama': 'JAWA TIMUR'},{'kode': '51578', 'nama': 'BANTEN'},{'kode': '53241', 'nama': 'BALI'},{'kode': '54020', 'nama': 'NUSA TENGGARA BARAT'},{'kode': '55065', 'nama': 'NUSA TENGGARA TIMUR'},{'kode': '58285', 'nama': 'KALIMANTAN BARAT'},{'kode': '60371', 'nama': 'KALIMANTAN TENGAH'},{'kode': '61965', 'nama': 'KALIMANTAN SELATAN'},{'kode': '64111', 'nama': 'KALIMANTAN TIMUR'},{'kode': '65702', 'nama': 'SULAWESI UTARA'},{'kode': '67393', 'nama': 'SULAWESI TENGAH'},{'kode': '69268', 'nama': 'SULAWESI SELATAN'},{'kode': '72551', 'nama': 'SULAWESI TENGGARA'},{'kode': '74716', 'nama': 'GORONTALO'},{'kode': '75425', 'nama': 'SULAWESI BARAT'},{'kode': '76096', 'nama': 'MALUKU'},{'kode': '77085', 'nama': 'MALUKU UTARA'},{'kode': '78203', 'nama': 'PAPUA'},{'kode': '81877', 'nama': 'PAPUA BARAT'}]
propinsi = [{'kode': '15885', 'nama': 'JAMBI'}]
# open a log file
# set path for log
logPath = savePath + "/listpropinsi"
# iterate each propinsi
for prop in propinsi:
logFilename = logPath + "/" + prop['nama'] + "-" + prop['kode'] + ".txt"
# check for log file
if not os.path.exists(logPath):
os.makedirs(logPath)
#print "Propinsi : %s" %(prop['nama'])
# check if file exists
if not (os.path.exists(logFilename)):
openLogFile = open(logFilename, 'w')
openLogFile.write("Generated : " + time.strftime("%m.%d.%Y %H:%M:%S") + "\n" )
openLogFile.write("========================================\n")
openLogFile.write( "Propinsi : %s (%s)\n" %(prop['nama'] , prop['kode']) )
openLogFile.write( "Format data : Provinsi\tKabupaten/Kota\tKecamatan\tKelurahan\tNomor TPS\tID TPS\tPS-Hatta\tJW-JK\tTidak Sah\tLink4\tLink3\tLink2\tLink1\n\n" )
listReadLogFile = ''
else:
openLogFile = open(logFilename, 'a')
readLogFile = open(logFilename, 'r')
listReadLogFile = readLogFile.read()
# store ID
IDFilename = logPath + "/id-%s.txt" % (prop['kode'])
# check if file exists
if not (os.path.exists(IDFilename)):
openIDFile = open( IDFilename, 'w') # rewrite existing file
openIDFile.write("%s\n" % (prop['kode']) )
listReadIDFile = ''
else:
openIDFile = open (IDFilename, 'a')
readIDFile = open (IDFilename,'r')
listReadIDFile = readIDFile.read()
c = pycurl.Curl()
buffer = StringIO()
url = 'http://pilpres2014.kpu.go.id/c1.php?cmd=select&grandparent=0&parent=%s' % ( prop['kode'] )
post_data = {'wilayah_id': prop['kode']}
postfields = urlencode(post_data)
c.setopt(c.URL,url)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.CONNECTTIMEOUT,999)
c.setopt(c.WRITEDATA, buffer)
c.perform()
# fetch <option>
htmlFile = BeautifulSoup(buffer.getvalue())
optionTag = ''
optionTag = htmlFile.find_all('option')
kabupaten = []
for option in optionTag:
if(option.string != 'pilih'):
# store <option> value into dict
info = {'kode': option.get('value'), 'nama' : option.string }
# add dict into list
kabupaten.append(info)
# add time interval
    time.sleep(random.randint(3,5)) # random sleep time between 3 - 5 seconds
# iterate each kabupaten
for kab in kabupaten:
#print "%s; %s" %(prop['nama'], kab['nama'])
if kab['kode'] not in listReadIDFile:
openIDFile.write("%s\n" % (kab['kode']) )
#openLogFile.write( "%s; %s\n" %(prop['nama'], kab['nama']) )
# set folder path
c = pycurl.Curl()
buffer = StringIO()
url = 'http://pilpres2014.kpu.go.id/c1.php?cmd=select&grandparent=%s&parent=%s' % ( prop['kode'] , kab['kode'])
post_data = {'wilayah_id': kab['kode']}
postfields = urlencode(post_data)
c.setopt(c.URL,url)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.CONNECTTIMEOUT,999)
c.setopt(c.WRITEDATA, buffer)
c.perform()
# fetch <option>
htmlFile = BeautifulSoup(buffer.getvalue())
optionTag = ''
optionTag = htmlFile.find_all('option')
kecamatan = []
for option in optionTag:
if(option.string != 'pilih'):
# store <option> value into dict
info = {'kode': option.get('value'), 'nama' : option.string }
# add dict into list
kecamatan.append(info)
# iterate each kecamatan
for kec in kecamatan:
#print "%s; %s; %s" %( prop['nama'], kab['nama'], kec['nama'] )
if kec['kode'] not in listReadIDFile:
openIDFile.write("%s\n" % (kec['kode']) )
#openLogFile.write( "%s; %s; %s\n" %( prop['nama'], kab['nama'], kec['nama'] ) )
c = pycurl.Curl()
buffer = StringIO()
url = 'http://pilpres2014.kpu.go.id/c1.php?cmd=select&grandparent=%s&parent=%s' % (kab['kode'], kec['kode'])
post_data = {'wilayah_id': kec['kode']}
postfields = urlencode(post_data)
c.setopt(c.URL,url)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.CONNECTTIMEOUT,999)
c.setopt(c.WRITEDATA, buffer)
c.perform()
# fetch <option>
htmlFile = BeautifulSoup(buffer.getvalue())
optionTag = ''
optionTag = htmlFile.find_all('option')
kelurahan = []
for option in optionTag:
if(option.string != 'pilih'):
# store <option> value into dict
info = {'kode': option.get('value'), 'nama' : option.string }
# add dict into list
kelurahan.append(info)
# iterate each kelurahan
for kel in kelurahan:
# check if kelurahan has been downloaded or not in the IDFilename
#print "%s; %s; %s; %s" %( prop['nama'], kab['nama'], kec['nama'], kel['nama'] )
#openLogFile.write( "%s; %s; %s; %s" % ( prop['nama'], kab['nama'], kec['nama'], kel['nama'] ) )
if kel['kode'] not in listReadIDFile:
c = pycurl.Curl()
buffer = StringIO()
url = 'http://pilpres2014.kpu.go.id/c1.php?cmd=select&grandparent=%s&parent=%s' % (kec['kode'], kel['kode'])
post_data = {'wilayah_id': kel['kode']}
postfields = urlencode(post_data)
c.setopt(c.URL,url)
c.setopt(c.CONNECTTIMEOUT,999)
c.setopt(c.POSTFIELDS, postfields)
c.setopt(c.WRITEDATA, buffer)
c.perform()
# fetch <td> to get tps code
htmlFile = BeautifulSoup(buffer.getvalue())
tdTag = ''
tdTag = htmlFile.find_all('td')
tpsKode = []
for td in tdTag:
if(td.string != None) and (td.string != 'unduh'):
if(len(td.string) > 2 ):
tpsKode.append(td.string)
# fetch <a> to get link to the image
'''aTag =''
aTag = htmlFile.find_all('a')
aList = []
for a in aTag:
if(a.get('href').find("javascript:read_jpg") != -1):
aList.append(a.get('href').strip("javascript:read_jpg('").strip("')"))'''
noTps = 0
link1 = ''
link2 = ''
link3 = ''
link4 = ''
# 12 34567 890 12
# format scan : 00 32832 006 01.jpg
for i in range(0,len(tpsKode), 1):
link1 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=00%s%s01.jpg", "hal.1")' % (formatKode(kel['kode']), formatKodeTPS(i+1))
link2 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=00%s%s02.jpg", "hal.2")' % (formatKode(kel['kode']), formatKodeTPS(i+1))
link3 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=00%s%s03.jpg", "hal.3")' % (formatKode(kel['kode']), formatKodeTPS(i+1))
link4 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=00%s%s04.jpg", "hal.4")' % (formatKode(kel['kode']), formatKodeTPS(i+1))
#link1 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=%s.jpg", "hal.1")' % (aList[(i * 4) + 0])
#link2 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=%s.jpg", "hal.2")' % (aList[(i * 4) + 1])
#link3 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=%s.jpg", "hal.3")' % (aList[(i * 4) + 2])
#link4 = '=hyperlink("http://scanc1.kpu.go.id/viewp.php?f=%s.jpg", "hal.4")' % (aList[(i * 4) + 3])
#print "%s\t%s\t%s\t%s\t%d\t%s" %( prop['nama'], kab['nama'], kec['nama'], kel['nama'], i + 1, tpsKode[i] )
print "%s\t%s\t%s\t%s\t%d\t%s\t0\t0\t0\t%s\t%s\t%s\t%s" %( prop['nama'], kab['nama'], kec['nama'], kel['nama'], i + 1, tpsKode[i], link4, link3, link2, link1 )
# check whether already written or not
text = "%s\t%s\t%s\t%s\t%d\t%s\t0\t0\t0\t%s\t%s\t%s\t%s\n" %( prop['nama'], kab['nama'], kec['nama'], kel['nama'], i + 1, tpsKode[i], link4, link3, link2, link1 )
if text not in listReadLogFile:
openLogFile.write(text)
print "----------------------------------------------------------------"
openIDFile.write("%s\n" % (kel['kode']) )
time.sleep(random.randint(1,3)) # random sleep time between 1 - 3 second
else:
print "%s\t%s\t%s\t%s\t--> DONE" %( prop['nama'], kab['nama'], kec['nama'], kel['nama'])
# separator between kecamatan
print "----------------------------------------------------------------"
# close c instance
c.close()
# close openLogFile instance
openLogFile.close()
|
[
"tandhy@bu.edu"
] |
tandhy@bu.edu
|
3e40648cf136b93535e2e1426d1b48ef2d2dee2a
|
498e6fd1e00b126659741ed92df937f2d00aa10a
|
/3_accessing_an_image_pix.py
|
03eb7858bbe5238c4bb7d1b38ed744cf5c362e18
|
[] |
no_license
|
zaneno/opencv_talk_public
|
304092e4e6c798df172bebd4ba806976cd16d1c1
|
93ded0e7e5b1f1371d7291d209e3e4372e5d3507
|
refs/heads/main
| 2023-03-03T09:13:12.630342
| 2021-02-10T16:12:18
| 2021-02-10T16:12:18
| 337,778,028
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import numpy as np
import cv2
# Read the image
img = cv2.imread('pics/pictured_rocks.jpg')
print("type(img): ", type(img))
print("img.shape: ", img.shape)
# get i and j
i = int(input("i: "))
j = int(input("j: "))
# read the value at i,j
b, g, r = img[i,j]
# we could also do
b = img[i,j,0]
g = img[i,j,1]
r = img[i,j,2]
# or
b = img.item(i,j,0)
g = img.item(i,j,1)
r = img.item(i,j,2)
print(b, g, r)
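# Writing works symmetrically (a sketch): assign a B,G,R triple back to the
# same location, then save the result (the output path is hypothetical).
img[i, j] = (b, g, r)
cv2.imwrite('pics/pictured_rocks_edited.jpg', img)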
|
[
"you@example.com"
] |
you@example.com
|
557f77dd5d9757885339a60b4da132a5a7432c06
|
561b57b8dbad7b02898032a115825c37f3125062
|
/tests/summarizer/test_text_media_matcher.py
|
f8e76c734b9e53c34902a7a58a5640b6ef2549b7
|
[
"Apache-2.0"
] |
permissive
|
DanisHack/stampify
|
c2c687dbb992b870c2d55669fdc3249f6991a2fb
|
95f95ec98aa3f0b05d8ad39ce5dd3d89858b4488
|
refs/heads/master
| 2022-11-20T17:47:13.002219
| 2020-07-22T10:00:49
| 2020-07-22T10:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,675
|
py
|
from summarization.text_media_matching.text_media_matcher import \
TextMediaMatcher
from tests.summarizer.text_media_input_fetcher import fetch_text_media_input
# fetch test inputs
test_input_dict = fetch_text_media_input()
sentence_1 = test_input_dict["sentence_1"]
media_related_to_sentence_1 = test_input_dict["media_related_to_sentence_1"]
sentence_2 = test_input_dict["sentence_2"]
media_related_to_sentence_2 = test_input_dict["media_related_to_sentence_2"]
def test_text_media_matcher_return_format():
''' tests the return format of the text-media
matcher
'''
matcher = TextMediaMatcher(
[sentence_1, sentence_2],
[media_related_to_sentence_1, media_related_to_sentence_2]
)
processed_contents_dict = matcher._get_matched_and_unmatched_contents()
assert isinstance(processed_contents_dict, dict)
assert 'matched_contents' in processed_contents_dict
assert 'unused_contents' in processed_contents_dict
def test_text_media_matcher_matches_contents():
''' checks if the response returned is correct'''
matcher = TextMediaMatcher(
[sentence_2],
[media_related_to_sentence_1, media_related_to_sentence_2]
)
processed_contents_dict = matcher._get_matched_and_unmatched_contents()
assert processed_contents_dict['matched_contents'] == [
(sentence_2, media_related_to_sentence_2)]
assert processed_contents_dict['unused_contents'] == [
media_related_to_sentence_1]
def test_text_media_matcher_returns_unused_media_when_sentences_is_empty():
matcher = TextMediaMatcher(
[],
[media_related_to_sentence_1, media_related_to_sentence_2]
)
processed_contents_dict = matcher._get_matched_and_unmatched_contents()
assert processed_contents_dict["matched_contents"] == []
assert processed_contents_dict["unused_contents"] == [
media_related_to_sentence_1, media_related_to_sentence_2
]
def test_text_media_matcher_returns_unused_sentences_when_media_is_empty():
matcher = TextMediaMatcher(
[sentence_1, sentence_2],
[]
)
processed_contents_dict = matcher._get_matched_and_unmatched_contents()
assert processed_contents_dict["matched_contents"] == []
assert processed_contents_dict["unused_contents"] == [
sentence_1, sentence_2
]
def test_text_media_matcher_returns_empty_dict_when_both_are_empty():
matcher = TextMediaMatcher(
[],
[]
)
processed_contents_dict = matcher._get_matched_and_unmatched_contents()
assert processed_contents_dict["matched_contents"] == []
assert processed_contents_dict["unused_contents"] == []
|
[
"noreply@github.com"
] |
DanisHack.noreply@github.com
|
b115f3b835f732aa430fd5f7e80dc861d91c5357
|
ec26178312157b019bad591b1a2d035a83189aed
|
/TestChild.py
|
7ecc87a810c9e8103343a13429e7bc806c07fad3
|
[] |
no_license
|
James-Brooke95/Test_Repo
|
ee678b260ef362526ebc65edb7064a0498caa620
|
61df91e6fee2ef257e4badf52ce2bcb5ac65dea0
|
refs/heads/master
| 2022-12-17T20:05:55.695605
| 2020-09-10T16:54:36
| 2020-09-10T16:54:36
| 292,811,450
| 0
| 0
| null | 2020-09-04T10:16:47
| 2020-09-04T09:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 66
|
py
|
## Adding a new file in ChildBranch
print("Inside Child Branch")
|
[
"noreply@github.com"
] |
James-Brooke95.noreply@github.com
|
4fe7e9efc0dd51911bec6ad5266887f80d60ad41
|
45547a3feb1f08b785e15b62c8e8c4ca9dcda909
|
/polls/migrations/0004_auto_20190131_1744.py
|
8b638264ec44cbdef2ecdc5fa6dd762a4ab7c80c
|
[] |
no_license
|
joelunmsm2003/smartback
|
d330d906141fd347fb283350e885e53a15cd0adc
|
f30d4d39fbe648dea7683fb42922bbd145758f19
|
refs/heads/master
| 2020-04-20T02:09:05.794530
| 2019-02-06T17:44:49
| 2019-02-06T17:44:49
| 168,563,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-01-31 17:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0003_auto_20190131_1720'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='birthdate',
),
migrations.RemoveField(
model_name='profile',
name='location',
),
migrations.RemoveField(
model_name='profile',
name='role',
),
migrations.AddField(
model_name='profile',
name='club',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.Club'),
),
]
|
[
"you@example.com"
] |
you@example.com
|
f442c5078b4ec9dcc3e88498c0d9bed65d728069
|
9b92a946c139c83c4103a406dce26e4fb01fdf60
|
/53_Maximum_Subarray.py
|
54be525144dc1bb81f2e59a3fac65dbfa06c1940
|
[] |
no_license
|
nyuxz/CodePractice
|
97ddddcaf0780e8cfc7af0c8a43e103d32eab0c0
|
019627d16073c09240a2535f95277f9283fcc15f
|
refs/heads/master
| 2021-01-20T00:47:45.144956
| 2021-01-12T01:40:46
| 2021-01-12T01:40:46
| 89,191,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
#53. Maximum Subarray
from typing import List  # needed for the List[int] annotation used by the DP variant below

class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
        @Logic
Kadane's algorithm, O(n) complexity
state: lookup[i]
dp: lookup[i] = max(A[i], A[i] + lookup[i-1])
"""
currMax = nums[0]
Max = nums[0]
for i in nums[1:]:
currMax = max(i, currMax + i)
Max = max(Max, currMax)
return Max
    ## DP variant (note: it reuses the name maxSubArray, so this definition
    ## replaces the Kadane version above within the class)
def maxSubArray(self, nums: List[int]) -> int:
dp = [0]*len(nums)
dp[0] = nums[0]
max_num = nums[0]
for i in range(1, len(nums)):
dp[i] = max(dp[i-1]+nums[i], nums[i])
if dp[i]>max_num:
max_num = dp[i]
return max_num
# testing
if __name__ == '__main__':
print (Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))
'''
@Note
1. without the dp, the complexity is O(n^2)
2. ToDo: divide and conquer approach, O(nlogn)
'''
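# A sketch of the divide-and-conquer approach mentioned in the note above
# (hypothetical helper, O(n log n)): the best subarray lies entirely in the
# left half, entirely in the right half, or crosses the midpoint.
def max_subarray_dc(nums, lo=0, hi=None):
    if hi is None:
        hi = len(nums) - 1
    if lo == hi:
        return nums[lo]
    mid = (lo + hi) // 2
    # best sum of a suffix of the left half, ending at mid
    left_best, s = float('-inf'), 0
    for i in range(mid, lo - 1, -1):
        s += nums[i]
        left_best = max(left_best, s)
    # best sum of a prefix of the right half, starting at mid + 1
    right_best, s = float('-inf'), 0
    for i in range(mid + 1, hi + 1):
        s += nums[i]
        right_best = max(right_best, s)
    return max(max_subarray_dc(nums, lo, mid),
               max_subarray_dc(nums, mid + 1, hi),
               left_best + right_best)

# max_subarray_dc([-2,1,-3,4,-1,2,1,-5,4]) == 6, matching Kadane's answer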
|
[
"xz1757@nyu.edu"
] |
xz1757@nyu.edu
|
3e8c28637cfd517e58a46fb1d395c4bf16b06ee7
|
b1e958f90059cd97b8c1cb62abb34948fa867569
|
/Practice-Exercises/Week 0a - Expressions/Hours to seconds template.py
|
f52b8668a9059d598e14a2e460a7d05e63d0cb38
|
[] |
no_license
|
deltaworld/python-programming-coursera
|
ff756f3522a79f9b2101607f8156adc8d256db10
|
740ef3b02c05323e8da691ccbaa17337deb32db7
|
refs/heads/master
| 2021-01-23T03:04:25.476645
| 2015-07-15T13:25:22
| 2015-07-15T13:25:22
| 39,136,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# http://www.codeskulptor.org/#exercises_expr_hours_to_seconds_template.py
# Compute the number of seconds in a given number of hours, minutes, and seconds.
###################################################
# Hours, minutes, and seconds to seconds conversion formula
# Student should enter statement on the next line.
###################################################
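# A possible solution sketch (assuming the exercise's sample inputs of
# 7 hours, 21 minutes, 37 seconds, which produce the expected output below):
hours = 7
minutes = 21
seconds = 37
print hours * 3600 + minutes * 60 + seconds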
# Expected output
# Student should look at the following comments and compare to printed output.
#26497
|
[
"itrainit@gmail.com"
] |
itrainit@gmail.com
|
ec719999c4de822f2d48a2779bb7d13b839294a9
|
8b0a55183c72f0cb816f2e35725b436caaf583be
|
/flaskblog/forms.py
|
8e4a05cc0425805ce5fb4699476e11c90bddf4f4
|
[] |
no_license
|
OhDany/blog-flask
|
79924f406494e744b18c5919b6d1509bac57b074
|
a622fbbec085513944f314e2b3354073ab5adb76
|
refs/heads/master
| 2022-10-29T05:47:35.894119
| 2020-06-14T20:26:16
| 2020-06-14T20:26:16
| 269,820,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png', 'jpeg'])])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one')
class PostForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
content = TextAreaField('Content', validators=[DataRequired()])
submit = SubmitField('Post')
|
[
"oh.dany.mx@gmail.com"
] |
oh.dany.mx@gmail.com
|
c396ffb51c0613102b6abeb9c30a599501808f45
|
c896a69e98e737a19259565bf8bb10011001645f
|
/VolunteerManager/credentials.default.py
|
7aa1037eac539826fcd858e3d6475c8f6d905079
|
[] |
no_license
|
Berailitz/VolunteerManager
|
5dffe3bedb6eb66c74149db56dc8d046bdd2d582
|
af2478aa2f1209f4fbe45d5fc345d6333908a2f8
|
refs/heads/master
| 2021-01-03T12:06:17.452864
| 2018-02-04T10:06:58
| 2018-02-04T10:06:58
| 240,076,282
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
"""credentials of this app"""
#!/usr/env/python3
# -*- coding: UTF-8 -*-
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://username:password@localhost/database?charset=utf8'
SECRET_KEY = 'xxx'
SYNC_UAERNAME = 'username'
SYNC_ENCRYPTED_PASSWORD = "password"
UNIVERSAL_DEBUG_TOKEN = 'token' # IMPORTANT NOTE: FOR DEBUG ONLY
|
[
"admin@ohhere.xyz"
] |
admin@ohhere.xyz
|
74da7b2b2eef0fc96ccad15dd90171c4b80cb928
|
16fcf705234385a7bf3b6f9402512e8fe99cb4a4
|
/stock_trading/stock_trading/urls.py
|
22fceeea86c2b6d368af92e1ff12448739fdb274
|
[] |
no_license
|
webclinic017/Stock-Trading-8
|
86a9bc9ca2f2879c131df4decbe6176337db3e93
|
8a27beddfd60d398566e8407f30610dbf88925d0
|
refs/heads/master
| 2023-08-31T16:18:45.934001
| 2021-09-28T22:37:40
| 2021-09-28T22:37:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""stock_trading URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('home.urls'))
]
|
[
"67853959+XAbirHasan@users.noreply.github.com"
] |
67853959+XAbirHasan@users.noreply.github.com
|
abcc156969f2f6ec3e715a99b3824791acfdc7fb
|
b28d13b2e785398f1a8074e0034080539009c837
|
/django-rest-generic/snippets/serializers.py
|
55761014b6cfca90d7a3b16d7dcfbfa1290f9a78
|
[] |
no_license
|
sdugaro/django
|
c58f1c290a1cadf90d723083c1bceefbbac99073
|
1704f1796cb3f25cac260c6120becd70e9f1c33f
|
refs/heads/main
| 2023-02-06T22:06:41.872202
| 2020-12-27T09:04:12
| 2020-12-27T09:04:12
| 311,162,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
# Subclassing a base rest_framework Serializer
class SnippetSerializerBasic(serializers.Serializer):
# define which fields are serializable
id = serializers.IntegerField(read_only=True)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
code = serializers.CharField(style={'base_template': 'textarea.html'})
linenos = serializers.BooleanField(required=False)
language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
# serializer.save() callbacks
def create(self, validated_data):
"""
Create and return a new `Snippet` instance, given the validated data.
"""
return Snippet.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Snippet` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance
# Analogous to Django's ModelForm/Form Meta-class code simplification,
# REST framework has a ModelSerializer/Serializer simplification idiom
class SnippetSerializer(serializers.ModelSerializer):
class Meta:
model = Snippet
fields = ['id', 'title', 'code', 'linenos', 'language', 'style']
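# A minimal usage sketch (hypothetical shell session, not part of this module):
#   serializer = SnippetSerializer(data={'code': 'print("hello")'})
#   serializer.is_valid()        # True; the other fields fall back to defaults
#   snippet = serializer.save()  # creates and returns a Snippet instance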
|
[
"sdugaro@yahoo.com"
] |
sdugaro@yahoo.com
|
be7482a998f69bafdeb63adb3cc2376c3382c26f
|
261eba086816dbb3db4836c9b1e5869ccf0f8bae
|
/python代码规范demo讲解/2.8生成器/demo_08_old.py
|
38d6db4d11804451d32495eb59a6a0f597d02815
|
[] |
no_license
|
budaLi/jianzi
|
e316bdfb25587d14d38f1bea98772bce5ac69198
|
bca098de0f06ae1c78afc3203dfb0eea6a412dee
|
refs/heads/master
| 2023-05-02T19:33:25.752799
| 2021-05-25T08:03:24
| 2021-05-25T08:03:24
| 271,513,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
"""
生成器和迭代器
"""
# @Time : 2020/10/27 10:42
# @Author : Libuda
# @FileName: demo_08_old.py
# @Software: PyCharm
def odds(n):
ret = []
for i in range(1, n + 1):
if i % 2 == 1:
ret.append(i)
return ret
for i in odds(1000):
print(i)
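# A generator-based sketch of the same logic (presumably what the non-"old"
# variant of this demo shows): odd numbers are yielded lazily instead of
# being collected into a list first.
def odds_gen(n):
    for i in range(1, n + 1):
        if i % 2 == 1:
            yield i

for i in odds_gen(1000):
    print(i)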
|
[
"1364826576@qq.com"
] |
1364826576@qq.com
|
a39c817bbea407b51abff77e28f6f2a39c944bd7
|
0f83e01412eede77ebc1933630d95720f4cf9527
|
/SimpleClient/CollectWireless.py
|
82eb5dcbb458adf343c3906bcadb4135cb35c520
|
[] |
no_license
|
onthejeep/Bluetooth
|
73e5df80612fc70ea55dbf7ae6ac7a95926cc36e
|
2889ca29284067e77237512f7263bf3c98bf5c8e
|
refs/heads/master
| 2021-11-28T10:39:40.442105
| 2021-11-24T03:15:29
| 2021-11-24T03:15:29
| 18,699,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
'''
Created on Apr 1, 2014
@author: admshuyang
'''
from uuid import getnode as get_mac
mac = get_mac()  # the hardware (MAC) address as a 48-bit integer
print mac
|
[
"onthejeep@gmail.com"
] |
onthejeep@gmail.com
|
d882f260c06396681b45c7a6a624dad9f6a6c4bd
|
c42565ca8af1b854a99a7c531a8dbe84c617ba25
|
/docs/unglue_variables.py
|
40e9410c8a4aaa7548d27b9313f4c80078d21a9d
|
[
"MIT"
] |
permissive
|
neurodata/bilateral-connectome
|
e4823efd7aee21f70e436bc32cfd2089a44f0246
|
2f56a81187c364fa461ea8d99c299b2a75e58f69
|
refs/heads/main
| 2023-06-09T16:15:05.356935
| 2023-06-02T19:29:57
| 2023-06-02T19:29:57
| 410,043,679
| 5
| 2
|
MIT
| 2022-03-16T20:22:26
| 2021-09-24T17:11:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
#%%
import ast
import json
from glob import glob
import nbformat as nbf
# Collect a list of all notebooks in the content folder
loc = "bilateral-connectome/docs/**/*.ipynb"
notebooks = glob(loc, recursive=True)
# HACK what is the globby way to do this?
notebooks = [n for n in notebooks if "_build" not in n]
data_key = "application/papermill.record/text/plain"
image_key = "application/papermill.record/image/png"
variables = {}
for notebook_path in notebooks:
notebook = nbf.read(notebook_path, nbf.NO_CONVERT)
for cell in notebook.cells:
if cell.get("cell_type") == "code":
outputs = cell.get("outputs")
for output in outputs:
if "data" in output:
if (
(image_key not in output["data"])
and ("image/svg+xml" not in output["data"])
and ("image/png" not in output["data"])
):
data = output["data"]
if data_key in data:
value = data[data_key]
                            try:
                                value = ast.literal_eval(value)
                            except (ValueError, SyntaxError):
                                # not a Python literal; keep the raw string
                                pass
name = output["metadata"]["scrapbook"]["name"]
variables[name] = value
with open("bilateral-connectome/docs/glued_variables.json", "w") as f:
json.dump(variables, f, indent=4)
with open("bilateral-connectome/docs/glued_variables.txt", "w") as f:
for key, value in variables.items():
f.write(f"{key} {value}\n")
|
[
"benjamindpedigo@gmail.com"
] |
benjamindpedigo@gmail.com
|
d296b86fe99d2467263e58b64824856279526989
|
7d196362b366ba0e562b28057f728b77a5689800
|
/GetFeeds/migrations/0001_initial.py
|
2585e56d6f9bdd66d9886235fd64cfd5402545cf
|
[] |
no_license
|
balrammirani/NewsFeed
|
cfb67ef8a60dbe3677d359d41cdcfd36fb483c41
|
db3aeb74e252d3d8e1b8535b30e7e7f534166310
|
refs/heads/master
| 2021-01-19T00:57:37.949978
| 2016-07-12T18:26:53
| 2016-07-12T18:26:53
| 63,002,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-10 06:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.CharField(max_length=150)),
],
),
]
|
[
"balram.mirani@gmail.com"
] |
balram.mirani@gmail.com
|
8b4fdfc9079d643803ddb8c60fc5bf41d041df6f
|
06dca5ec0ac9cdcbc42171795c067300bebea24b
|
/project_python/resource/practive_11_Reading_And_Writing_File.py
|
5c5c4b66f41d338a72e62fabc41dceda0d8b30b0
|
[] |
no_license
|
quangdt/plan_come_on_baby
|
0a9dd76feceb1323c22c33586687accefb649392
|
c26b0ea98b9649fc8d5c61865a2dfdc829324964
|
refs/heads/master
| 2021-01-18T13:54:19.176897
| 2015-09-22T01:40:33
| 2015-09-22T01:40:33
| 38,100,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
from sys import argv
script, filename=argv
print "We're going to erase %r." %filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..."
target=open(filename,'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1= raw_input("Line 1: ")
line2= raw_input("Line 2: ")
line3= raw_input("Line 3: ")
line4= raw_input("Line 4: ")
print "I'm going to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
target.write(line4)
target.write("\n")
print "And finally, we close it."
target.close()
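# The eight write calls above could be collapsed into one (a sketch):
# target.write("%s\n%s\n%s\n%s\n" % (line1, line2, line3, line4))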
|
[
"quangyeuthuong@gmail.com"
] |
quangyeuthuong@gmail.com
|
397ba2e22bc6c8eff31f10741b937e33c96d2856
|
1155a0b487a7cbc3d5539432f5cb68cbf97355aa
|
/flaskproject/test.py
|
9b1ad1e421095cf02a956a4ad5c17e88e66e9642
|
[] |
no_license
|
Hqj130359/Flask
|
8f1392c016f0e9712485e4668cc501c3b59049f4
|
a02051b318c484dd2c6d5c6d726bdf51e99bbb3a
|
refs/heads/master
| 2020-08-06T21:25:44.571412
| 2019-10-07T15:37:40
| 2019-10-07T15:37:40
| 213,160,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,335
|
py
|
# import datetime
# result=[] # holds all the dates; a nested list whose inner lists have 7 elements each
# # classify months by length
# big_month=[1,3,5,7,8,10,12]
# small_month=[4,6,9,11]
#
# now=datetime.datetime.now()
# month=now.month
# first_date=datetime.datetime (now.year,now.month,1,0,0)
# # year, month, day, hour, minute
# # print(first_date.weekday()) # in Python dates, weekdays run 0-6: 0 is Monday, 6 is Sunday
# # print(now.weekday())
# # first_week=first_date.weekday() # September 1, 2019 was a Sunday
# # if the 1st is a Monday,    the first row should be days 1-7            offset 0
# # if the 1st is a Tuesday,   the first row should be 1*empty + days 1-6  offset 1
# # if the 1st is a Wednesday, the first row should be 2*empty + days 1-5  offset 2
# # if the 1st is a Thursday,  the first row should be 3*empty + days 1-4  offset 3
# # if the 1st is a Friday,    the first row should be 4*empty + days 1-3  offset 4
# # if the 1st is a Saturday,  the first row should be 5*empty + days 1-2  offset 5
# # if the 1st is a Sunday,    the first row should be 6*empty + day 1     offset 6
#
# if month in big_month:
#     day_range=range(1,32) # total days in the given month
# elif month in small_month:
#     day_range=range(1,31)
# else:
#     day_range=range(1,29)
# # get the number of days in the given month
# day_range=list(day_range)
#
# first_week=first_date.weekday() # weekday of the 1st of the given month, e.g. 6
# line1=[] # data for the first row
# #day_range 1-30
# for e in range(first_week):
#     line1.append('empty')
# for d in range(7-first_week):
#     line1.append(str(day_range.pop(0)))
# # print(line1)
# result.append(line1)
#
# while day_range :# keep looping while the day list still has values
#     line=[] # one sub-row
#     for i in range(7):
#         if len(line) < 7 and day_range:
#             line.append(str(day_range.pop(0)))
#         else:
#             line.append('empty')
#     result.append(line)
# # print(result)
# # display the result
# print("Mon    Tue    Wed    Thu    Fri    Sat    Sun")
# for line in result:
#     for day in line:
#         day=day.center(6)
#         print(day,end=" ")
#     print()
# import calendar
# result=calendar.month(19,9).splitlines()[2:]
# for line in result:
# print(line)
from flask import Flask,render_template
import datetime
# import calendar
app=Flask(__name__)
class Calendar:
"""
当前类实现日历功能
1返回列表嵌套列表的日历
2,安装日历格式打印日历
"""
def __init__(self, month='now'):
        self.result = []  # collects all the dates; a nested list whose inner lists hold 7 elements
        # month classification
        big_month = [1, 3, 5, 7, 8, 10, 12]
        small_month = [4, 6, 9, 11]
        # get the current month
now = datetime.datetime.now()
if month == "now":
month = now.month
first_date = datetime.datetime(now.year, now.month, 1, 0, 0)
            # year month day  hour  minute
            # print(first_date.weekday()) # in Python dates, weekdays run 0-6: 0 is Monday, 6 is Sunday
            # print(now.weekday())
            # first_week=first_date.weekday() # 1 September 2019 was a Sunday
            # if the 1st is Monday,    the first row should be days 1-7            0
            # if the 1st is Tuesday,   the first row should be 1*empty + days 1-6  1
            # if the 1st is Wednesday, the first row should be 2*empty + days 1-5  2
            # if the 1st is Thursday,  the first row should be 3*empty + days 1-4  3
            # if the 1st is Friday,    the first row should be 4*empty + days 1-3  4
            # if the 1st is Saturday,  the first row should be 5*empty + days 1-2  5
            # if the 1st is Sunday,    the first row should be 6*empty + day 1     6
else:
# assert int(month) in range(1,13)
first_date = datetime.datetime(now.year, month, 1, 0, 0)
if month in big_month:
            day_range = range(1, 32)  # total days in the given month
elif month in small_month:
day_range = range(1, 31)
else:
day_range = range(1, 29)
        # get the number of days in the given month
self.day_range = list(day_range)
        first_week = first_date.weekday()  # which weekday the 1st of the given month falls on; e.g. 6
        line1 = []  # first row of data
for e in range(first_week):
line1.append('empty')
for d in range(7 - first_week):
line1.append(str(self.day_range.pop(0)))
self.result.append(line1)
        while self.day_range:  # keep looping while the day list still has values
            line = []  # each sub-list
for i in range(7):
if len(line) < 7 and self.day_range:
line.append(str(self.day_range.pop(0)))
else:
line.append('empty')
self.result.append(line)
def return_month(self):
"""
:return:
"""
return self.result
def print_month(self):
"""
:return:
"""
print("星期一 星期二 星期三 星期四 星期五 星期六 星期日")
for line in self.result:
for day in line:
day = day.center(6)
print(day, end=" ")
print()
@app.route("/userInfo/")
def userInfo():
calendar=Calendar().return_month()
return render_template("userInfo.html",**locals())
print(1)
if __name__ == "__main__":
app.run(host='127.0.0.1',port=8000,debug=True)
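# A minimal standalone check of the Calendar helper (no Flask required):
#     cal = Calendar()           # defaults to the current month
#     cal.print_month()          # prints a Mon..Sun grid padded with 'empty'
#     rows = cal.return_month()  # list of 7-element lists of day strings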
|
[
"zhangsan@qq.com"
] |
zhangsan@qq.com
|
91240e23206342a0d3ec5cad451bd20ae4aa8cf3
|
f42ea0c6a477b741e8e749f9e1ba8922931659d2
|
/study_6_7.py
|
7cef9f442c6661350975f9d1e6d4c48083546247
|
[] |
no_license
|
7dongyuxiaotang/python_code
|
4dca469770d4416def97ebc69f233db7df501d07
|
2cef015481926a8e480ce54840c2d222bb17bf3e
|
refs/heads/master
| 2023-07-06T20:33:38.228196
| 2021-08-12T09:33:27
| 2021-08-12T09:33:27
| 371,591,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# 1. What is a file?
# A file is a virtual concept/interface that the operating system provides
# to users/applications for operating on the hard disk.
# user/application (open())
# operating system (file)
# computer hardware (hard disk)
# 2. Why use files?
# Through files, users/applications can persist data to the hard disk;
# that is, operating on a file is operating on the disk.
# 3. How to use files: open()
# (1) modes that control how text content is read and written
# t and b cannot be used on their own; they must be combined with r/w/a
# f=open(r'D:\text.txt',mode='rt',encoding='utf-8')
# res=f.read()
# print(res)
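# A minimal runnable sketch of the read pattern described above (the path
# 'notes.txt' is hypothetical; any existing UTF-8 text file works):
with open('notes.txt', mode='rt', encoding='utf-8') as f:
    res = f.read()  # read the whole file as text
print(res)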
|
[
"614028708@qq.com"
] |
614028708@qq.com
|
8f6cddfbfcd4dff1c8b96b66402ec35eda611b4c
|
f09978f2a0850278255bd198222cd3990cb0c687
|
/gear/migrations/0002_auto_20190525_1142.py
|
366fa9819bb1eb88a25f23d44ef896e4040d58dd
|
[] |
no_license
|
szpone/climbing-gear
|
0e4e53b99a0b550c0e172af21c2c9e08e2c3f1ba
|
78ab13b97b4b66464859b95ba6e5ed8587d5e60c
|
refs/heads/master
| 2022-12-12T11:08:57.277056
| 2019-06-05T16:06:02
| 2019-06-05T16:06:02
| 185,016,538
| 1
| 0
| null | 2022-11-22T03:49:28
| 2019-05-05T10:30:11
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Generated by Django 2.2.1 on 2019-05-25 11:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gear', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='GearType',
new_name='GearCategory',
),
]
|
[
"nikola.adamus@gmail.com"
] |
nikola.adamus@gmail.com
|
b1806d05824ad4586ebad22b991734232c01e535
|
7cafba9d90ad17feebb9bf066fe33f0bb50bdc1f
|
/code/Memoria/Utility.py
|
26d1941359bebd84960f0afa7b0c35343e2157e6
|
[] |
no_license
|
StephanGuingor/Memory-GUI-Client-Server
|
2795ff555cc85d2ec37b6f0c099505b938641eaf
|
3dd9f15fca25b41f20c94e7f1ece17b48f37a3fa
|
refs/heads/master
| 2022-04-23T06:21:43.031591
| 2020-04-28T21:24:35
| 2020-04-28T21:24:35
| 259,752,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
from os.path import dirname, abspath, join
import pygame
main_dir = dirname(abspath(__file__)) # Program's directory
def load_image(file, transparent):
"""loads an image, prepares it for play"""
file = join(main_dir, 'Data', file)
try:
surface = pygame.image.load(file)
except pygame.error:
raise SystemExit('Could not load image "%s" %s' %
(file, pygame.get_error()))
if transparent:
return surface.convert_alpha()
return surface.convert()
def overrides(interface_class):
"""Verify that class is overriding another"""
def overrider(method):
assert (method.__name__ in dir(interface_class))
return method
return overrider
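# Example use of the overrides helper (Base/Child are illustrative names);
# the assert fires at class-creation time if Base has no method named draw:
class Base(object):
    def draw(self):
        pass
class Child(Base):
    @overrides(Base)
    def draw(self):
        return 'drawn'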
|
[
"stephan.guingor04@gmail.com"
] |
stephan.guingor04@gmail.com
|
754c20f53dcf01348a945035ab83fc23b0d6413f
|
7d703048a5393a0fc65c319bbe95df3795013260
|
/app/models/tasks_categories_model.py
|
9a92db5e09a4848d39e92b500f0a25792131d0b8
|
[] |
no_license
|
nicole-malaquias/eisenhower
|
0e21e381f6d46feef33ec426d04607aadecc06be
|
be5a5212741de9dda44b7bc15c5bd28430f50f50
|
refs/heads/master
| 2023-08-16T03:42:40.958219
| 2021-10-08T18:55:09
| 2021-10-08T18:55:09
| 417,294,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
from app.configs.database import db
from dataclasses import dataclass
from sqlalchemy.orm import backref, relationship, validates
@dataclass
class TaskCategoriesModel(db.Model):
id : int
__tablename__ = 'tasks_categories'
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False)
category_id = db.Column(db.Integer, db.ForeignKey('categories.id'), nullable=False)
tasks = relationship('TasksModel', backref=backref('task_categories', cascade='all, delete-orphan'))
category = db.relationship("CategoryModel", backref=backref('ct'))
|
[
"nicolemalaquias91@gmail.com"
] |
nicolemalaquias91@gmail.com
|
8420713bfe5ac7d87bf6f172a8e8198c043dee6c
|
ef0c31b1f2802e84a4f97d5417709f384fc2288d
|
/test_ui_single_q.py
|
9260eaa4f714426935246e7f0c04128d8eee4c75
|
[] |
no_license
|
thydmle/pycontin
|
e35c2fce4449e971496084d46a8f3964b5c3cdbe
|
6e8a4bc32d787aba12e476ae18fc425ab4e2b130
|
refs/heads/master
| 2020-06-19T08:25:18.669788
| 2014-11-18T00:19:48
| 2014-11-18T00:19:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,302
|
py
|
# Copyright 2014, Jerome Fung
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import yaml_serialize
from numpy.testing import assert_allclose
from dls_core import Optics, CorrFn, Measurement
from pycontin_core import PyContinInputs
from problem_setup import add_weights
from pycontin_fixed_q import solve_series, solve_alpha, _setup_inversion
from computations import prob_1_alpha
class TestClass():
def setUp(self):
# data and metadata using DLS core classes
optics = Optics(wavelen = 488e-7, index = 1.43) # length in cm
test_data = np.loadtxt('contin_test_data_set_1.txt')
corr_func = CorrFn(*test_data.transpose())
self.measurement = Measurement(corr_func, 60., optics)
# pycontin inputs
self.pc_inputs = PyContinInputs(n_grid = 31,
grid_bounds = np.array([5e2, 5e6]),
kernel_type = 'mw',
kernel_kwargs = {'prop_const' :
1.37e-4},
dust_term = True)
self.soln0, self.int_res0 = solve_alpha(self.measurement,
self.pc_inputs, 5.91e-10)
self.soln_r = solve_alpha(self.measurement, self.pc_inputs, 3e-6,
self.int_res0)
# need to load/define gold here
self.matrix_stats = np.loadtxt('contin_data_set_1_matrix_stats.txt')
def test_lowreg_soln(self):
# check scalings on x and alpha
assert_allclose(self.int_res0.xsc, self.matrix_stats[:,2], rtol = 5e-4)
assert_allclose(self.int_res0.alpha_sc, 1./9.302e13, rtol = 1e-4)
# check solution
gold_x = np.concatenate((np.zeros(19),
np.array([4.263e-11, 1.006e-11]),
np.zeros(10),
np.array([8.5963e-2])))
gold_err = np.concatenate((np.zeros(19),
np.array([3e-12, 3.2e-12]),
np.zeros(10),
np.array([1.7e-3])))
assert_allclose(self.soln0.x, gold_x, rtol = 1e-3, atol = 1e-16)
assert_allclose(self.soln0.error, gold_err, rtol = 4e-2, atol = 1e-16)
# check degs of freedom and residuals
assert_allclose(self.soln0.n_dof, 3.)
assert_allclose(np.sqrt(self.soln0.reduced_chisq), 2.889e-3,
rtol = 1e-3)
def test_coefficient_matrix(self):
# check setup
inv_input = _setup_inversion(self.measurement, self.pc_inputs)
matrix_A_maxima = np.array([inv_input.coeff_matrix[:, i].max() for i in
np.arange(self.pc_inputs.n_grid + 1)])
matrix_A_minima = np.array([inv_input.coeff_matrix[:, i].min() for i in
np.arange(self.pc_inputs.n_grid + 1)])
assert_allclose(matrix_A_minima, self.matrix_stats[:,0], rtol = 5e-5)
assert_allclose(matrix_A_maxima, self.matrix_stats[:,1], rtol = 5e-5)
def test_reg_soln(self):
gold_x = np.concatenate((np.zeros(15),
np.array([6.270e-14, 4.878e-12,
1.318e-11, 2.107e-11,
2.259e-11, 1.381e-11,
2.055e-12]),
np.zeros(9),
np.array([8.22e-2])))
gold_err = np.concatenate((np.zeros(15),
np.array([1.9e-12, 2.7e-12, 2e-12,
8.8e-13, 1.7e-12,
9.5e-13, 6e-13]),
np.zeros(9),
np.array([1.9e-3])))
assert_allclose(self.soln_r.x, gold_x, rtol = 2e-3, atol = 1e-14)
assert_allclose(self.soln_r.error, gold_err, rtol = 4e-2, atol = 1e-16)
assert_allclose(self.soln_r.Valpha, 3.38453e-4, rtol = 1e-4)
assert_allclose(self.soln_r.chisq, 3.15960e-4, rtol = 1e-4)
assert_allclose((self.soln_r.residuals**2).sum(), self.soln_r.chisq)
assert_allclose(np.sqrt(self.soln_r.reduced_chisq), 3.056e-3,
rtol = 1e-3)
assert_allclose(self.soln_r.n_dof, 3.175, rtol = 1e-3)
prob1 = prob_1_alpha(self.soln_r.chisq, self.soln0.Valpha,
self.soln0.n_dof,
len(self.measurement.corrfn.data))
assert_allclose(prob1, 0.704, rtol = 1e-3)
def test_weighted_soln(self):
soln_wt, intres_wt = solve_alpha(self.measurement,
add_weights(self.pc_inputs,
self.soln_r), 2.23e-7)
# see CONTIN test output
gold_xwt = np.concatenate((np.zeros(18),
np.array([1.014e-11, 3.275e-11,
1.795e-11]),
np.zeros(10), np.array([8.2845e-2])))
gold_err = np.concatenate((np.zeros(18),
np.array([4e-12, 2.3e-12, 1.9e-12]),
np.zeros(10), np.array([1.6e-3])))
assert_allclose(soln_wt.x, gold_xwt, rtol = 2e-3, atol = 1e-15)
assert_allclose(soln_wt.error, gold_err, rtol = 3e-2, atol = 1e-14)
assert_allclose(soln_wt.n_dof, 3.038, rtol = 1e-3, atol = 1e-15)
assert_allclose(np.sqrt(soln_wt.reduced_chisq), 9.317e-4,
rtol = 1e-3)
def test_moment_analysis(self):
'''
moments, mom_errs = calculate_moments(self.grid_mw, self.quad_weights,
self.x0[:-1],
self.infodict0['covar_x'][:-1,
:-1])
# see contin test output
gold_moments = np.array([1.9509e-11, 3.4570e-6, 6.1951e-1,
1.1257e5, 2.0797e10])
gold_percent_errs = np.array([2.9, 1.6, 0.28, 2.0, 4.3])
assert_allclose(moments, gold_moments, rtol = 1e-2)
assert_allclose(mom_errs, 1e-2 * gold_percent_errs, rtol = 2e-2)
pass
'''
def test_series(self):
series, intermed_res = solve_series(self.measurement, self.pc_inputs)
assert_allclose(series.best_prob1, 0.5, rtol = 1e-1)
def test_simulated_data(self):
opt_2 = Optics(0.6328, 1.33)
q = opt_2.qsca(60)
a = 0.1 # microns
kT = 295 * 1.38e-23 * 1e18
tbase = np.logspace(-3, 2, 201) # ms
D = kT / (6. * np.pi * 1. * a)
data = np.exp(-2. * D * q**2 * tbase)
meast = Measurement(CorrFn(tbase, data), 60., opt_2)
pc_inputs = PyContinInputs(n_grid = 31,
grid_bounds = np.array([1e-2, 1]),
kernel_type = 'rad',
kernel_kwargs = {'kT' : kT,
'eta' : 1.})
soln, intres = solve_alpha(meast, pc_inputs, 1e-11)
# check that the residuals are negligible
assert_allclose(soln.residuals, np.zeros(201), atol = 1e-8)
# check that there is a spike near correct radius
assert_allclose(pc_inputs.grid[soln.x.argmax()], a)
print soln.x
def test_serialization(self):
pass
|
[
"fung@physics.harvard.edu"
] |
fung@physics.harvard.edu
|
5e0dd9f13ea0378495ff20008d37fa50ec597dea
|
91f39fc8b5df9ccde912d91506357739374d8831
|
/pokemonbot/PokemonGo/pokemongo_bot/cell_workers/initial_transfer_worker.py
|
a4875842ceb75668d7c0beff619b978ae537a018
|
[] |
no_license
|
baloon11/PokemonGo-bot_Django_wrapper
|
88b97488b4d77fe03d8a0acab980995dd3cdc78b
|
2fb9b8e50d3130efba3acd442bbc7c9b05907d98
|
refs/heads/master
| 2021-01-17T19:00:40.948725
| 2016-08-02T08:12:04
| 2016-08-02T08:12:04
| 64,734,348
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
import json
# from pokemongo_bot.human_behaviour import sleep
# from pokemongo_bot import logger
from .. import logger
from ..human_behaviour import sleep
class InitialTransferWorker(object):
def __init__(self, bot):
self.config = bot.config
self.pokemon_list = bot.pokemon_list
self.api = bot.api
def work(self):
logger.log('[x] Initial Transfer.')
logger.log(
'[x] Preparing to transfer all duplicate Pokemon, keeping the highest CP of each type.')
logger.log('[x] Will NOT transfer anything above CP {}'.format(
self.config.initial_transfer))
pokemon_groups = self._initial_transfer_get_groups()
for id in pokemon_groups:
group_cp = pokemon_groups[id].keys()
if len(group_cp) > 1:
group_cp.sort()
group_cp.reverse()
for x in range(1, len(group_cp)):
if self.config.initial_transfer and group_cp[x] > self.config.initial_transfer:
continue
print('[x] Transferring {} with CP {}'.format(
self.pokemon_list[id - 1]['Name'], group_cp[x]))
self.api.release_pokemon(
pokemon_id=pokemon_groups[id][group_cp[x]])
response_dict = self.api.call()
sleep(2)
logger.log('[x] Transferring Done.')
def _initial_transfer_get_groups(self):
pokemon_groups = {}
self.api.get_player().get_inventory()
inventory_req = self.api.call()
inventory_dict = inventory_req['responses']['GET_INVENTORY'][
'inventory_delta']['inventory_items']
user_web_inventory = 'web/inventory-%s.json' % (self.config.username)
with open(user_web_inventory, 'w') as outfile:
json.dump(inventory_dict, outfile)
for pokemon in inventory_dict:
try:
reduce(dict.__getitem__, [
"inventory_item_data", "pokemon_data", "pokemon_id"
], pokemon)
except KeyError:
continue
group_id = pokemon['inventory_item_data'][
'pokemon_data']['pokemon_id']
group_pokemon = pokemon['inventory_item_data'][
'pokemon_data']['id']
group_pokemon_cp = pokemon[
'inventory_item_data']['pokemon_data']['cp']
if group_id not in pokemon_groups:
pokemon_groups[group_id] = {}
pokemon_groups[group_id].update({group_pokemon_cp: group_pokemon})
return pokemon_groups
|
[
"olfb_c@ukr.net"
] |
olfb_c@ukr.net
|
5d6cd21329a2ad018802f7b44173e6758ee4ac1e
|
4bc696d97f9fec7e5ce136593556007a8b889d5f
|
/server/apps/reportAdmin/models.py
|
7b4b5b4e7c7e4c8ce9965c93308c74c3c434b22f
|
[] |
no_license
|
davidhorst/FirstDjangular
|
37224a72ebd1e487b4b07755b06432a99f572eaf
|
5d18577f8d52e7e276c2c850d33f929de8e77ee6
|
refs/heads/master
| 2021-06-12T09:34:21.103774
| 2016-12-13T14:53:24
| 2016-12-13T14:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from __future__ import unicode_literals
from django.db import models
class Report(models.Model):
name = models.CharField(max_length=15)
interval = models.PositiveIntegerField()
|
[
"="
] |
=
|
c83d59471151b154da89111e7a1d551d9c80b15d
|
beeb13e821788646e472fda3dad055a0b15da0f5
|
/r3.py
|
c497c3d5f5b5e0929c8cd43be27cb76577f32487
|
[] |
no_license
|
alahesia/smart-avto
|
0f59c2496e3b926052052c150bd392f8dd7af2e7
|
7528d101c7639685962306cc65c8d10f4cf4ac41
|
refs/heads/master
| 2021-01-25T06:18:36.152488
| 2017-06-15T18:07:11
| 2017-06-15T18:07:11
| 93,554,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import RPi.GPIO as GPIO
import MFRC522
import signal
import time
red = 40
card_01 = '1662448133'
GPIO.setmode(GPIO.BOARD) # count the pins in board order from the top-left (3v3 is pin 1)
GPIO.setwarnings(False)
GPIO.setup(red, GPIO.OUT)
continue_reading = True
MIFAREReader = MFRC522.MFRC522()
while continue_reading:
    # Scan for cards - read their UID
(status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
if status == MIFAREReader.MI_OK:
print "Card detected"
# Read UID
(status,uid) = MIFAREReader.MFRC522_Anticoll()
        # If the UID was read, continue
if status == MIFAREReader.MI_OK:
            # display the UID
UIDcode = str(uid[0])+str(uid[1])+str(uid[2])+str(uid[3])
print UIDcode
if UIDcode == card_01:
GPIO.output(red, 0)
print "Door open"
            # If the card is not on the list, blink and beep
else:
GPIO.output(red, 1)
time.sleep(0.05)
print "Unrecognised Card"
GPIO.cleanup()
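# A sketch for accepting several authorized cards instead of a single one
# (the second UID below is hypothetical):
#     authorized = {card_01, '0987654321'}
#     ...inside the loop, replace the single comparison with:
#     if UIDcode in authorized:
#         GPIO.output(red, 0)
#         print "Door open"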
|
[
"alahesia@gmail.com"
] |
alahesia@gmail.com
|
c54a6f9f02fb20751b071ecad0b54c97bad903ac
|
2cca7049fc4daa4f50b2b7a7d8059e3055e18048
|
/tutorWithParkGrade.py
|
0f60626eca91254fd7f3b73b7dd2b1f66b21f701
|
[] |
no_license
|
twil0516/studyPy
|
6176c94e9f3be3599cf28cf34f1ba0553d8cd259
|
faa1c3f7b6ebc2a4fd67470a68eb6dbdd459b3f2
|
refs/heads/master
| 2022-10-11T13:34:11.838558
| 2020-06-11T10:05:02
| 2020-06-11T10:05:02
| 271,509,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
score = int(input("Enter a score: "))
if score <= 100 and score >= 0:
    if score > 90 and score <= 100:
        print("Grade: A.")
    elif score > 80 and score <= 90:
        print("Grade: B.")
    elif score > 70 and score <= 80:
        print("Grade: C.")
    else:
        print("Grade: F.")
else:
    print("Invalid score entered.")
|
[
"twil0516@gmail.com"
] |
twil0516@gmail.com
|
ea55bed2be2d0e0b08a50d11258844aae744e0f7
|
ad5d7c41370e9dae67ed21a8624ee042977cb51f
|
/leadmanager/settings.py
|
cd780f3e49313d93e23e45edf9713f59813a8d4a
|
[] |
no_license
|
Sparrowan/fullstack-sparrowan-lead-manager-django-react
|
122e1d3579ee20f77f070bd58f57a9e52f382c45
|
60ef0d348283a5313b77b1d9568cfe6254da6a55
|
refs/heads/master
| 2023-01-12T14:59:14.248440
| 2020-03-22T12:15:44
| 2020-03-22T12:15:44
| 249,174,169
| 0
| 0
| null | 2023-01-05T10:30:17
| 2020-03-22T12:01:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,409
|
py
|
"""
Django settings for leadmanager project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l)y6q&oo^3#h2)p+c%i(p^dyxd+yoze!7tjhwz1915@cs+aurp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'leads',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leadmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'frontend/build'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'leadmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'frontend/build/static'),
]
CORS_ORIGIN_WHITELIST = [
"http://localhost:3000"
]
|
[
"alphiuswambua@gmail.com"
] |
alphiuswambua@gmail.com
|
727d79147a17669687219207cd4e55ee2d5313ec
|
115b5356242176b8873ae7e43cd313e41cbd0ee6
|
/tensorflow/openvino_ssd_image.py
|
41ba377ec9bd23198c6d50fce6858ba69d032241
|
[] |
no_license
|
squeakus/bitsandbytes
|
b71ec737431bc46b7d93969a7b84bc4514fd365b
|
218687d84db42c13bfd9296c476e54cf3d0b43d2
|
refs/heads/master
| 2023-08-26T19:37:15.190367
| 2023-07-18T21:41:58
| 2023-07-18T21:42:14
| 80,018,346
| 2
| 4
| null | 2022-06-22T04:08:35
| 2017-01-25T13:46:28
|
C
|
UTF-8
|
Python
| false
| false
| 7,491
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
import cv2
import time
import logging as log
import glob
from openvino.inference_engine import IENetwork, IEPlugin
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
parser.add_argument("-i", "--input",
help="Path to image. Please specify regex for multiple images, e.g.; images/*jpg", required=True,
type=str)
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
"impl.", type=str, default=None)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Demo "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("--labels", help="Labels mapping file", default=None, type=str)
parser.add_argument("-pt", "--prob_threshold", help="Probability threshold for detections filtering",
default=0.5, type=float)
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# Plugin initialization for specified device and load extensions library if specified
log.info("Initializing plugin for {} device...".format(args.device))
plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
if args.cpu_extension and 'CPU' in args.device:
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Reading IR...")
net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
format(plugin.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
assert len(net.outputs) == 1, "Demo supports only single output topologies"
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
log.info("Loading IR to the plugin...")
exec_net = plugin.load(network=net, num_requests=2)
# Read and pre-process input image
n, c, h, w = net.inputs[input_blob].shape
del net
if args.labels:
with open(args.labels, 'r') as f:
labels_map = [x.strip() for x in f]
else:
labels_map = None
if "*" in args.input:
        images = glob.glob(args.input)
else:
images = [args.input]
for imgname in images:
assert os.path.isfile(imgname), "Specified input file doesn't exist"
img = cv2.imread(imgname)
cur_request_id = 0
next_request_id = 1
log.info("Starting inference in sync mode...")
log.info("To switch between sync and async modes press Tab button")
log.info("To stop the demo execution press Esc button")
is_async_mode = False
render_time = 0
initial_h, initial_w, channels = img.shape
# Main sync point:
# in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
# in the regular mode we start the CURRENT request and immediately wait for it's completion
inf_start = time.time()
in_img = cv2.resize(img, (w, h))
in_img = in_img.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_img = in_img.reshape((n, c, h, w))
exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_img})
if exec_net.requests[cur_request_id].wait(-1) == 0:
inf_end = time.time()
det_time = inf_end - inf_start
# Parse detection results of the current request
res = exec_net.requests[cur_request_id].outputs[out_blob]
for obj in res[0][0]:
# Draw only objects when probability more than specified threshold
if obj[2] > args.prob_threshold:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
class_id = int(obj[1])
# Draw box and label\class_id
color = (0, 0, 255)
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
det_label = labels_map[class_id] if labels_map else str(class_id)
cv2.putText(img, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
# Draw performance stats
print("Inference time: {:.3f} ms".format(det_time * 1000))
inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
"Inference time: {:.3f} ms".format(det_time * 1000)
render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
"Async mode is off. Processing request {}".format(cur_request_id)
cv2.putText(img, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(img, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(img, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
(10, 10, 200), 1)
#img = cv2.resize(img,(500,500))
cv2.imshow("Detection Results", img)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
if key == 27: # Esc key to stop
break
del exec_net
del plugin
# cv2.imwrite("out.png", img)
if __name__ == '__main__':
sys.exit(main() or 0)
|
[
"jonathanbyrn@gmail.com"
] |
jonathanbyrn@gmail.com
|
efdb82400d1318cd2f944357d02d3a541be9f2a2
|
0e06a05a64d02f660d988db11bd004941c2058ac
|
/ex41.py
|
36c1392569f76f56d9ace74b79a4d0e6b501c7c2
|
[] |
no_license
|
luisco96/LPTHW
|
65fa53dee6f8799212fd4278dea6d884d9155d36
|
ee2f8b4596c1aba7515152cbb7d0ee15a5ab3b86
|
refs/heads/master
| 2021-04-03T14:13:07.585079
| 2020-05-31T21:26:09
| 2020-05-31T21:26:09
| 248,364,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
import random
from urllib.request import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):" :
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** params.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function *** that takes self and @@@ params.",
"*** = %%%():":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, call it with params self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'"
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
else:
PHRASE_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(str(word.strip(), encoding="utf-8"))
def convert(snippet, phrase):
class_name = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(
random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_name:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until they hit CTRL-D
try:
while True:
snippets = list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
except EOFError:
print("\nBye")
|
[
"luisco28@gmail.com"
] |
luisco28@gmail.com
|
0e5447d21a2d421a9baa4a13d2780114aadaec9b
|
ffb7e32769cf5928fcca5abf3a60f86e456bb79a
|
/biz/t8.py
|
73ca8fc13e6c2220b392691e5faeaca2a4eca4c6
|
[
"Apache-2.0"
] |
permissive
|
relax-space/python-learning
|
92f8173af26d05beae89e928b1df4aa907b5701b
|
45457fc6c3a6583cb28bd14161439ec557c4ce2b
|
refs/heads/master
| 2022-05-03T13:33:45.646838
| 2021-11-30T21:38:06
| 2022-03-19T15:34:22
| 250,942,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
import pytest
# https://www.cnblogs.com/luizyao/p/11848352.html
# indirect: a subset of argnames, or a boolean; it redirects the named
# arguments' values through request.param into the same-named fixtures,
# which supports more complex scenarios.
# src/chapter-11/test_indirect.py
@pytest.fixture()
def max(request):
return request.param - 1
@pytest.fixture()
def min(request):
return request.param + 1
# indirect defaults to False
@pytest.mark.parametrize('min, max', [(1, 2), (3, 4)])
def test_indirect(min, max):
assert min <= max
# the values for min and max are redirected into the same-named fixtures
@pytest.mark.parametrize('min, max', [(1, 2), (3, 4)], indirect=True)
def test_indirect_indirect(min, max):
assert min >= max
# only the value for max is redirected into its fixture
@pytest.mark.parametrize('min, max', [(1, 2), (3, 4)], indirect=['max'])
def test_indirect_part_indirect(min, max):
print(min, max)
assert min == max
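# Run with `pytest -v` to see the effect (expected results, assuming
# pytest's standard indirect semantics): test_indirect gets the raw tuples,
# so (1, 2) and (3, 4) both satisfy min <= max; test_indirect_indirect
# routes both values through the fixtures, turning (1, 2) into min=2,
# max=1, so min >= max passes; test_indirect_part_indirect only shifts max
# down by 1, turning (1, 2) into min=1, max=1, so min == max passes.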
|
[
"xiaoxm_001@163.com"
] |
xiaoxm_001@163.com
|
7bf8db59d258149767c30babf4c58f93344b9449
|
8ec9b7ab4fbe4d3c9f13a4cfbc26f6ecc18fb475
|
/Flask_Proj/flaskr/db.py
|
9a9e4728497f5e4caac4c77b3fa4feb74e28eb89
|
[] |
no_license
|
mrsreddy451/Python
|
b337037f3338fe354b9a0e760d59fe6b642ba3a4
|
d77f7c732b11913cbd539836c813dbd4798d9496
|
refs/heads/master
| 2022-06-25T20:24:58.172543
| 2020-05-07T14:47:41
| 2020-05-07T14:47:41
| 152,211,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
g.db = sqlite3.connect(
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
print('run the commands')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
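# A sketch of how this module is typically wired into an application
# factory (function name and database path are illustrative, following the
# Flask tutorial convention):
def create_app_sketch():
    from flask import Flask
    app = Flask(__name__)
    app.config['DATABASE'] = 'flaskr.sqlite'
    init_app(app)  # registers close_db and the init-db CLI command
    return app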
|
[
"noreply@github.com"
] |
mrsreddy451.noreply@github.com
|
546cca90b6e1aaf74aefb8a18438d14b36b643c0
|
ca1321ae08ae4645c16753504e6387c43a162c7d
|
/search-suggestions-system.py
|
cbc8112b745eec0c5f4ee535b116fe9ecceea5a9
|
[] |
no_license
|
atomicapple0/misc
|
42b9a98875ad309c3fca577c08750eff2dadcd8c
|
998ccb846759ef736a13c3513dc62e28e4049d43
|
refs/heads/master
| 2022-11-10T11:56:33.211139
| 2022-10-26T02:31:38
| 2022-10-26T02:31:38
| 252,246,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
import bisect
class Solution:
def suggestedProducts(self, products, searchWord):
deq = products
deq.sort()
sol = []
idx = 0
for i in range(len(searchWord)):
soll = []
while idx < len(deq):
try:
if deq[idx][:i + 1] == searchWord[:i + 1]:
break
except:
pass
idx += 1
for j in range(idx, min(len(deq), idx + 3)):
try:
if j < len(deq) and deq[j][:i + 1] == searchWord[:i + 1]:
soll.append(deq[j])
else:
break
except:
pass
sol.append(soll)
return sol
def optimal(self, A, word):
A.sort()
res = []
prefix = ''
i = 0
for c in word:
prefix += c
i = bisect.bisect_left(A, prefix, lo=i)
res.append([w for w in A[i:i+3] if w.startswith(prefix)])
return res
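# Quick usage check of the bisect-based variant (classic example input;
# each row holds up to three lexicographically smallest matches, and the
# expected output below follows from the algorithm above):
if __name__ == '__main__':
    s = Solution()
    print(s.optimal(["mobile", "mouse", "moneypot", "monitor", "mousepad"],
                    "mouse"))
    # [['mobile', 'moneypot', 'monitor'], ['mobile', 'moneypot', 'monitor'],
    #  ['mouse', 'mousepad'], ['mouse', 'mousepad'], ['mouse', 'mousepad']]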
|
[
"brianzhangaa@gmail.com"
] |
brianzhangaa@gmail.com
|
c9856aeddf6a7e307e2113099793921a5da75d36
|
07c5656f004b6a444e22ff7b4c3b6802d027f759
|
/week_9/class_0420/API_5/common/http_request.py
|
a2e1884012d0be74fe084a99df396c6dcb6f8729
|
[] |
no_license
|
EuniceHu/python15_api_test
|
de2a0f0bec8057edb27c8d1f82a438da3e9c105c
|
1313e56ddfa67a2490e703a1a5ef4a6967565849
|
refs/heads/master
| 2020-05-20T13:30:41.686327
| 2019-05-14T11:00:52
| 2019-05-14T11:00:52
| 185,599,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,371
|
py
|
# -*- coding:utf-8 _*-
"""
@author:mongo
@time: 2018/12/17
@email:3126972006@qq.com
@function:
"""
import requests
from API_5.common.config import config
class HTTPRequest:
    """
    Independent session; cookies must be passed around manually.
    Use this class's request method to perform different HTTP requests
    and return the response.
    """
    def request(self, method, url, data=None, json=None, cookies=None):
        method = method.upper()  # force the method to uppercase
        if type(data) == str:
            data = eval(data)  # convert str to dict
        if method == 'GET':
            resp = requests.get(url, params=data, cookies=cookies)  # resp is a Response object
        elif method == 'POST':
            if json:
                resp = requests.post(url, json=json, cookies=cookies)
            else:
                resp = requests.post(url, data=data, cookies=cookies)
        else:
            resp = None
            print('Unsupported method')
        return resp
class HTTPRequest2:
    """
    Shares a single session; cookies are carried over automatically.
    Use this class's request method to perform different HTTP requests
    and return the response.
    """
    def __init__(self):
        # open a session
        self.session = requests.sessions.session()
    def request(self, method, url, data=None, json=None):
        method = method.upper()  # force the method to uppercase
        if type(data) == str:
            data = eval(data)  # convert str to dict
        # join the base URL and the path
        url = config.get('api', 'pre_url') + url
        print('request url:', url)
        print('request data:', data)
        if method == 'GET':
            resp = self.session.request(method=method, url=url, params=data)
        elif method == 'POST':
            if json:
                resp = self.session.request(method=method, url=url, json=json)
            else:
                resp = self.session.request(method=method, url=url, data=data)
        else:
            resp = None
            print('Unsupported method')
        print('request response:', resp.text)
        return resp
    def close(self):
        self.session.close()  # remember to close when done - this matters!
if __name__ == '__main__':
# params = {"mobilephone": "15810447878", "pwd": "123456"}
# http_request = HTTPRequest()
# # 调用登陆
# resp = http_request.request('pOST', 'http://test.lemonban.com/futureloan/mvc/api/member/login', data=params)
# print(resp.status_code)
# print(resp.text)
# print(resp.cookies)
#
# # 调用充值
# params = {"mobilephone": "15810447878", "amount": "1000"}
# resp2 = http_request.request('POST', 'http://test.lemonban.com/futureloan/mvc/api/member/recharge', data=params,
# cookies=resp.cookies)
# print(resp2.status_code)
# print(resp2.text)
# print(resp2.cookies)
http_request2 = HTTPRequest2()
params = {"mobilephone": "15810447878", "pwd": "123456"}
resp = http_request2.request('pOST', 'http://test.lemonban.com/futureloan/mvc/api/member/login', data=params)
params = {"mobilephone": "15810447878", "amount": "1000"}
resp2 = http_request2.request('POST', 'http://test.lemonban.com/futureloan/mvc/api/member/recharge', data=params)
http_request2.close()
print(resp2.status_code)
print(resp2.text)
print(resp2.cookies)
|
[
"hongdh1122@163.com"
] |
hongdh1122@163.com
|
5e4f6e129e5aa4b8851045b557a6a8f82e4450bd
|
1dcc09e5f6676f9390e2be296c77e537b4e10f1d
|
/auth.py
|
69b8cba64d4e76622615d076aa5a8cf957ca8a00
|
[] |
no_license
|
farahSalotaibi/casting-agency
|
61c1075591be7dfe2e963e7eac6a9fe085d1b9a1
|
eba2ed09aeedfe7fa9652bdcb8eed5a2a0c77028
|
refs/heads/master
| 2023-02-04T11:13:25.010522
| 2020-12-21T11:30:04
| 2020-12-21T11:30:04
| 323,140,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,614
|
py
|
import json
from flask import request, _request_ctx_stack, abort
from functools import wraps
from jose import jwt
from urllib.request import urlopen
AUTH0_DOMAIN = 'farahalota.us.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'casting'
# AuthError Exception
'''
AuthError Exception
A standardized way to communicate auth failure modes
'''
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
# Auth Header
'''
@TODO implement get_token_auth_header() method DOOONE
it should attempt to get the header from the request
it should raise an AuthError if no header is present
it should attempt to split bearer and the token
it should raise an AuthError if the header is malformed
return the token part of the header
'''
def get_token_auth_header():
auth = request.headers.get('Authorization', None)
if not auth:
raise AuthError({
'code': 'authorization_header_missing',
'description': 'Authorization header is expected.'
}, 401)
parts = auth.split()
if parts[0].lower() != 'bearer':
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must start with "Bearer".'
}, 401)
elif len(parts) == 1:
raise AuthError({
'code': 'invalid_header',
'description': 'Token not found.'
}, 401)
elif len(parts) > 2:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization header must be bearer token.'
}, 401)
token = parts[1]
return token
'''
@TODO implement check_permissions(permission, payload) method DOOONE
@INPUTS
permission: string permission (i.e. 'post:drink')
payload: decoded jwt payload
it should raise an AuthError if permissions are not included in the payload
!!NOTE check your RBAC settings in Auth0
it should raise an AuthError if the requested permission string is not in the payload permissions array
return true otherwise
'''
def check_permissions(permission, payload):
if 'permissions' not in payload:
raise AuthError({
'code': 'invalid_claims',
'description': 'Permissions not included in JWT.'
}, 400)
if permission not in payload['permissions']:
raise AuthError({
'code': 'unauthorized',
'description': 'Permission not found.'
}, 403)
return True
'''
@TODO implement verify_decode_jwt(token) method DOOONE
@INPUTS
token: a json web token (string)
it should be an Auth0 token with key id (kid)
it should verify the token using Auth0 /.well-known/jwks.json
it should decode the payload from the token
it should validate the claims
return the decoded payload
!!NOTE urlopen has a common certificate error described here: https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org
'''
def verify_decode_jwt(token):
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
if 'kid' not in unverified_header:
raise AuthError({
'code': 'invalid_header',
'description': 'Authorization malformed.'
}, 401)
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer='https://' + AUTH0_DOMAIN + '/'
)
return payload
except jwt.ExpiredSignatureError:
raise AuthError({
'code': 'token_expired',
'description': 'Token expired.'
}, 401)
except jwt.JWTClaimsError:
raise AuthError({
'code': 'invalid_claims',
'description': 'Incorrect claims. Please, check the audience and issuer.'
}, 401)
except Exception:
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to parse authentication token.'
}, 400)
raise AuthError({
'code': 'invalid_header',
'description': 'Unable to find the appropriate key.'
}, 400)
'''
@TODO implement @requires_auth(permission) decorator method DOOONE
@INPUTS
permission: string permission (i.e. 'post:drink')
it should use the get_token_auth_header method to get the token
it should use the verify_decode_jwt method to decode the jwt
it should use the check_permissions method validate claims and check the requested permission
return the decorator which passes the decoded payload to the decorated method
'''
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = get_token_auth_header()
payload = verify_decode_jwt(token)
check_permissions(permission, payload)
return f(payload, *args, **kwargs)
return wrapper
return requires_auth_decorator
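# A sketch of how the decorator is wired to a Flask route (the app object,
# route, and permission string are illustrative, not part of this module):
#     @app.route('/actors')
#     @requires_auth('get:actors')
#     def get_actors(payload):
#         # payload is the decoded JWT passed in by the decorator
#         return 'ok'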
|
[
"farahalota@hotmail.com"
] |
farahalota@hotmail.com
|
705ee15d5c50c3aae068e458114c830421a440fc
|
efe70f921654fd669d8458d9de3d00ecdbcaad53
|
/remote/protocols/irc/services.py
|
395a0a243c15bf160a09bd460eeb7b73d1023eee
|
[] |
no_license
|
xblaster/scrutator
|
92c93590f74bd1f1ed71b13c216a6ad24212e84f
|
eff724d4488bbced528b01f9f82cbdeb5726795b
|
refs/heads/master
| 2016-09-03T07:21:50.888776
| 2009-12-07T12:55:12
| 2009-12-07T12:55:12
| 67,173
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
'''
Created on 1 Dec 2009
@author: wax
'''
from remote.protocols.irc.model import IrcServer, IrcChannel
import ConfigParser, os
import MySQLdb
import MySQLdb.cursors
class IrcServices:
def __init__(self):
config = ConfigParser.ConfigParser()
#config.readfp(open('default.cfg'))
config.read('default.cfg')
#print config.get("database", "username")
self.dbpool = MySQLdb.connect(
host= config.get("database", "host"),
user= config.get("database", "username"),
passwd= config.get("database", "passwd"),
db=config.get("database", "db"),
cursorclass = MySQLdb.cursors.DictCursor
).cursor()
def getServerList(self):
self.dbpool.execute("SELECT * FROM servers")
return self.dbpool.fetchall()
def getModel(self):
self.dbpool.execute("SELECT * FROM `channels`, servers WHERE server_id = servers.id AND (server_id=3 OR server_id=2 OR server_id=1 OR server_id=4 OR server_id=8)")
servers = dict()
for elt in self.dbpool.fetchall():
host = elt["host"]
if not servers.has_key(host):
server = IrcServer()
server.host = host
servers[host] = server
server = servers[host]
channel = IrcChannel()
channel.name = elt["name"]
server.addChannel(channel)
return servers.values()
def setChanStatus(self, channel):
self.dbpool.execute("UPDATE `channels` SET `status` = '"+channel.bot+"', `lastupdate` = '"+getNowInMysql()+"' WHERE `name` ='"+channel.name+"' LIMIT 1")
def getNowInMysql():
from datetime import datetime
from time import strftime
newdate = datetime.now()
mysqldate = strftime("%Y-%m-%d %H:%M:%S", newdate.timetuple())
return mysqldate
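# A sketch of a safer, parameterized variant of setChanStatus, meant as a
# drop-in method on IrcServices (same schema assumed); MySQLdb fills the
# %s placeholders itself, which avoids SQL injection via channel names:
def setChanStatusSafe(self, channel):
    self.dbpool.execute(
        "UPDATE `channels` SET `status` = %s, `lastupdate` = %s "
        "WHERE `name` = %s LIMIT 1",
        (channel.bot, getNowInMysql(), channel.name))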
|
[
"xblaster@lo2k.net"
] |
xblaster@lo2k.net
|
2428c3e3708e0fc2e67361126f0113670c6f5630
|
4609d368e65a361346a02e5c47a5dc2cfe0364a5
|
/project/content/apps.py
|
24f4ccb82e3354f626f7ae665f197f247f9ced7b
|
[] |
no_license
|
fahmihidayah/Simple-Django-CMS
|
38e84a01aa573eba729b64cd17880c6ed94aabd0
|
cc25cf6362ec9e60a2615e4f4cd4e1cece1a07e7
|
refs/heads/master
| 2023-02-28T13:34:40.138950
| 2021-02-11T08:08:35
| 2021-02-11T08:08:35
| 337,964,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from django.apps import AppConfig
class ContentConfig(AppConfig):
    name = 'content'  # must match the app's module path (project/content)
|
[
"f.hidayah@pixilapps.com"
] |
f.hidayah@pixilapps.com
|
f8e92541ae3277aa329b90bab739a4181e3439c1
|
cf8729d340b7164653ce8a3a577a1ab5f806a9dc
|
/Casa/Casa_automatizada.py
|
081cac1f629a690bcab15f81ad54a9bc19d67d4c
|
[] |
no_license
|
aroldovargas/Simulador-Casa-Automatizada
|
fe3c52c7263e82b068a525ac719e13b0be26392d
|
ab8fac4d98a6bf2593c134a35d09e4200941a16a
|
refs/heads/master
| 2020-05-30T10:02:06.929003
| 2019-05-24T18:56:17
| 2019-05-24T18:56:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,036
|
py
|
# -*- coding: utf-8 -*-
import sys
if sys.platform == 'linux2':
import subprocess
output = subprocess.Popen(
'xrandr | grep "\*" | cut -d" " -f4',
shell=True,
stdout=subprocess.PIPE).communicate()[0]
screenx = int(output.replace('\n', '').split('x')[0])
screeny = int(output.replace('\n', '').split('x')[1])
elif sys.platform == 'win32':
from win32api import GetSystemMetrics
screenx = GetSystemMetrics(0)
screeny = GetSystemMetrics(1)
elif sys.platform == 'darwin':
from AppKit import NSScreen
frame_size = NSScreen.mainScreen().frame().size
screenx = frame_size.width
screeny = frame_size.height
else:
# For mobile devices, use full screen
screenx,screeny = 800,600 # return something
import kivy
from kivy.app import App
from kivy.app import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
import casa_api
from kivy.lang import Builder
from kivy.uix.textinput import TextInput
from kivy.properties import ObjectProperty, ListProperty, StringProperty, NumericProperty
from kivy.base import runTouchApp
from kivy.uix.spinner import Spinner
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
from kivy.uix.image import Image
from kivy.core.window import Window
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.clock import Clock
import time
kivy.require('1.8.0')
__version__ = "0.1"
Window.size = (1324,800)
Window.left = (screenx - 1200)/2
Window.top = (screeny - 800)/2
hora = ""
temperatura = ""
data = ""
numeros_sorteados = []
jogadores = {}
bilhete = []
div = 0.0
class HomeWidget(Screen):
pass
class DispositivosWidget(Screen):
pass
class Celular(Screen):
pass
class Bolao_Janela(ScreenManager):
# spinner = Spinner(
# # default value shown
# text='Home',
# # available values
# values=('Home', 'Work', 'Other', 'Custom'),
# # just for positioning in our example
# size_hint=(None, None),
# size=(100, 44),
# pos_hint={'center_x': .5, 'center_y': .5})
def show_selected_value(self):
runTouchApp(mainbutton)
def on_remove_botao(self):
self.remove_wigdet(self.dropdown)
def switch_to_homeWidget(self):
self.current = 'homeWidget'
def switch_to_dispositivosWidget(self):
self.current = 'dispositivosWidget'
    def init_simulacao(self,clocktext):
        # Clock expects a callable and passes the elapsed dt to it;
        # wrap update in a lambda instead of calling it immediately
        Clock.schedule_interval(lambda dt: self.update(clocktext), 1)
    def update(self,clocktext):
        clocktext.text = time.strftime('%I'+':'+'%M'+' %p')
def simulacao(self,cont):
horaLabel = StringProperty()
dataLabel = StringProperty()
temperaturaLabel = StringProperty()
celularImagem = StringProperty()
celularLabel = StringProperty()
telaImagem = StringProperty()
telaLabel = StringProperty()
boxCelular = StringProperty()
quarto1Lista = StringProperty()
quarto2Lista = StringProperty()
cozinhaLista = StringProperty()
salaLista = StringProperty()
corredorLista = StringProperty()
banheiroLista = StringProperty()
lista = [horaLabel,dataLabel,temperaturaLabel]
arq = open("Inicio.txt","r")
linha = arq.readline()
cont = 0
while linha != "":
linha = casa_api.removebarraN(linha)
if cont == 2:
lista[cont].txt = linha
linha = arq.readline()
cont +=1
arq.close()
celularImagem.source = "celular.png"
celularLabel.background_color = 1,1,1,1
telaImagem.source = "tela.png"
telaLabel.background_color = 0,0,0,0
boxCelular.background_color = 0,0,0,1
boxCelular.text = "Enviar"
telaLabel.text = "Relatorio de sistema"
listaAr = []
listaAr.append(quarto1Lista[2])
listaAr.append(quarto2Lista[2])
listaAr.append(cozinhaLista[2])
listaAr.append(salaLista[2])
listaAr.append(corredorLista[2])
listaAr.append(banheiroLista[2])
listaTemp = []
listaTemp.append(quarto1Lista[0])
listaTemp.append(quarto2Lista[0])
listaTemp.append(cozinhaLista[0])
listaTemp.append(salaLista[0])
listaTemp.append(corredorLista[0])
listaTemp.append(banheiroLista[0])
listaPresenca = []
listaPresenca.append(quarto1Lista[1])
listaPresenca.append(quarto2Lista[1])
listaPresenca.append(cozinhaLista[1])
listaPresenca.append(salaLista[1])
listaPresenca.append(corredorLista[1])
listaPresenca.append(banheiroLista[1])
listaLampadas = []
listaLampadas.append(quarto1Lista[3])
listaLampadas.append(quarto2Lista[3])
listaLampadas.append(cozinhaLista[3])
listaLampadas.append(salaLista[3])
listaLampadas.append(corredorLista[3])
listaLampadas.append(banheiroLista[3])
Arligado = [["arQuarto1",0],["arQuarto2",0],["arCozinha",0],["arSala",0],["arCorredor",0],["arBanheiro",0]]
        # There is no AC in the kitchen or the bathroom
Sensortemp = [["tempQuarto1",1,"25"],["tempQuarto2",1,"25"],["tempCozinha",1,"25"],["tempSala",1,"25"],["tempCorredor",1,"25"],["tempBanheiro",1,"25"]]
Presenca = [["pessoasQuarto1",1],["pessoasQuarto2",0],["pessoasCozinha",0],["pessoasSala",1],["pessoasCorredor",0],["pessoasBanheiro",0]]
Lampadas = [["lampadaQuarto1",1,1],["lampadaQuarto2",1,1],["lampadaCozinha",1,0],["lampadaSala",1,1],["lampadaCorredor1",1,0],["lampadaBanheiro",1,0]]
        # Fill in the AC images
for j in Arligado:
if j[1] == 1 and (j[0] == "arQuarto1" or j[0] == "arQuarto2"):
if j[0] == "arQuarto1":
listaAr[0].source = "arcondicionadoDir.png"
if j[0] == "arQuarto2":
listaAr[1].source = "arcondicionadoDir.png"
if j[1] == 1 and j[0] == "arCorredor":
listaAr[4].source = "arcondicionadoFrente.png"
if j[1] == 1 and j[0] == "arSala":
listaAr[3].source = "arcondicionadoEsq.png"
        # Fill in the temperatures
for j in Sensortemp:
if j[1] == 1 and (j[0] == "tempQuarto1" or j[0] == "tempQuarto2"):
if j[0] == "tempQuarto1":
listaTemp[0].text = j[2] + "°"
if j[0] == "tempQuarto2":
listaTemp[1].text = j[2] + "°"
if j[1] == 1 and j[0] == "tempSala":
listaTemp[3].text = j[2] + "°"
if j[1] == 1 and j[0] == "tempCozinha":
listaTemp[2].text = j[2] + "°"
if j[1] == 1 and j[0] == "tempCorredor":
listaTemp[4].text = j[2] + "°"
if j[1] == 1 and j[0] == "tempBanheiro":
listaTemp[5].text = j[2] + "°"
        # Fill in presence
for j in Presenca:
if j[1] == 1 and (j[0] == "pessoasQuarto1" or j[0] == "pessoasQuarto2"):
if j[0] == "pessoasQuarto1":
listaPresenca[0].text = "s"
if j[0] == "pessoasQuarto2":
listaPresenca[1].text = "s"
if j[1] == 1 and j[0] == "pessoasSala":
listaPresenca[3].text = "s"
if j[1] == 1 and j[0] == "pessoasCozinha":
listaPresenca[2].text = "s"
if j[1] == 1 and j[0] == "pessoasCorredor":
listaPresenca[4].text = "s"
if j[1] == 1 and j[0] == "pessoasBanheiro":
listaPresenca[5].text ="s"
for j in Lampadas:
if j[2]==1:
if j[1] == 1 and (j[0] == "lampadaQuarto1" or j[0] == "lampadaQuarto2"):
if j[0] == "lampadaQuarto1":
listaLampadas[0].source = "lampada_acesa.png"
if j[0] == "lampadaQuarto2":
listaLampadas[1].source = "lampada_acesa.png"
if j[1] == 1 and j[0] == "lampadaSala":
listaLampadas[3].source = "lampada_acesa.png"
if j[1] == 1 and j[0] == "lampadaCozinha":
listaLampadas[2].source = "lampada_acesa.png"
if j[1] == 1 and j[0] == "lampadaCorredor1":
listaLampadas[4][0].source = "lampada_acesa.png"
listaLampadas[4][1].source = "lampada_acesa.png"
if j[1] == 1 and j[0] == "lampadaBanheiro":
listaLampadas[5].source ="lampada_acesa.png"
else:
if j[1] == 1 and (j[0] == "lampadaQuarto1" or j[0] == "lampadaQuarto2"):
if j[0] == "lampadaQuarto1":
listaLampadas[0].source = "lampada_apagada.png"
if j[0] == "lampadaQuarto2":
listaLampadas[1].source = "lampada_apagada.png"
if j[1] == 1 and j[0] == "lampadaSala":
listaLampadas[3].source = "lampada_apagada.png"
if j[1] == 1 and j[0] == "lampadaCozinha":
listaLampadas[2].source = "lampada_apagada.png"
if j[1] == 1 and j[0] == "lampadaCorredor1":
listaLampadas[4][0].source = "lampada_apagada.png"
listaLampadas[4][1].source = "lampada_apagada.png"
if j[1] == 1 and j[0] == "lampadaBanheiro":
listaLampadas[5].source ="lampada_apagada.png"
if horaLabel.text == '':
horaLabel.text = str(1)
else:
horaLabel.text = str(int(horaLabel.text)+1)
break
return 0
def callbackHome(self):
dataLabel = StringProperty()
arq = open("Inicio.txt","w")
arq.write(str(horaLabel1)+"\n"+str(temperaturaLabel)+"\n"+str(dataLabel)+"\n")
arq.close()
# def show_selected_value(spinner, text):
# print('The spinner', spinner, 'have text', text)
# runTouchApp(spinner)
# spinner.bind(text=show_selected_value)
class casa(App):
def __init__(self,**kwargs):
super(casa,self).__init__(**kwargs)
#Lista [temperatura,numeroPessoas,arCoondicionado,lampadas,tomadas]
def build(self):
self.root = Bolao_Janela()
return self.root
#
#
if __name__ == '__main__':
casa().run()
|
[
"noreply@github.com"
] |
aroldovargas.noreply@github.com
|
4889678a70051b822eb24ffcd1a1a875749a16bd
|
d99b6f08e4641a5a1936181c63b94c667fb83a9c
|
/venv/lib/python2.7/site-packages/imblearn/under_sampling/tests/test_cluster_centroids.py
|
81acf3ffadf17e0098016fd1f2a63f50313dc6a5
|
[] |
no_license
|
spanlab/spantoolbox
|
6c4d48731e2994ce360ed6284eac032ddbff87bc
|
45855443397f65d57d77a6296fb09f1df3cba31d
|
refs/heads/master
| 2023-07-29T09:18:36.592056
| 2019-12-17T00:25:28
| 2019-12-17T00:25:28
| 120,956,860
| 2
| 1
| null | 2023-07-06T21:17:02
| 2018-02-09T21:20:00
|
Python
|
UTF-8
|
Python
| false
| false
| 7,530
|
py
|
"""Test the module cluster centroids."""
from __future__ import print_function
from collections import Counter
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_raises, assert_warns)
from sklearn.cluster import KMeans
from sklearn.utils.estimator_checks import check_estimator
from imblearn.under_sampling import ClusterCentroids
# Generate a global dataset to use
RND_SEED = 0
# Data generated for the toy example
X = np.array([[0.04352327, -0.20515826], [0.92923648, 0.76103773],
[0.20792588, 1.49407907], [0.47104475, 0.44386323],
[0.22950086, 0.33367433], [0.15490546, 0.3130677],
[0.09125309, -0.85409574], [0.12372842, 0.6536186],
[0.13347175, 0.12167502], [0.094035, -2.55298982]])
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
def test_cc_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(ClusterCentroids)
def test_cc_bad_ratio():
"""Test either if an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, cc.fit, X, Y)
def test_init():
"""Test the initialisation of the object"""
# Define a ratio
ratio = 1.
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_equal(cc.ratio, ratio)
def test_cc_fit_single_class():
"""Test either if an error when there is a single class"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(UserWarning, cc.fit, X, y_single_class)
def test_cc_fit_invalid_ratio():
"""Test either if an error is raised when the balancing ratio to fit is
smaller than the one of the data"""
# Create the object
ratio = 1. / 10000.
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit the data
assert_raises(RuntimeError, cc.fit, X, Y)
def test_cc_fit():
"""Test the fitting method"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit the data
cc.fit(X, Y)
# Check if the data information have been computed
assert_equal(cc.min_c_, 0)
assert_equal(cc.maj_c_, 1)
assert_equal(cc.stats_c_[0], 3)
assert_equal(cc.stats_c_[1], 7)
def test_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
cc.fit(X, Y)
assert_raises(RuntimeError, cc.sample,
np.random.random((100, 40)), np.array([0] * 50 + [1] * 50))
def test_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
assert_raises(RuntimeError, cc.sample, X, Y)
def test_fit_sample_auto():
"""Test fit and sample routines with auto ratio"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit and sample
X_resampled, y_resampled = cc.fit_sample(X, Y)
X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
[0.13347175, 0.12167502], [0.06738818, -0.529627],
[0.17901516, 0.69860992], [0.094035, -2.55298982]])
y_gt = np.array([0, 0, 0, 1, 1, 1])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_fit_sample_half():
"""Test fit and sample routines with ratio of .5"""
# Define the parameter for the under-sampling
ratio = .5
# Create the object
cc = ClusterCentroids(ratio=ratio, random_state=RND_SEED)
# Fit and sample
X_resampled, y_resampled = cc.fit_sample(X, Y)
X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
[0.13347175, 0.12167502], [0.09125309, -0.85409574],
[0.19220316, 0.32337101], [0.094035, -2.55298982],
[0.20792588, 1.49407907], [0.04352327, -0.20515826],
[0.12372842, 0.6536186]])
y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_sample_wrong_X_dft_ratio():
"""Test either if an error is raised when X is different at fitting
and sampling without ratio"""
# Create the object
cc = ClusterCentroids(random_state=RND_SEED)
cc.fit(X, Y)
assert_raises(RuntimeError, cc.sample,
np.random.random((100, 40)), np.array([0] * 50 + [1] * 50))
def test_continuous_error():
"""Test either if an error is raised when the target are continuous
type"""
# continuous case
y = np.linspace(0, 1, 10)
cc = ClusterCentroids(random_state=RND_SEED)
assert_warns(UserWarning, cc.fit, X, y)
def test_multiclass_fit_sample():
"""Test fit sample method with multiclass target"""
# Make y to be multiclass
y = Y.copy()
y[5] = 2
y[6] = 2
# Resample the data
cc = ClusterCentroids(random_state=RND_SEED)
X_resampled, y_resampled = cc.fit_sample(X, y)
# Check the size of y
count_y_res = Counter(y_resampled)
assert_equal(count_y_res[0], 2)
assert_equal(count_y_res[1], 2)
assert_equal(count_y_res[2], 2)
def test_fit_sample_object():
"""Test fit and sample using a KMeans object"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cluster = KMeans(random_state=RND_SEED)
cc = ClusterCentroids(
ratio=ratio, random_state=RND_SEED, estimator=cluster)
# Fit and sample
X_resampled, y_resampled = cc.fit_sample(X, Y)
X_gt = np.array([[0.92923648, 0.76103773], [0.47104475, 0.44386323],
[0.13347175, 0.12167502], [0.06738818, -0.529627],
[0.17901516, 0.69860992], [0.094035, -2.55298982]])
y_gt = np.array([0, 0, 0, 1, 1, 1])
assert_array_almost_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_fit_sample_wrong_object():
"""Test fit and sample using a KMeans object"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
cluster = 'rnd'
cc = ClusterCentroids(
ratio=ratio, random_state=RND_SEED, estimator=cluster)
# Fit and sample
assert_raises(ValueError, cc.fit_sample, X, Y)
|
[
"spanlab@gmail.com"
] |
spanlab@gmail.com
|
89a85d61b9daab440ea1ac0abe4fb2d81e4cbb14
|
43db0aedb18f642fb7c9e80e163ce466d733363b
|
/src/optimisation/optimizers.py
|
833970bbb9b95f665368feda4e73991e02ace19d
|
[] |
no_license
|
Polymere/biorob-semesterproject
|
933291a4adb2e62b3e600fabb307e0027c37537a
|
db464609e3fedbd6fe2ae4df0915c5d7bb6d3f16
|
refs/heads/master
| 2020-04-24T10:07:12.716983
| 2019-08-14T15:16:49
| 2019-08-14T15:16:49
| 171,882,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,163
|
py
|
#!/usr/bin/env python
""" @package optimizers
Optimization algorithms (PSO, GA and NSGA-II) used for parameter search,
together with simple benchmark problems to validate them.
"""
import sys
import yaml
import numpy as np
import pandas as pd
import os
import time
from copy import copy
import warnings
warnings.filterwarnings('ignore')  # silence noisy pandas warnings
LOG_DEBUG=1
LOG_INFO=2
LOG_WARNING=3
LOG_ERROR=4
LOG_LEVEL=LOG_WARNING
class Optimizer:
"""Parent class for optimizers
Not functional as is, but contains shared methods by the children classes.
Attributes:
- bound_mod=None boundary mode to be used, see check_bound
- dofs=None degrees of freedom and boundaries
- nb_ind=None number of individuals per generation/step
- nb_parents=None number of parents used to generate next population
"""
bound_mod=None # boundary mode to be used, see check_bound
dofs=None # degrees of freedom and boundaries
nb_ind=None # number of individuals per generation/step
nb_parents=None # number of parents used to generate next population
def __init__(self,args):
""" Initialization of the optimizer
Sets the attributes of the optimizer according to the input args
and flattens the dofs for further use
        \param args -- dictionary with evolution parameters, used
        to set the attributes of the optimizer
"""
for arg_name,arg_value in args.items():
if hasattr(self, arg_name):
setattr(self, arg_name, arg_value)
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]Optimizer ",self.__class__.__name__,
" initialized with\n",self.__dict__)
if self.dofs is not None:
self.flatten_params()
def check_bound(self,population):
""" Checks boundaries of the population parameters, and acts accordingly
Called when a new population is generated by the optimizer (generation),
with two modes:
clip : if a parameter value is out of boundaries, set its value to
the closest boundary
            exp_handicap: allow the parameter value to exceed boundaries,
                            but assign a handicap to the individual, scaling as
                            \f$h = -\Sigma_{p}^{dofs}\left(\exp{(\Delta_{max,p}+
                            \Delta_{min,p})}-1\right)\f$
with \f$\Delta_{max,p}=v_p-bound\_high_p\f$ if
\f$v_p>bound\_high_p\f$ and 0 otherwise
A new column fit_handicap is added to the population dataframe,
and can be used as an additional selection criterion
\param population -- Dataframe with columns for parameters and rows for
individuals, indexed by uid
Output:
population with either clipped parameter values or additional
handicap column
"""
for index, row in population.iterrows():
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]check_bound\n",row)
if self.bound_mod=="clip":
row=row+(row>self._bound_high)*(self._bound_high-row)
row=row+(row<self._bound_low)*(self._bound_low-row)
population.loc[index]=row.values
elif self.bound_mod=="exp_handicap":
handicap= (row>self._bound_high)*(row-self._bound_high)+\
(row<self._bound_low)*(self._bound_low-row)
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]handicap\n",handicap)
print("\n[DEBUG]type\n",type(handicap))
print("\n[DEBUG]type\n",type(handicap.values[0]))
fl_handicap=np.sum(np.exp(handicap.values.astype(np.float))-1)
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]fl_handicap\n",fl_handicap)
population.loc[index,"fit_handicap"]=-fl_handicap
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Boundaries\n",population)
return population
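    # A minimal sketch of the two modes (values illustrative, not from the
    # source): with dofs {"a": [0, 1]} and an individual at a=1.5, "clip"
    # rewrites a to 1.0, while "exp_handicap" keeps a=1.5 and records
    # fit_handicap = -(exp(0.5) - 1) ~ -0.65 as an extra selection penalty.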
def benchmark(self,problem,nb_gen,nb_dim):
""" Benchmarking of the optimization algorithm on one predefined problem
See run_bench_eval for examples
\param problem -- optimization problem to test (sphere, rastrigin or
fonseca_fleming)
(see https://en.wikipedia.org/w/index.php?title=Test_functions_for_optimization&oldid=787014841)
\param nb_gen -- number of optimization steps to perform
        \param nb_dim -- number of dimensions (degrees of freedom) of the problem
Output:
- Saves the performance (mean,std,best) for each generation and the
final population
"""
if problem=="rastrigin":
self.n=nb_dim
self.dofs={}
for i in range(nb_dim):
self.dofs[str(i)]=[-5.12,5.12]
self.bound_mod="clip"
self.bench_eval=self._eval_rastrigin
elif problem=="sphere":
self.dofs={}
for i in range(nb_dim):
self.dofs[str(i)]=[-10,10]
self.bench_eval=self._eval_sphere
elif problem=="fonseca_fleming":
self.n=nb_dim
self.dofs={}
for i in range(nb_dim):
self.dofs[str(i)]=[-4,4]
self.bench_eval=self._eval_fonseca_fleming
self.flatten_params()
gen_counter=0
uids=self.get_uid(nb_gen,self.nb_ind)
pop_df=pd.DataFrame(index=uids,columns=self.dofs.keys())
vals=np.random.uniform(low=self._bound_low,high=self._bound_high,size=(self.nb_ind,len(self.dofs)))
pop_df[:]=vals
current_pop=pop_df
performance_df=None
while gen_counter<nb_gen:
eval_pop=self.bench_eval(current_pop)
nrow={}
for fit in eval_pop.filter(like="fit").columns:
nrow[fit+"_std"]=eval_pop[fit].std()
nrow[fit+"_mean"]=eval_pop[fit].mean()
nrow[fit+"_best"]=eval_pop[fit].max()
if performance_df is None:
performance_df=pd.DataFrame(index=pd.RangeIndex(nb_gen),columns=nrow.keys())
performance_df.iloc[gen_counter]=nrow
if self.is_single_obj:
eval_pop["fit"]=eval_pop.filter(like="fit").sum(1)
parents=self.select(self.sort_pop(eval_pop))
print("\nParents\n",parents)
if gen_counter%10==0:
print(parents)
gen_counter+=1
current_pop=self.get_next_gen(parents,gen_counter)
performance_df.to_csv("perf_"+problem+self.__class__.__name__+".csv")
self.sort_pop(eval_pop).to_csv("sorted_pop_"+problem+self.__class__.__name__+".csv")
def _eval_rastrigin(self,pop):
"""
        \f[f(x)=An+\Sigma_{i=1}^n\left(x_i^2-A\cos{(2\pi x_i)}\right) \textrm{with $A=10$}\f]
"""
fit=10*self.n+(pop.values**2-10*np.cos(pop.values.astype(np.float32)*2*np.pi)).sum(1)
pop["fit"]= - fit
return pop
def _eval_sphere(self,pop):
"""
        \f[f(x)=\Sigma_{i=1}^n x_i^2 \f]
"""
print("\n---------------------\n")
print(pop)
print(pop.values)
print("\n---------------------\n")
fit=(pop.values**2).sum(1)
pop["fit"]=-fit
return pop
def _eval_fonseca_fleming(self,pop):
"""
\f[
\left\{
\begin{aligned}
&f_1(x)=1-\exp{\left( -\Sigma_{i=1}^n \left( x_i - \frac{1}{\sqrt{n}} \right)^2 \right)}\\
&f_2(x)=1-\exp{\left( -\Sigma_{i=1}^n \left( x_i + \frac{1}{\sqrt{n}} \right)^2 \right)}
\end{aligned}\right.
\f]
"""
f1= 1 - np.exp( ( - ( pop.values.astype(np.float32) - 1 / np.sqrt( self.n ) )**2 ).sum(1) )
f2= 1 - np.exp( ( - ( pop.values.astype(np.float32) + 1 / np.sqrt( self.n ) )**2 ).sum(1) )
pop["fit_1"]= - f1
pop["fit_2"]= - f2
return pop
def flatten_params(self):
""" Transformation of keyvals to arrays
Transforms the dofs dict
{ param_name1 : [bound_low1,bound_high1],
param_name2 : [bound_low2,bound_high2],...
}
to
        dof_names: [param_name1,param_name2 ...] (list)
        _bound_low: [bound_low1,bound_low2,...] (np array)
        _bound_high: [bound_high1,bound_high2,...] (np array)
        for ease of access / parallel operations in population initialization &
        boundary checks
"""
nb_par=len(self.dofs)
self._bound_low=np.empty(nb_par, np.float16)
self._bound_high=np.empty(nb_par, np.float16)
self._params=[]
self.dof_names=[]
i=0
for dof_name,dof_bounds in self.dofs.items():
self._bound_low[i]=dof_bounds[0]
self._bound_high[i]=dof_bounds[1]
self.dof_names.append(dof_name)
i+=1
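        # e.g. (illustrative): dofs = {"k": [0, 2], "b": [-1, 1]} becomes
        # dof_names = ["k", "b"], _bound_low = [0., -1.], _bound_high = [2., 1.]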
def sort_pop(self,eval_pop):
""" Default population sorting method
        Sums all of the fitness metrics of the population and sorts by
decreasing fitness (higher is better)
Input:
Evaluated population (see eval_pop in controller.py)
Output:
Sorted population
"""
srt=eval_pop.filter(like="fit").sum(1).sort_values(axis='index',ascending=False)
return eval_pop.loc[srt.index,:]
def select(self,sorted_pop):
""" Default parent selection method
Returns the nb_parents best individuals of the sorted population
\param sorted_pop Sorted population, with the first individuals
being the best ones
"""
return sorted_pop.head(self.nb_parents)
def get_next_gen(self,parents,gen_nb):
""" Placeholder for next generation creation
Declared here for structure, but this is dependent on optimization
method
\param parents -- Selected parents from previous population
\param gen_nb -- number of the current generation for indexing purposes
"""
raise NotImplementedError
class PSOptimizer(Optimizer):
"""
\image html pso1.png
"""
vel_range=None
c1=0.1
c2=0.3
def __init__(self, arg):
super(PSOptimizer, self).__init__(arg)
self.is_single_obj=True
if self.nb_ind!=self.nb_parents:
            raise ValueError("PSO requires nb_ind == nb_parents (one particle per individual)")
else:
self.nb_particules=self.nb_parents
self._speed=None
self._global_best_fit=None
self._global_best_pos=None
def particles_init(self,eval_pop):
uids=self.get_uid()
fit=eval_pop.filter(like="fit")
fitnesses=fit.sum(1)
positions=eval_pop.drop(columns=fit.columns)
self._speed={}
self._positions={}
self._best_position={}
self._best_fit={}
for part,idx in zip(uids,range(self.nb_particules)):
self._speed[part]=np.random.uniform(low=self.vel_range[0],high=self.vel_range[1],size=len(self.dofs))
cfit=fit.iloc[idx].values[0]
cpos=positions.iloc[idx].to_dict()
if self._global_best_fit is None or self._global_best_fit<cfit:
self._global_best_fit=cfit
self._global_best_pos=cpos
self._positions[part]=cpos
self._best_position[part]=cpos
self._best_fit[part]=cfit
print("\nSpeeds\n",self._speed)
print("\nPositions\n",self._positions)
print("\nParticle best\n",self._best_fit)
def sort_pop(self,eval_pop):
if self._speed is None:
self.particles_init(eval_pop)
return eval_pop.sort_values("fit",ascending=False)
def select(self,sort_pop):
return sort_pop
def get_uid(self,tmp1=None,tmp2=None):
return["particule"+str(i+1) for i in range(self.nb_particules)]
def _dist(self,pos1,pos2):
dist=np.zeros((len(pos1)))
for dim, idx in zip(pos1.keys(),range(len(pos1))):
d=pos1[dim]-pos2[dim]
dist[idx]=d
return dist
def _inc(self,dct1,val):
nb_dim=len(dct1.keys())
for dim,dim_idx in zip(dct1.keys(),range(nb_dim)):
dct1[dim]=dct1[dim]+val[dim_idx]
return dct1
def updt_velocities(self):
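        # Standard PSO velocity update (this variant has no inertia weight):
        # v <- v + c1*r1*(personal_best - x) + c2*r2*(global_best - x),
        # then clamped to vel_range.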
for part in self._speed.keys():
f1=self.c1*np.random.uniform(0,1)*self._dist(self._best_position[part],self._positions[part])
f2=self.c2*np.random.uniform(0,1)*self._dist(self._global_best_pos,self._positions[part])
self._speed[part]=self._speed[part]+f1+f2
#if self._speed[part]>self.vel_range[0]:
self._speed[part][self._speed[part]<self.vel_range[0]]=self.vel_range[0]
self._speed[part][self._speed[part]>self.vel_range[1]]=self.vel_range[1]
print("VEL UPDT\n",f1,"\n",f2,"\n",self._speed[part])
def updt_positions(self):
for part in self._positions.keys():
self._positions[part]=self._inc(self._positions[part], self._speed[part])
def updt_best(self,sorted_pop):
#print("Updt best for\n",sorted_pop)
#print("\n Initial best pos",self._best_position)
fit=sorted_pop.filter(like="fit")
positions=sorted_pop.drop(columns=fit.columns)
#print("Positions\n",positions)
for part in self._positions.keys():
cfit=sorted_pop.loc[part].fit
if cfit>self._best_fit[part]:
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Updt best for\n", part,cfit)
print("\n[DEBUG]Particule:\t",part)
print("\n[DEBUG]Stored position:\t",self._positions[part])
print("\n[DEBUG]Received position:\t",positions.loc[part])
self._best_fit[part]=cfit
self._best_position[part]=copy(self._positions[part])
if cfit>self._global_best_fit:
self._global_best_fit=cfit
self._global_best_pos=copy(self._positions[part])
#print("\n Final best pos",self._best_position)
return
def get_next_gen(self,sorted_pop,gen_counter):
self.updt_velocities()
self.updt_best(sorted_pop)
self.updt_positions()
pos_df=pd.DataFrame(self._positions)
print("\nBest:\t",self._global_best_fit,"\n\t",self._global_best_pos)
return pos_df.T
class GAOptimizer(Optimizer):
""" Simple Genetic algorithm based optimizer
Implementation of cross-over and mutation mechanisms to generate the
child population, as well as an age based elitism
Attributes :
- mut_amp=None mutation amplitude
- mut_rate=None mutation rate
- cross_rate=None crossover rate
        - drop_age=5 maximum number of generations an individual may remain in the population (age-based elitism limit)
"""
mut_amp=None
mut_rate=None
cross_rate=None
drop_age=5
def __init__(self, arg):
super(GAOptimizer, self).__init__(arg)
self.is_single_obj=True
    def select(self,sorted_pop,gen_nb=0):
""" Parent selection method for GA
        Removes individuals that are in the population for more than
drop_age generations to limit elitism before returning the nb_parents
best individuals
Input:
- sorted_pop -- sorted population
- gen_nb -- current generation number
Output:
- parents for next generation
"""
cliped=self.check_age(sorted_pop, gen_nb)
return cliped.head(self.nb_parents)
def check_age(self,pop,gen_nb):
""" Removes old individuals from population
In the case where optimization is performed with elitism (keep
best individuals across generations), we remove the individuals
        that were present for more than drop_age generations
(5 by default) in order to encourage exploration
Input:
- pop -- Population, with original generation number
- gen_nb -- current generation number
Output:
- population with oldest individuals removed (if applicable)
"""
        if self.drop_age is None or "gen" not in pop.columns:  # no age info (e.g. benchmarks)
return pop
else:
too_old=pop[gen_nb-pop["gen"]>self.drop_age]
print("Dropping",too_old)
return pop.drop(too_old.index)
def get_uid(self,gen_nb,new_gen):
""" Returns a list with uids for a new generation
"""
return ["gen"+str(gen_nb)+"ind"+str(i+1) for i in range(self.nb_ind)]
def set_uid(self,gen_nb,new_gen):
""" Sets index of new_gen with uids
"""
new_gen.index=self.get_uid(gen_nb, new_gen)
return new_gen
def get_next_gen(self,parents,gen_nb):
""" Creates a new population from selected parents
Generates nb_ind new individuals, with unique identifiers as
index of the dataframe, using crossover and mutation on the parents
The boundaries of the generated population are checked according
to the specified rule (see check_bound)
Input:
- parents -- parent population selected by the optimizer
- gen_nb -- current generation number
Output:
- child population, which is going to be evaluated by the controller
"""
new_gen=self.check_bound(self.cross_and_mutate(parents))
return self.set_uid(gen_nb, new_gen)
def cross_and_mutate(self,parents):
""" Crossover and mutation on the parent population to generate child pop
1. Generate nb_ind random couple (parent1 parent2)
2. For each dof, use parameter value of parent2 with probability
cross_rate, otherwise use value from parent1
            3. For each dof, add value N(0,1)*value*mut_amp with probability
                mut_rate (mutation amplitude relative to the current value)
\image html ga1.png
Input:
parents -- Dataframe containing parameters of selected parents
from previous generation
Output:
            Dataframe with nb_ind generated children
"""
nb_dofs=len(self.dofs)
couples=np.random.randint(0, len(parents.index), (2,self.nb_ind))
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]parents\n",parents)
print("\n[DEBUG]couples\n",couples)
p1_ids=parents.iloc[couples[0][:]].index
p2_ids=parents.iloc[couples[1][:]].index
cross_select=(np.random.randint(0,100,(self.nb_ind,nb_dofs))<100*self.cross_rate)
# probability cross_rate to take a param from parent2
child_df=pd.DataFrame(index=pd.RangeIndex(self.nb_ind),columns=self.dofs.keys())
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]child_df\n",child_df.head())
child_df[:]=(parents.loc[p1_ids,self.dofs.keys()]*cross_select[:]).values+\
(parents.loc[p2_ids,self.dofs.keys()]*np.logical_not(cross_select[:])).values
mutate=(np.random.randint(0,100,(self.nb_ind,nb_dofs))<100*self.mut_rate)
# probability mutrate to add a normal of std value*mut_amp to param
mutate_amp=np.random.randn(self.nb_ind,nb_dofs)*child_df[:]*self.mut_amp
child_df[:]=child_df[:]+mutate_amp*mutate
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Child\n",child_df)
return child_df
class NSGAIIOptimizer(GAOptimizer):
""" Optimizer based on Non-dominated Sorting Genetic Algorithm II (NSGAII)
Implementation of non-dominated sorting based on
https://ieeexplore.ieee.org/document/996017
"""
def __init__(self, arg):
super(NSGAIIOptimizer, self).__init__(arg)
self.is_single_obj=False
def sort_pop(self,eval_pop):
""" Non dominated sorting
Sorting based on increasing non dominated front number and decreasing
crowding distance
\image html sort1.png
Input:
eval_pop -- evaluated population, with each optimization objective
as a column of the dataframe (with prefix "fit")
Output:
            Sorted population: ascending front number, then descending crowding distance
"""
tstart=time.time_ns()
pop_with_fronts=self.add_fronts(eval_pop)
tf=time.time_ns()
fronts_and_dist=self.add_crowding_distance(pop_with_fronts)
tcr=time.time_ns()
#print("\n[TIME]\nfront alloc\t",(tf-tstart)/1e6,"[ms]\ncrowding\t",(tcr-tf)/1e6,"[ms]")
sorted_pop=fronts_and_dist.sort_values(by=["front","cr_dist"],axis='index',ascending=[True,False])
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]Sorted population\n",sorted_pop)
return sorted_pop
def add_fronts(self,eval_pop):
""" Non dominated front computation
        Implementation of the non-dominated sorting algorithm. For performance,
        we stop once at least nb_parents individuals have been assigned to fronts
\image html front1.png
Input:
eval_pop -- evaluated population, with each optimization objective
as a column of the dataframe (with prefix "fit")
Output:
eval_pop, with a new column ("front") containing the number of
non dominated front of the individual
"""
fit=eval_pop.filter(like="fit")
fronts=pd.DataFrame(columns=eval_pop.columns)
flag_empty_front=False
other=pd.DataFrame(columns=fit.columns)
nb_front=1
pop_in_fronts=0
while not (flag_empty_front or pop_in_fronts>self.nb_parents):
current_front=pd.DataFrame(columns=eval_pop.columns)
for index, indiv in fit.iterrows(): # must be a way to do it without iter
rel=fit.le(indiv,axis=1).drop(index)
                dominant=rel[rel.all(axis=1)].index
rel=fit.gt(indiv,axis=1).drop(index)
#print(rel)
                dominated=rel[rel.all(axis=1)].index
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]",index,"Dominates\n",dominant.values,"\nDominated by\n",dominated.values)
if len(dominated)==0:
current_front.loc[index,eval_pop.columns]=eval_pop.loc[index].values
current_front.loc[index,"front"]=nb_front
else:
other.loc[index]=fit.loc[index].values
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]other\n",other)
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]Front",nb_front,"with",len(current_front.index),"inds\n")
print("\n[INFO]",current_front)
print("\n[INFO]Missing",len(other),"inds\n")
print("\n[INFO]",other)
            pop_in_fronts+=len(current_front.index)
if pop_in_fronts>=self.nb_parents:
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]early stop (front",nb_front,")\n")
fronts=fronts.append(current_front)
flag_empty_front=(len(other)==0)
fit=other
other=pd.DataFrame(columns=fit.columns)
nb_front+=1
if LOG_LEVEL<=LOG_INFO:
fronts=fronts[fronts.columns.drop(fronts.filter(like="Unnamed").columns)]
print("\n[INFO]Fronts\n",fronts)
return fronts
def add_crowding_distance(self,pop_with_fronts):
""" Crowding distance computation
        Implementation of the crowding distance algorithm, which rewards
        an even spread of the solutions in the objective space
\image html cr1.png
Input:
- pop_with_fronts -- the first non-dom fronts, with at least
nb_parents individuals
Output:
            - pop_with_fronts, with an additional column cr_dist containing
            the computed crowding distance
"""
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Front values\n",pop_with_fronts.front.unique())
if "fit_stable" in pop_with_fronts.columns:
if LOG_LEVEL<=LOG_WARNING:
print("\n[WARNING]Removing stability from objectives\n")
fit=pop_with_fronts.filter(like="fit_")
fit=fit.add(fit.fit_stable,axis='index')
pop_with_fronts.loc[:,fit.columns]=fit
pop_with_fronts.drop("fit_stable",axis=1,inplace=True)
for front in pop_with_fronts.front.unique():
pop_front=pop_with_fronts[pop_with_fronts.front==front]
L=len(pop_front.index)
pop_front["cr_dist"]=0
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Front",front," with pop\n",pop_front)
for obj in pop_front.filter(like="fit_").columns:
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Objective\n",obj)
sorted_obj=pop_front.sort_values(by=obj,ascending=False,axis='index')
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Sorted\n",sorted_obj.loc[:,obj])
sorted_obj.loc[:,"cr_dist"]=sorted_obj.shift(1).loc[:,obj]-sorted_obj.shift(-1).loc[:,obj]
sorted_obj.ix[0,"cr_dist"]=np.inf
sorted_obj.ix[L-1,"cr_dist"]=np.inf
if LOG_LEVEL<=LOG_DEBUG:
print("\n[DEBUG]Cr dist\n",sorted_obj)
pop_front.loc[sorted_obj.index,"cr_dist"]=pop_front.loc[sorted_obj.index,"cr_dist"].add(sorted_obj.cr_dist,axis='index')
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]crowding_distance for front ",front,"\n",pop_front)
pop_with_fronts.loc[pop_front.index,"cr_dist"]=pop_front.cr_dist
if LOG_LEVEL<=LOG_INFO:
print("\n[INFO]Full pop\n",pop_with_fronts)
return pop_with_fronts
def run_bench_eval(arg):
""" Benchmark evaluations
    Run from a terminal as
        'python optimizers.py OPT_NAME'
    with OPT_NAME being one of the optimizer names (PSO, GA or
    NSGAII); edit the hyperparameters or the benchmark problem
    directly below
"""
if arg=="PSO":
pars={"vel_range":[-0.5,0.5],
"nb_ind":50,
"nb_parents":50}
opti=PSOptimizer(pars)
elif arg=="GA":
pars={ "mut_amp":0.1,
"mut_rate":0.5,
"cross_rate": 0.5,
"nb_ind":50,
"nb_parents":5}
opti=GAOptimizer(pars)
elif arg=="NSGAII":
pars={ "mut_amp":0.1,
"mut_rate":0.5,
"cross_rate": 0.5,
"nb_ind":200,
"nb_parents":20}
opti=NSGAIIOptimizer(pars)
else:
raise KeyError
opti.benchmark("fonseca_fleming", 2, 2)
if __name__ == '__main__':
try:
arg=sys.argv[1]
except IndexError:
if LOG_LEVEL<=LOG_ERROR:
print("[ERROR] Missing arguments, see doc:\n",
run_bench_eval.__doc__)
else:
try:
run_bench_eval(arg)
except KeyError:
if LOG_LEVEL<=LOG_ERROR:
print("[ERROR] Incorrect arguments:",arg,"\n see doc:\n",
run_bench_eval.__doc__)
|
[
"paul.prevel@epfl.ch"
] |
paul.prevel@epfl.ch
|
3b6f84dfa395734f5ee0c39ad591ec38ff6405a8
|
5763a3a0a7ee4e3458767ceb30a12f0e3e02d9ba
|
/blog/main/migrations/0017_auto_20210417_1517.py
|
db35fbeb9a7c88b8d3b244f347c56f8d11deeecb
|
[] |
no_license
|
dnplkv/hw5_Polyakov
|
df9c77492103d7f83c0e9e6fbb8c00cfbe66e20e
|
6db653f9def6868cbad24b97d6ec15ce23e6764a
|
refs/heads/main
| 2023-06-14T10:04:57.764555
| 2021-07-19T17:15:31
| 2021-07-19T17:15:31
| 352,036,112
| 0
| 1
| null | 2021-07-19T17:15:31
| 2021-03-27T09:50:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
# Generated by Django 3.1.7 on 2021-04-17 15:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0016_auto_20210417_1451'),
]
operations = [
migrations.AlterField(
model_name='books',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.author'),
),
]
|
[
"dannypolyakov95@gmail.com"
] |
dannypolyakov95@gmail.com
|
a72250408c3ed899f1c3e78bd067bfb26db56d99
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/ncLp4ZXvz4x4oEHYh_12.py
|
569312e16f010b12fd6a44bdf5c65cb240b62bea
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
def sum_of_two(a, b, v):
for i in a:
for j in b:
if i+j == v:
return True
return False
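# Illustrative check (not from the source):
#   sum_of_two([1, 2], [4, 5], 6)  # True  (2 + 4 == 6, also 1 + 5)
#   sum_of_two([1, 2], [4, 5], 3)  # False (no pair sums to 3)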
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
aff069f6593040c59948a74d78b4bc9e31ec60bb
|
f25763b3e3083c308b88b066b791121c3420dcc2
|
/models/mnist/generator.py
|
1676e125c24fcc3ae4e056de9a4468881dcac984
|
[
"MIT"
] |
permissive
|
ultraglorious/wgan
|
515d85fc146d244787477a10fcf753ff6da15945
|
d8673d95758b232b3dbf4c99e4d14aa32b4cc0ad
|
refs/heads/main
| 2023-07-08T20:47:13.086014
| 2021-08-16T11:34:18
| 2021-08-16T11:34:18
| 394,618,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
import tensorflow as tf
from layers import ConvolutionBlock as ConvBlock
def generator(latent_dim: int) -> tf.keras.Model:
"""Initializes WGAN generator"""
ls = 0.2 # leaky relu slope
initializer = tf.random_normal_initializer(0., 0.02)
inputs = tf.keras.layers.Input(shape=(latent_dim,), dtype=tf.dtypes.float32)
x = tf.keras.layers.Dense(7 * 7 * 256, use_bias=False, kernel_initializer=initializer)(inputs)
x = tf.keras.layers.Reshape((7, 7, 256))(x)
x = ConvBlock(5, 1, 128, transpose=True, normalize=True, dropout=True, leaky_slope=ls)(x) # 7, 7, 128
x = ConvBlock(5, 2, 64, transpose=True, normalize=True, dropout=True, leaky_slope=ls)(x) # 14, 14, 64
x = ConvBlock(5, 2, 1, transpose=True, normalize=True, dropout=True, activation="tanh")(x) # 28, 28, 1
return tf.keras.Model(inputs=inputs, outputs=x)
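# A minimal usage sketch (latent_dim=100 is an assumed value, not from the source):
# model = generator(latent_dim=100)
# noise = tf.random.normal((16, 100))
# fake = model(noise)  # -> shape (16, 28, 28, 1), tanh output in [-1, 1]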
|
[
"e.grono@live.ca"
] |
e.grono@live.ca
|
bd9c4bccc07687e42b39122cb1fbe980f48099fe
|
7fb3867bb6e605c58aeebdebc3ac6a18a11beec8
|
/Python/assignment/pes-python-assignments-1x.git/51.py
|
db4d839aa7d6a29bac1a97ab8df09690fb45a530
|
[] |
no_license
|
gaya38/devpy
|
93bf6057fc198582bfa96446dc356667f9f0091d
|
9624d413222565199240a2f57a9b849563e83d49
|
refs/heads/master
| 2020-07-15T23:55:46.375979
| 2019-09-01T14:48:03
| 2019-09-01T14:48:03
| 205,675,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
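# Python 2 script: mode "arg" splits the input sentence into words; mode
# "big" prints the largest digit of an integer (a single bubble pass pushes
# the maximum digit to the end of the list).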
x=raw_input("Enter the program driver as arg or big:")
if(x=='arg'):
a=raw_input("Enter a value:")
e=[]
g=0
for i in range(len(a)):
if (a[i]==" ")or(i==len(a)-1):
e.append(a[g:i+1])
g=i+1
else:
continue
for i in e:
print i
elif(x=='big'):
b=input("Enter b value:")
k=[]
while(b>0):
k.append(b%10)
b=b/10
for l in range(len(k)-1):
if (k[l]>k[l+1]):
temp=k[l]
k[l]=k[l+1]
k[l+1]=temp
else:
continue
print k[-1]
else:
print "Entered the wrong value so program ended"
|
[
"gayathri.ande08@gmail.com"
] |
gayathri.ande08@gmail.com
|
2d53323c0ba81f90f8b5ae148e1a9c877eda45f7
|
cf0ab8503d4d704045070deea1e2125375711e86
|
/apps/users/mixins.py
|
2ba17197bb580fc8979a19b0a3bda49302295b44
|
[] |
no_license
|
faierbol/syncano-platform
|
c3c6468600115752fd9fa5e46a0ad59f75f6bc9c
|
879111874d1ef70418b4890cf970720b0a2be4d8
|
refs/heads/master
| 2023-07-20T10:13:40.066127
| 2021-02-08T15:01:13
| 2021-02-08T15:01:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# coding=UTF8
from apps.data.mixins import ObjectSchemaProcessViewMixin
from apps.data.models import DataObject, Klass
class UserProfileViewMixin(ObjectSchemaProcessViewMixin):
model = DataObject
def initialize_request(self, request, *args, **kwargs):
request = super().initialize_request(request, *args, **kwargs)
if request.instance:
self.klass = Klass.get_user_profile()
return request
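# Note (sketch, inferred from the code above): when the request carries an
# instance, the view presumably operates on the user-profile Klass schema
# rather than a URL-selected one.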
|
[
"rk@23doors.com"
] |
rk@23doors.com
|