blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7c06dc93cc7febe105c9c5e02e838264589e283 | bb142df010298fb4fa51d021a869bc99f689541b | /arelle/plugin/security/cryptAES_CBC.py | 47f32f7e35b403f4958f998c77ac43e52b7dc2da | [
"Apache-2.0"
] | permissive | fritzh321/Arelle | 6d7a7363716d52e3bf2f788c43a50de5f84edaa3 | fd585c7a5cef067ae213059bb864c4d32f937eb5 | refs/heads/master | 2020-04-09T22:50:02.115753 | 2018-12-02T16:33:20 | 2018-12-02T16:33:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,957 | py | '''
Created on June 7, 2018
@author: Mark V Systems Limited
(c) Copyright 2018 Mark V Systems Limited, All rights reserved.
Template crypt module using AES CBC mode.
Customize for an integrated security environment
Input file parameters may be in JSON (without newlines for pretty printing as below):
[ {"file": "file path to instance or inline xhtml",
"key": "base 64 encoded key",
"iv": "base 64 encoded iv",
... (any other custom entrypoint parameters)
},
{"file": "file 2"...
]
On Windows, the input file argument must be specially quoted if passed in via Java
due to a Java bug on Windows shell interface (without the newlines for pretty printing below):
"[{\"file\":\"z:\\Documents\\dir\\gpc_gd1-20130930.htm\",
\"key\": \"base 64 encoded key\",
\"iv\": \"base 64 encoded iv\",
... (any other custom entrypoint parameters)
}]"
The ownerObject may be a validation object related to the instance or to a collection of instances.
Customize method of detecting an encrypted file. This example appends "~" to distinguish files which are encrypted.
'''
import os, io, base64
from arelle import FileSource, XmlUtil
AES = None # Cipher.Crypto AES is only imported if an encrypted input is noticed
ENCRYPTED_FILE_SUFFIX = "~" # appended to any file which has been encrypted
def securityInit(ownerObject, options, filesource, entrypointfiles, sourceZipStream):
    """Reset the crypt state on ownerObject before a filing is processed.

    Encryption starts disabled; cipher material is only picked up later
    (securityFilingStart / securityFileSourceFile) from the entry point
    parameters.
    """
    ownerObject.hasEncryption = False
    # No cipher material yet; key/iv are populated from entrypoint JSON later.
    ownerObject.cipherKey = ownerObject.cipherIv = None
def securityFilingStart(ownerObject, options, filesource, entrypointfiles, sourceZipStream):
    """Enable encryption support when any entry point carries a "key" field.

    Importing pycrypto's AES is deferred until an encrypted filing is
    actually seen, so unencrypted runs never need the library installed.
    """
    if not isinstance(entrypointfiles, list):
        return
    if not any("key" in entry for entry in entrypointfiles):
        return
    # Lazily bind AES into the module-level slot; the server must have it.
    global AES
    from Crypto.Cipher import AES  # must have AES encryption loaded in server
    ownerObject.hasEncryption = True
def securityFileSourceExists(ownerObject, filepath):
    """Report whether an encrypted sibling file ("<filepath>~") exists.

    Returns True when found; returns None (not False) otherwise so the
    caller can fall back to its normal existence check.
    """
    if not ownerObject.hasEncryption:
        return None
    if os.path.exists(filepath + ENCRYPTED_FILE_SUFFIX):
        return True
    return None
def securityFileSourceFile(cntlr, ownerObject, filepath, binary, stripDeclaration):
    """Serve a FileSource read request, transparently decrypting "~" files.

    Returns a 1-tuple (bytes stream,) for binary requests, a 2-tuple
    (string stream, encoding) for text requests, or None to signal the
    caller to fall back to normal (unencrypted) file handling.
    """
    # handle FileSource file requests which can return encrypted contents
    if ownerObject.hasEncryption:
        # If this path is one of the entry points, adopt its key/iv pair;
        # the pair then remains active for subsequent non-entry files.
        for entrypointfile in ownerObject.entrypointfiles:
            if filepath == entrypointfile["file"] and "key" in entrypointfile and "iv" in entrypointfile:
                ownerObject.cipherIv = base64.decodebytes(entrypointfile["iv"].encode())
                ownerObject.cipherKey = base64.decodebytes(entrypointfile["key"].encode())
                break # set new iv, key based on entrypointfiles
        # may be a non-entry file (xsd, linkbase, jpg) using entry's iv, key
        if os.path.exists(filepath + ENCRYPTED_FILE_SUFFIX) and ownerObject.cipherKey is not None and ownerObject.cipherIv is not None:
            encrdata = io.open(filepath + ENCRYPTED_FILE_SUFFIX, "rb").read()
            # AES is imported lazily by securityFilingStart; CBC with the
            # stored iv mirrors securityWrite below.
            cipher = AES.new(ownerObject.cipherKey, AES.MODE_CBC, iv=ownerObject.cipherIv)
            bytesdata = cipher.decrypt(encrdata)
            encrdata = None # dereference before decode operation
            if binary: # return bytes
                # Last byte is the pad length written by securityWrite.
                return (FileSource.FileNamedBytesIO(filepath, bytesdata[0:-bytesdata[-1]]), ) # trim AES CBC padding
            # detect encoding if there is an XML header
            encoding = XmlUtil.encoding(bytesdata[0:512],
                                        default=cntlr.modelManager.disclosureSystem.defaultXmlEncoding
                                                if cntlr else 'utf-8')
            # return decoded string
            text = bytesdata[0:-bytesdata[-1]].decode(encoding or 'utf-8') # trim AES CBC padding and decode
            bytesdata = None # dereference before text operation
            if stripDeclaration: # file source may strip XML declaration for libxml
                xmlDeclarationMatch = FileSource.XMLdeclaration.search(text)
                if xmlDeclarationMatch: # remove it for lxml
                    start,end = xmlDeclarationMatch.span()
                    text = text[0:start] + text[end:]
            return (FileSource.FileNamedStringIO(filepath, initial_value=text), encoding)
    return None
def securityWrite(ownerObject, filepath, data):
    """Encrypt *data* and write it to filepath + "~".

    Returns True when the encrypted file was written, or None when
    encryption is inactive (caller then writes the plain file itself).
    """
    if ownerObject.hasEncryption and ownerObject.cipherKey is not None and ownerObject.cipherIv is not None:
        cipher = AES.new(ownerObject.cipherKey, AES.MODE_CBC, iv=ownerObject.cipherIv)
        if isinstance(data, str): # encode string into bytes
            bytesdata = data.encode("utf-8")
        else: # data is binary, doesn't need encoding
            bytesdata = data
        # CBC needs a multiple of the 16-byte block size; pad with bytes
        # whose value equals the pad length (read back by the decrypt path).
        padlength = 16 - (len(bytesdata) % 16) # AES CBC padding
        bytesdata += padlength * (chr(padlength).encode())
        encrdata = cipher.encrypt(bytesdata)
        if isinstance(data, str): bytesdata = None # dereference before open operation
        with open(filepath + ENCRYPTED_FILE_SUFFIX, "wb") as fh:
            fh.write(encrdata)
        return True # written successfully
    return None
# Arelle plugin registration: maps the security mount-point names to the
# handler functions defined above.
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading
    'name': 'Security Crypt AES_CBC',
    'version': '1.0',
    'description': '''AES_CBC security encryption''',
    'license': 'Apache-2',
    'author': 'Mark V Systems',
    'copyright': '(c) Copyright 2018 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'Security.Crypt.Init': securityInit,
    'Security.Crypt.Filing.Start': securityFilingStart,
    'Security.Crypt.FileSource.Exists': securityFileSourceExists,
    'Security.Crypt.FileSource.File': securityFileSourceFile,
    'Security.Crypt.Write': securityWrite
}
| [
"fischer@markv.com"
] | fischer@markv.com |
6de2299a08c10405b614b3e10584176fc4c0a16f | 6a194f60c57f00d89467a551696a6d12c6494ca5 | /migrations/versions/b67f74365ac0_updated_the_classes.py | 6906146f3c3e3c17f86d5eff34066880f6b860a3 | [
"MIT"
] | permissive | GeGe-K/Pitcher-App | 3479bc41b0dd431592db87dd9bf94ca59645e2d6 | 4a970b37fe0fcd63ad3853a4f764c410a4acb640 | refs/heads/master | 2020-04-06T22:37:58.522524 | 2018-11-26T08:41:18 | 2018-11-26T08:41:18 | 157,842,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """updated the classes.
Revision ID: b67f74365ac0
Revises: 9d0c25ad18b3
Create Date: 2018-11-20 08:24:19.781368
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b67f74365ac0'
down_revision = '9d0c25ad18b3'
branch_labels = None
depends_on = None
def upgrade():
    """Replace the separate date/time string columns with one DateTime column.

    NOTE(review): this is lossy -- existing values in the dropped 'date'
    and 'time' columns are discarded, and the new 'posted' column starts
    out NULL for existing rows.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('comments', sa.Column('posted', sa.DateTime(), nullable=True))
    op.drop_column('comments', 'date')
    op.drop_column('comments', 'time')
    op.add_column('pitches', sa.Column('posted', sa.DateTime(), nullable=True))
    op.drop_column('pitches', 'date')
    op.drop_column('pitches', 'time')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original date/time VARCHAR columns and drop 'posted'.

    NOTE(review): also lossy -- 'posted' values are discarded and the
    restored columns come back NULL.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pitches', sa.Column('time', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('pitches', sa.Column('date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('pitches', 'posted')
    op.add_column('comments', sa.Column('time', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('comments', sa.Column('date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('comments', 'posted')
    # ### end Alembic commands ###
| [
"gloriagivondo@gmail.com"
] | gloriagivondo@gmail.com |
11a6fe0692f20dd9838d132b112065aff12e988d | 2f30cf20d58e2cde4037441e67213223c69a6998 | /lesson32_接口总结/d2_time_log.py | dc2d95c037ebc1ecda9cf0ab9e2c2fdab64b67c4 | [] | no_license | zengcong1314/python1205 | b11db7de7d0ad1f8401b8b0c9b20024b4405ae6c | da800ed3374d1d43eb75485588ddb8c3a159bb41 | refs/heads/master | 2023-05-25T07:17:25.065004 | 2021-06-08T08:27:54 | 2021-06-08T08:27:54 | 318,685,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # 根据时间生成新文件
import logging
import time
from logging.handlers import TimedRotatingFileHandler
# Demo of time-based log rotation: write to time12.log, rolling to a new
# file every 2 seconds (when='s', interval=2) and keeping up to 100 old files.
logger = logging.getLogger('python36')
handler = TimedRotatingFileHandler('time12.log',when='s',interval=2,backupCount=100,encoding='UTF-8')
logger.addHandler(handler)
# Emit 100 warnings ~0.1s apart so several rotations happen during the run.
# (WARNING is above the default root level, so no setLevel call is needed.)
for i in range(100):
    logger.warning("生成警告信息{}".format(time.time()))
    time.sleep(0.1)
| [
"237886015@qq.com"
] | 237886015@qq.com |
68b702c7709ce4ab311d3fa7fb54a30b2284e31d | bd3fb18aef0bf47eb6410107d939134cffc3a1ae | /0-jakc/jakc_sale/models/procurement.py | 976173c9d8ebca1145d8f77ed6cb1eee4a2fdaf9 | [] | no_license | rapolebas/project-0021 | 477227f7359c893df891c1d98da82d45b6cfcdbe | 4e65ca034be5ff4c7a9c91a988db85ec37392452 | refs/heads/master | 2020-03-16T02:32:32.213774 | 2017-12-02T07:59:15 | 2017-12-02T07:59:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from datetime import datetime, timedelta
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class ProcurementOrder(models.Model):
    """Extend procurement.order with read-only links back to the sale."""
    _inherit = ['procurement.order']
    # Sale order that spawned this procurement, resolved via its sale line.
    sale_order_id = fields.Many2one('sale.order','Sale Order', related='sale_line_id.order_id', readonly=True)
    # Customer vehicle carried over from that sale order.
    partner_vehicle_id = fields.Many2one('partner.vehicle', related='sale_order_id.partner_vehicle_id', readonly=True, string='Vehicle')
| [
"wahhid@gmail.com"
] | wahhid@gmail.com |
4a80d8be5dbcc1b93e738373fe9b598f7b96a3e3 | ad8de2c69a4d3692af2ce14aaa31ba97de95f24f | /project_code/Example Scripts/plot_oneclass.py | 28048908c1cd94c8c477ae1709fb92532a28fc17 | [
"MIT"
] | permissive | e-koch/Phys-595 | b04f7179879b3f6ff8a6d608be8667b892b874d9 | 44872fa47609242f7aa8671eb75851622516129f | refs/heads/master | 2021-01-25T05:28:09.497201 | 2015-09-10T04:58:26 | 2015-09-10T04:58:26 | 24,083,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,557 | py | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
# Fixed seed so each run regenerates the same "random" data.
np.random.seed(1029344)

# 2-D grid used to draw the decision-function contour.
# NOTE(review): the grid points are 2-dimensional while the model below is
# trained on 5-dimensional data; OneClassSVM.decision_function rejects a
# mismatched feature count.  Either train on 2-D data (as in the original
# scikit-learn example) or drop the contour plot.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))

# Generate train data: two Gaussian blobs centred at +2 and -2.
X = 0.3 * np.random.randn(1000000, 5)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations from the same distribution.
X = 0.3 * np.random.randn(20000, 5)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations (uniform noise).
X_outliers = np.random.uniform(low=-4, high=4, size=(20000, 5))

# Fit the one-class model; nu bounds the training-error / support-vector
# fractions, gamma is the RBF kernel width.
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# predict() returns +1 for inliers and -1 for outliers, so these count
# misclassifications for each sample group.
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size

# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
# Only the first two of the five feature dimensions are plotted.
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
           ["learned frontier", "training observations",
            "new regular observations", "new abnormal observations"],
           loc="upper left",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.show()

# FIX: the original hard-coded denominators (200 and 40) from the upstream
# scikit-learn example; report the actual sample counts instead.
print("error train: %d/%d ; errors novel regular: %d/%d ; "
      "errors novel abnormal: %d/%d"
      % (n_error_train, len(X_train), n_error_test, len(X_test),
         n_error_outliers, len(X_outliers)))
"koch.eric.w@gmail.com"
] | koch.eric.w@gmail.com |
f3e82229dd7ad3dce9fa4f95ba275f4f42e1397d | dbf635c24ed9eff228ffaf35e71dcfd3712891a5 | /acoustic/COVAREP/sentence_level_format/archived_models/archived_model_4/load_model.py | a663a69b3fee68df98be40a215253ee114384130 | [] | no_license | aascode/depression_estimation_from_individual_modalities | 57f3b6ebf740585c9cb3d5821028969e2f36e4d1 | 6e1563b4081c4aadc91f7110c684290b7a622167 | refs/heads/master | 2022-01-14T20:41:33.333739 | 2019-05-06T18:30:11 | 2019-05-06T18:30:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | import numpy as np
import keras
from keras.models import Model, Sequential, load_model
from keras.layers import Dense, CuDNNLSTM, Input, Concatenate, Dropout
from keras import regularizers
def load_model(location = None):
    """Load a saved Keras model from *location*, or build a fresh one.

    The new network takes two inputs -- a (4000, 74) feature sequence and a
    single scalar (gender flag, judging by the input name) -- and outputs
    one linear regression value.

    Args:
        location: path to a saved model file; None builds a new model.

    Returns:
        A keras Model (no optimizer/loss attached here).
    """
    if location is not None:  # `is not None`, not `!= None` (PEP 8 E711)
        model = keras.models.load_model(location)
        print("Loaded the model.")
        return model
    # Sequence branch: LSTM over the 4000 x 74 feature matrix.
    X = Input(shape=(4000, 74,))
    X_gender = Input(shape=(1,))
    Y = CuDNNLSTM(84, name='lstm_cell')(X)
    Y = Dropout(rate=0.2)(Y)
    # Concatenate the scalar input onto the sequence embedding.
    Y = Concatenate(axis=-1)([Y, X_gender])
    Y = Dense(42, activation='relu')(Y)
    Y = Dropout(rate=0.2)(Y)
    Y = Dense(20, activation='relu')(Y)
    Y = Dropout(rate=0.2)(Y)
    Y = Dense(1, activation=None)(Y)  # linear output for regression
    model = Model(inputs=[X, X_gender], outputs=Y)
    print("Created a new model.")
    return model
if(__name__ == "__main__"):
m = load_model() | [
"arbaaz.qureshi29@gmail.com"
] | arbaaz.qureshi29@gmail.com |
340d1b477b1dd67a4c8461aabf6a05268df3064b | 3c358b34811ad9d178e2865336498dde3f3e5032 | /WAFLEX/server/mymail.py | 8db59a99094a40f33d3899b37367ca1491db7699 | [] | no_license | shywj05/WAFLEX-MiddelProject | b255796839c889a16c4900a87f2e5adcd1337a44 | ca8db1e368104f75218a8da9a0f987349d27f755 | refs/heads/master | 2023-06-30T23:50:22.367106 | 2021-08-03T01:15:11 | 2021-08-03T01:15:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | import smtplib
import random
import secrets
import string
from email.mime.text import MIMEText
class MyMail:
    """Send a randomly generated 8-character verification code by e-mail."""

    # Special characters allowed in the verification code (same pool as the
    # original implementation).
    _SPECIALS = ['~', '!', '@', '#', '$', '%', '^', '*']

    @staticmethod
    def _make_auth_code():
        """Return an 8-char code: 2 lowercase, 2 uppercase, 2 digits, 2 specials.

        Uses the ``secrets`` module (a CSPRNG) instead of ``random`` because
        the code is used for authentication and must not be predictable.
        """
        lower = string.ascii_lowercase
        upper = string.ascii_uppercase
        digits = string.digits
        code = secrets.choice(lower) + secrets.choice(lower)
        code += secrets.choice(upper) + secrets.choice(upper)
        code += secrets.choice(digits) + secrets.choice(digits)
        code += secrets.choice(MyMail._SPECIALS) + secrets.choice(MyMail._SPECIALS)
        return code

    def mysendmail(self, recvEmail, title):
        """Mail a fresh verification code to *recvEmail* and return the code.

        Args:
            recvEmail: recipient address.
            title: message subject line.

        Returns:
            The 8-character code that was mailed, for the caller to compare
            against the user's input.
        """
        smtpName = "smtp.gmail.com"  # SMTP server address
        smtpPort = 587               # SMTP submission (STARTTLS) port
        sendEmail = "sysojxx@gmail.com"
        # SECURITY NOTE(review): account password is hard-coded in source
        # control; load it from configuration/environment instead.
        password = "Qwe123!@#"

        result = self._make_auth_code()

        # User-facing Korean message body; kept byte-identical.
        text = "인증하실 번호는 " + result + " 입니다."
        msg = MIMEText(text)
        msg['Subject'] = title
        msg['From'] = sendEmail
        msg['To'] = recvEmail
        print(msg.as_string())

        s = smtplib.SMTP(smtpName, smtpPort)  # connect to the mail server
        try:
            s.starttls()                       # upgrade to TLS
            s.login(sendEmail, password)
            s.sendmail(sendEmail, recvEmail, msg.as_string())
        finally:
            s.close()  # release the connection even if sending failed
        return result
| [
"shywj05@gmail.com"
] | shywj05@gmail.com |
307cd74f01bac3f5f7372ab4a7ad3205c7496789 | 69ff10443014ac749d6e8e5eb8dd65f0ae995e0c | /install/opt/etc/gunicorn/sigproxy_config.py | 4764190e9bef0dfc137545068bab743d1f6bf0fd | [] | no_license | identinetics/d-PVZDweb | 1443051a961c2ffff3cef8fc5326a651d4783443 | 1e269546a505eed121e32cf22b7f2227e6473c95 | refs/heads/master | 2021-07-13T01:46:34.025748 | 2019-05-09T07:20:18 | 2019-05-09T07:22:06 | 160,792,534 | 0 | 1 | null | 2020-06-08T13:27:09 | 2018-12-07T08:10:35 | Shell | UTF-8 | Python | false | false | 786 | py | import os
from seclay_xmlsig_proxy_config import SigProxyConfig as Cfg
# Parameter description: see https://github.com/benoitc/gunicorn/blob/master/examples/example_config.py
# Server socket: host/port come from the shared SigProxyConfig.
bind = Cfg.host + ':' + str(Cfg.port)

# Logging: combined-style access log plus separate error log.
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
accesslog = '/var/log/sigproxy/access.log'
errorlog = '/var/log/sigproxy/error.log'
loglevel = 'info'
pidfile = '/var/run/sigproxy/gunicorn.pid'

# Worker model: a single synchronous worker.
backlog = 64
workers = 1
worker_class = 'sync'
worker_connections = 1000  # only relevant for async worker classes
timeout = 30
keepalive = 2

spew = False   # True would trace every line the server executes
daemon = True  # detach from the controlling terminal

# Pass the CSRF secrets through to the worker processes.  Both variables
# must exist in the launching environment (KeyError here otherwise).
raw_env = [
    'CSRFENCRYPTKEY=' + os.environ['CSRFENCRYPTKEY'],
    'CSRFSECRET=' + os.environ['CSRFSECRET'],
]
# raw_env.append('DEBUG=') # activate this to set workers = 1

# Process ownership: keep the invoking user/group, no umask change.
umask = 0
user = None
group = None
"rainer@hoerbe.at"
] | rainer@hoerbe.at |
cbcfdc0e60f18a05779c713ed704226606269649 | 7950091dfd123b9fbe020cb8c9f529e98f7a89d8 | /weatherenv/Lib/site-packages/pipenv/vendor/backports/__init__.py | 0c64b4c10b513a109244dbb31d44bb616bdcf10c | [
"LicenseRef-scancode-python-cwi",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"MIT",
"BSD-3-Clause"
] | permissive | diyajaiswal11/Weather-App | d0000ebd12fb051cca8a4c56da4418c89714fb3e | f5e1bca505f6643d870b905577a383a10d17b026 | refs/heads/master | 2022-12-11T05:16:38.941949 | 2020-08-25T18:02:18 | 2020-08-25T18:02:18 | 231,254,913 | 3 | 0 | MIT | 2022-12-08T03:22:58 | 2020-01-01T19:25:37 | Python | UTF-8 | Python | false | false | 179 | py | __path__ = __import__('pkgutil').extend_path(__path__, __name__)
from . import weakref
from . import enum
from . import shutil_get_terminal_size
from . import functools_lru_cache
| [
"shubhijaiswal2000@gmail.com"
] | shubhijaiswal2000@gmail.com |
76d348dd6ae297a33d077ef1b31f5028fbf0cb36 | a8d86cad3f3cc6a977012d007d724bbaf02542f7 | /testsuites_dev/vui/sandbox/parse_test_suite.py | af2cfbb7b8f5b922afabd76a9b2df5c5256e8d27 | [] | no_license | bopopescu/bigrobot | f8d971183119a1d59f21eb2fc08bbec9ee1d522b | 24dad9fb0044df5a473ce4244932431b03b75695 | refs/heads/master | 2022-11-20T04:55:58.470402 | 2015-03-31T18:14:39 | 2015-03-31T18:14:39 | 282,015,194 | 0 | 0 | null | 2020-07-23T17:29:53 | 2020-07-23T17:29:52 | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/env python
import sys
from robot.api import TestData
def print_suite(suite):
print 'Suite:', suite.name
for test in suite.testcase_table:
print '-', test.name
for child in suite.children: # recurse through testsuite directory
print_suite(child)
# Entry point: parse the suite path given as the first CLI argument and
# dump its tree of suites and test names.
suite = TestData(source=sys.argv[1])
print_suite(suite)
| [
"vui.le@bigswitch.com"
] | vui.le@bigswitch.com |
ccc0c00ee714e842e0109aed656cd984dce4fb0a | b7add0d1b1effc50b27d3316fa5889a5227e5b19 | /Micropython/backups/tests/archieved/servo_driver_test.py | b14cc8b334a2136f39fec59f7d606d07e82b091c | [] | no_license | Woz4tetra/Atlas | efb83a7c7b2698bf8b36b023f7aa573cc38284f6 | c7380868a9efef9d1594ed7aa87187f03a7e4612 | refs/heads/master | 2020-04-04T06:25:50.657631 | 2017-04-05T01:53:15 | 2017-04-05T01:53:15 | 50,269,756 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import pyb
from libraries.pca9685 import ServoDriver
servo_driver = ServoDriver(2, -90, 90, 150, 600)
assert servo_driver.angle_to_pulse(-90) == 150
assert servo_driver.angle_to_pulse(90) == 600
assert servo_driver.angle_to_pulse(0) == 375
# servo_driver.servo_angle_min =
# servo_driver.servo_angle_max =
# servo_driver.servo_pulse_min =
# servo_driver.servo_pulse_max =
servo_driver.conversion = \
(servo_driver.servo_pulse_max - servo_driver.servo_pulse_min) / (
servo_driver.servo_angle_max - servo_driver.servo_angle_min)
for value in range(servo_driver.servo_angle_min,
servo_driver.servo_angle_max + 1, 10):
for servo_num in range(16):
servo_driver.set_servo(servo_num, value)
print(value)
pyb.delay(200)
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
3159f08ce73dc31e6bc9ee9d40859fdff73dd26b | 7fb87945b77d3adaedd8a155c981e97946734e41 | /cachetools/func.py | 78ec7f632964682cc6be0e5d5585a16cb22d26d7 | [] | no_license | Tony910517/openstack | 916b36368ea9f17958e4eb04bd1f9daf3aba9213 | 4c1380a03c37e7950dcf2bba794e75b7e4a8dfd0 | refs/heads/master | 2020-05-20T01:05:22.499224 | 2019-05-07T01:11:05 | 2019-05-07T01:11:05 | 185,292,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | import collections
import functools
import random
import time
from .lfu import LFUCache
from .lru import LRUCache
from .rr import RRCache
from .ttl import TTLCache
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
# Statistics tuple returned by the wrapped function's cache_info().
_CacheInfo = collections.namedtuple('CacheInfo', [
    'hits', 'misses', 'maxsize', 'currsize'
])
class _NullContext:
    """Do-nothing context manager used when no lock factory is supplied."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None (falsy) lets any exception propagate unchanged.
        return None


# Shared singleton; the class is stateless so one instance suffices.
_nullcontext = _NullContext()
def _makekey_untyped(args, kwargs):
    """Build a hashable cache key from positional and keyword arguments."""
    # Sort keyword items so the key is independent of call-site kwarg order.
    sorted_items = tuple(sorted(kwargs.items()))
    return (args, sorted_items)
def _makekey_typed(args, kwargs):
    """Build a cache key that also distinguishes argument types.

    Appends the type of every positional and keyword value so e.g. 1 and
    1.0 (which compare equal) do not collide in the cache.
    """
    sorted_items = sorted(kwargs.items())
    key = (args, tuple(sorted_items))
    key += tuple(type(value) for value in args)
    key += tuple(type(value) for _, value in sorted_items)
    return key
def _cachedfunc(cache, typed=False, lock=None):
    """Return a memoizing decorator backed by *cache*.

    Args:
        cache: mapping with maxsize/currsize attributes (a cachetools cache).
        typed: if True, arguments of different types cache separately.
        lock: factory (e.g. threading.RLock) producing the context manager
            that guards cache and statistics access; None means no locking.
    """
    makekey = _makekey_typed if typed else _makekey_untyped
    context = lock() if lock else _nullcontext
    def decorator(func):
        stats = [0, 0]  # [hits, misses], shared by the closures below
        def wrapper(*args, **kwargs):
            key = makekey(args, kwargs)
            with context:
                try:
                    result = cache[key]
                    stats[0] += 1
                    return result
                except KeyError:
                    stats[1] += 1
            # Deliberately computed OUTSIDE the lock so a slow func does not
            # serialize all callers (concurrent misses may compute twice).
            result = func(*args, **kwargs)
            with context:
                try:
                    cache[key] = result
                except ValueError:
                    pass # value too large
            return result
        def cache_info():
            """Snapshot (hits, misses, maxsize, currsize) under the lock."""
            with context:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)
        def cache_clear():
            """Reset statistics and drop all cached values."""
            with context:
                stats[:] = [0, 0]
                cache.clear()
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return functools.update_wrapper(wrapper, func)
    return decorator
def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
    """Decorator memoizing up to `maxsize` results, evicting the Least
    Frequently Used (LFU) entry when full.
    """
    cache = LFUCache(maxsize, getsizeof)
    return _cachedfunc(cache, typed, lock)
def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
    """Decorator memoizing up to `maxsize` results, evicting the Least
    Recently Used (LRU) entry when full.
    """
    cache = LRUCache(maxsize, getsizeof)
    return _cachedfunc(cache, typed, lock)
def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
             lock=RLock):
    """Decorator memoizing up to `maxsize` results, evicting a Random
    Replacement (RR) victim chosen by `choice` when full.
    """
    cache = RRCache(maxsize, choice, getsizeof)
    return _cachedfunc(cache, typed, lock)
def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
              getsizeof=None, lock=RLock):
    """Decorator memoizing up to `maxsize` results with LRU eviction plus a
    per-item time-to-live (TTL) measured by `timer`.
    """
    cache = TTLCache(maxsize, ttl, timer, getsizeof)
    return _cachedfunc(cache, typed, lock)
| [
"471123674@qq.com"
] | 471123674@qq.com |
7943b6a769c17e914c502d093500da05a3a32b96 | 37482bcc34c569b2042cc4f893a92360cb5cbca6 | /shell/userinfo.py | ed06ab41df9ca3fd84acfc2b025e1ee08f2ecb10 | [] | no_license | hustmonk/k21 | 8442b7bdc6eb92282add59a4ee9166a89897d3f4 | 12279e970da57150154ef3a6343afccb8b992870 | refs/heads/master | 2016-09-05T09:22:48.506265 | 2015-07-11T13:01:33 | 2015-07-11T13:01:33 | 35,711,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/env python
# -*- coding: GB2312 -*-
# Last modified:
"""docstring
"""
__revision__ = '0.1'
from common import *
from weekend import *
from Object import *
class Userinfo:
    """Per-user data loaded from conf/user.info.

    Expected line format: uid<TAB>num<TAB>day,day,...  The exact meaning
    of `num` and the day strings is defined by the conf file producers --
    not visible here.
    """
    def __init__(self):
        # uid -> integer count (stored as the last feature slot below)
        self.uid_num = {}
        # uid -> list of day strings
        self.uid_days = {}
        for line in open("conf/user.info"):
            uid,num,days = line.strip().split("\t")
            self.uid_num[uid] = int(num)
            self.uid_days[uid] = days.split(",")
        # Helpers from the star-imported weekend/Object modules.
        self.week = Week()
        self.obj = Obj()
    def get_num(self, uid):
        """Return the integer recorded for uid (KeyError if unknown)."""
        return self.uid_num[uid]
    def get_info(self, uid):
        """Return the list of day strings recorded for uid."""
        return self.uid_days[uid]
    def get_features(self, uid, course_id):
        """Build a feature vector of length CIDX_VEC_NUM + 1 for uid.

        Each day is mapped to an index via Obj.get_index(course_id,
        Week.times(day)) and histogrammed; the final slot holds the
        user's num value.
        """
        f = [0]*(CIDX_VEC_NUM+1)
        for day in self.get_info(uid):
            cidx = self.obj.get_index(course_id, self.week.times(day))
            f[cidx] = f[cidx] + 1
        f[CIDX_VEC_NUM] = self.get_num(uid)
        return f
if __name__ == "__main__":
userinfo = Userinfo()
print userinfo.get_features("vCk71G02ss3o0puuBIhnOZwxNIZqe2KE", "3cnZpv6ReApmCaZyaQwi2izDZxVRdC01")
| [
"liujingminghust@163.com"
] | liujingminghust@163.com |
f0f5d2e0e0693be10cb308b2a39769d289e2ecbc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /hoxv8zaQJNMWJqnt3_6.py | 80095df9794340c219bca34f726b867be42b5f8d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
def is_heteromecic(n, test=0):
    """Return True if n is a heteromecic (pronic) number: n == k*(k+1) for
    some integer k >= test.

    Rewritten in closed form: the recursive version hit Python's recursion
    limit for large n (depth ~ sqrt(n)), used float sqrt (imprecise for big
    integers), and raised on negative n.  The `test` parameter is kept for
    backward compatibility as the smallest k considered.
    """
    from math import isqrt  # local import keeps this a drop-in replacement
    if n < 0:
        return False  # k*(k+1) is never negative
    k = isqrt(n)  # exact floor sqrt; a pronic n satisfies n == k*(k+1) here
    return k >= test and k * (k + 1) == n
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fb6ce6d5ebf417aab66391b1ce2da8a5afc32d15 | 71bd623429f3b9f3701603836cf91f98436d48a7 | /tests/test_compute_embeddings.py | 0bfee0d2862e82494e0f8b5576c22f2b73997d55 | [
"Apache-2.0"
] | permissive | xiaobiaohust/sentence-transformers | b9f7d41901ef3159cb933e3d7d4f2e7698503975 | 167e4567670d711ef543239d0b922858c796a2fc | refs/heads/master | 2023-03-23T05:46:18.989786 | 2021-03-17T21:20:15 | 2021-03-17T21:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | """
Computes embeddings
"""
import unittest
from sentence_transformers import SentenceTransformer
import numpy as np
class ComputeEmbeddingsTest(unittest.TestCase):
    """Smoke tests for SentenceTransformer.encode / encode_multi_process.

    NOTE(review): several tests assert hard-coded embedding sums; those
    values are tied to the exact published
    'paraphrase-distilroberta-base-v1' weights and will break if the
    hosted model changes.
    """
    def setUp(self):
        # Downloads the pretrained model on first use (network required).
        self.model = SentenceTransformer('paraphrase-distilroberta-base-v1')
    def test_encode_token_embeddings(self):
        """
        Test that encode(output_value='token_embeddings') works
        :return:
        """
        sent = ["Hello Word, a test sentence", "Here comes another sentence", "My final sentence", "Sentences", "Sentence five five five five five five five"]
        emb = self.model.encode(sent, output_value='token_embeddings', batch_size=2)
        assert len(emb) == len(sent)
        # One embedding row per input token of each sentence.
        for s, e in zip(sent, emb):
            assert len(self.model.tokenize([s])['input_ids'][0]) == e.shape[0]
    def test_encode_single_sentences(self):
        """encode() handles a bare string, a 1-list, and an n-list."""
        #Single sentence
        emb = self.model.encode("Hello Word, a test sentence")
        assert emb.shape == (768,)
        assert abs(np.sum(emb) - 7.9811716) < 0.001  # regression checksum
        # Single sentence as list
        emb = self.model.encode(["Hello Word, a test sentence"])
        assert emb.shape == (1, 768)
        assert abs(np.sum(emb) - 7.9811716) < 0.001
        # Sentence list
        emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"])
        assert emb.shape == (3, 768)
        assert abs(np.sum(emb) - 22.968266) < 0.001
    def test_encode_normalize(self):
        """normalize_embeddings=True must yield (near) unit-length vectors."""
        emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"], normalize_embeddings=True)
        assert emb.shape == (3, 768)
        for norm in np.linalg.norm(emb, axis=1):
            assert abs(norm - 1) < 0.001
    def test_encode_tuple_sentences(self):
        """encode() accepts (text, text) pairs as cross-encoder style input."""
        # Input a sentence tuple
        emb = self.model.encode([("Hello Word, a test sentence", "Second input for model")])
        assert emb.shape == (1, 768)
        assert abs(np.sum(emb) - 9.503508) < 0.001
        # List of sentence tuples
        emb = self.model.encode([("Hello Word, a test sentence", "Second input for model"), ("My second tuple", "With two inputs"), ("Final tuple", "final test")])
        assert emb.shape == (3, 768)
        assert abs(np.sum(emb) - 32.14627) < 0.001
    def test_multi_gpu_encode(self):
        """Multi-process encoding must match single-process results."""
        # Start the multi-process pool on all available CUDA devices
        # (two CPU workers here so the test also runs without GPUs).
        pool = self.model.start_multi_process_pool(['cpu', 'cpu'])
        sentences = ["This is sentence {}".format(i) for i in range(1000)]
        # Compute the embeddings using the multi-process pool
        emb = self.model.encode_multi_process(sentences, pool, chunk_size=50)
        assert emb.shape == (1000, 768)
        emb_normal = self.model.encode(sentences)
        diff = np.sum(np.abs(emb - emb_normal))
        assert diff < 0.001
| [
"rnils@web.de"
] | rnils@web.de |
ed8a1eaea1d7b77ed9b4b067104c6a228d5336a4 | 6a6984544a4782e131510a81ed32cc0c545ab89c | /src/simprod-scripts/resources/tests/generators/nugen-generator.py | 934c279088774b490b79df7b1f9a5806373d3362 | [] | no_license | wardVD/IceSimV05 | f342c035c900c0555fb301a501059c37057b5269 | 6ade23a2fd990694df4e81bed91f8d1fa1287d1f | refs/heads/master | 2020-11-27T21:41:05.707538 | 2016-09-02T09:45:50 | 2016-09-02T09:45:50 | 67,210,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | #!/usr/bin/env python
"""Ensure that the NuGen API hasn't changed (too much)"""
import os
import tempfile
import shutil
from icecube.simprod.modules.nugen import NuGen
from icecube import icetray, dataclasses, dataio
from I3Tray import I3Tray
# FIX: create the scratch directory BEFORE the try block -- if mkdtemp
# itself raised, the original code left `tmpdir` unbound and the finally
# clause died with a NameError that masked the real failure.
tmpdir = tempfile.mkdtemp(dir=os.getcwd())
try:
    tmpfile = os.path.join(tmpdir, 'test.i3')
    summaryfile = os.path.join(tmpdir, 'summary.xml')
    gcdfile = os.path.expandvars('$I3_TESTDATA/sim/GCD.i3.gz')

    # make a very small nugen file (a single NuMu event)
    n = NuGen()
    n.SetParameter('nevents', 1)
    n.SetParameter('outputfile', tmpfile)
    n.SetParameter('summaryfile', summaryfile)
    n.SetParameter('gcdfile', gcdfile)
    n.SetParameter('mjd', 55697)
    n.SetParameter('NuFlavor', 'NuMu')
    if n.Execute({}) != 0:
        raise Exception('NuGen did not return OK')

    # now read the generated file back and check its DAQ frames
    tray = I3Tray()
    tray.Add('I3Reader', filename=tmpfile)
    def checky(frame):
        # Every DAQ frame must carry the primary particle and the MC tree.
        assert('NuGPrimary' in frame)
        assert('I3MCTree' in frame)
    tray.Add(checky, Streams=[icetray.I3Frame.DAQ])
    tray.Execute()
    tray.Finish()
finally:
    shutil.rmtree(tmpdir)  # always remove the scratch directory
| [
"wardvandriessche@gmail.com"
] | wardvandriessche@gmail.com |
ef990efbcc159fa01bb54f036ae7fdee2768ce9c | b67958ebbde7538f6c5dc0305ed278f7c1a9528a | /networking_tn/common/config.py | 4d6a5bfa7757d4899831213ab593e0c75d393d7d | [
"Apache-2.0"
] | permissive | xjforfuture/networking-ngfw | 4c6b04ede370ba9888422715d5a4be7858335fe0 | 26fa3aa94e0ae733dde47c82f3372afeb07ff24b | refs/heads/master | 2021-05-15T06:34:12.658302 | 2017-12-12T07:01:31 | 2017-12-12T07:01:31 | 113,798,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | # Copyright 2015 Fortinet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fortiosclient import client
from oslo_config import cfg
from networking_fortinet._i18n import _
ML2_FORTINET = [
cfg.StrOpt('address', default='',
help=_('The address of fortigates to connect to')),
cfg.StrOpt('port', default='443',
help=_('The FGT port to serve API requests')),
cfg.StrOpt('protocol', default='https',
help=_('The FGT uses which protocol: http or https')),
cfg.StrOpt('username', default='admin',
help=_('The username used to login')),
cfg.StrOpt('password', default='password', secret=True,
help=_('The password used to login')),
cfg.StrOpt('int_interface', default='internal',
help=_('The interface to serve tenant network')),
cfg.StrOpt('ext_interface', default='',
help=_('The interface to the external network')),
cfg.StrOpt('tenant_network_type', default='vlan',
help=_('tenant network type, default is vlan')),
cfg.StrOpt('vlink_vlan_id_range', default='3500:4000',
help=_('vdom link vlan interface, default is 3500:4000')),
cfg.StrOpt('vlink_ip_range', default='169.254.0.0/20',
help=_('vdom link interface IP range, '
'default is 169.254.0.0/20')),
cfg.StrOpt('vip_mappedip_range', default='169.254.128.0/23',
help=_('The intermediate IP range in floating IP process, '
'default is 169.254.128.0/23')),
cfg.BoolOpt('npu_available', default=True,
help=_('If npu_available is True, it requires hardware FGT'
'with NPU, default is True')),
cfg.BoolOpt('enable_default_fwrule', default=False,
help=_('If True, fwaas will add a deny all rule automatically,'
' otherwise users need to add it manaully.')),
cfg.StrOpt('av_profile', default=None,
help=_('Assign a default antivirus profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('webfilter_profile', default=None,
help=_('Assign a default web filter profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('ips_sensor', default=None,
help=_('Assign a default IPS profile in FWaaS, '
'the profile must exist in FGT, default is ""')),
cfg.StrOpt('application_list', default=None,
help=_('Assign a default application control profile in FWaaS,'
' the profile must exist in FGT, default is ""')),
cfg.StrOpt('ssl_ssh_profile', default=None,
help=_('Assign a default SSL/SSH inspection profile in FWaaS, '
'the profile must exist in FGT, default is ""'))
]
cfg.CONF.register_opts(ML2_FORTINET, "ml2_fortinet")
fgt_info = {
'address': cfg.CONF.ml2_fortinet.address,
'port': cfg.CONF.ml2_fortinet.port,
'protocol': cfg.CONF.ml2_fortinet.protocol,
'username': cfg.CONF.ml2_fortinet.username,
'password': cfg.CONF.ml2_fortinet.password,
'int_interface': cfg.CONF.ml2_fortinet.int_interface,
'ext_interface': cfg.CONF.ml2_fortinet.ext_interface,
'tenant_network_type': cfg.CONF.ml2_fortinet.tenant_network_type,
'vlink_vlan_id_range': cfg.CONF.ml2_fortinet.vlink_vlan_id_range,
'vlink_ip_range': cfg.CONF.ml2_fortinet.vlink_ip_range,
'vip_mappedip_range': cfg.CONF.ml2_fortinet.vip_mappedip_range,
'npu_available': cfg.CONF.ml2_fortinet.npu_available,
'enable_default_fwrule': cfg.CONF.ml2_fortinet.enable_default_fwrule,
'av_profile': cfg.CONF.ml2_fortinet.av_profile,
'webfilter_profile': cfg.CONF.ml2_fortinet.webfilter_profile,
'ips_sensor': cfg.CONF.ml2_fortinet.ips_sensor,
'application_list': cfg.CONF.ml2_fortinet.application_list,
'ssl_ssh_profile': cfg.CONF.ml2_fortinet.ssl_ssh_profile
}
def get_apiclient():
    """Build and return a FortiosApiClient configured from ``fgt_info``."""
    host = fgt_info['address']
    port = fgt_info['port']
    use_https = fgt_info['protocol'] == 'https'
    servers = [(host, port, use_https)]
    return client.FortiosApiClient(servers,
                                   fgt_info['username'],
                                   fgt_info['password'])
| [
"xjforfuture@163.com"
] | xjforfuture@163.com |
93120f4f678d41f66a3161ce124689235c26903b | 0c6100dc16291986fab157ed0437f9203f306f1b | /2000- 3000/2712.py | 1116e8cbeb79eb95c5222b1816185b7230018b3d | [] | no_license | Matuiss2/URI-ONLINE | 4c93c139960a55f7cc719d0a3dcd6c6c716d3924 | 6cb20f0cb2a6d750d58b826e97c39c11bf8161d9 | refs/heads/master | 2021-09-17T09:47:16.209402 | 2018-06-30T08:00:14 | 2018-06-30T08:00:14 | 110,856,303 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | import re
# Rodizio (license-plate rotation) lookup: the plate's last digit determines
# the weekday on which the vehicle is restricted.

# Compiled once instead of on every loop iteration (loop-invariant hoist).
PLACA_RE = re.compile('([A-Z]{3})-([0-9]{4})')  # AAA-9999 format

# Last plate digit -> restricted weekday.
DIA_POR_DIGITO = {
    "1": "MONDAY", "2": "MONDAY",
    "3": "TUESDAY", "4": "TUESDAY",
    "5": "WEDNESDAY", "6": "WEDNESDAY",
    "7": "THURSDAY", "8": "THURSDAY",
    "9": "FRIDAY", "0": "FRIDAY",
}

loops = int(input())
for _ in range(loops):
    data = input()
    # The regex alone would accept trailing junk such as "AAA-9999x",
    # so the exact length is checked as well (fullmatch semantics).
    if PLACA_RE.match(data) and len(data) == 8:
        print(DIA_POR_DIGITO[data[-1]])
    else:
        print("FAILURE")
"noreply@github.com"
] | Matuiss2.noreply@github.com |
9ce14fca10d2ddb156b0712bfbb6af3a9ece4b33 | 5ca4a0d91f5bd119e80478b5bd3d43ed30133a42 | /film20/core/forms.py | adb00b502ae71fb5993bc1d567b86ff73d69687b | [] | no_license | thuvh/filmmaster | 1fc81377feef5a9e13f792b329ef90f840404ec5 | dd6a2ee5a4951b2397170d5086c000169bf91350 | refs/heads/master | 2021-01-17T16:10:54.682908 | 2012-04-29T18:19:52 | 2012-04-29T18:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,150 | py | #-------------------------------------------------------------------------------
# Filmaster - a social web network and recommendation engine
# Copyright (c) 2009 Filmaster (Borys Musielak, Adam Zielinski).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
from django.utils.translation import gettext_lazy as _
from django import forms
from django.forms.util import ErrorList
from django.utils.translation import string_concat
from film20.core.search_helper import *
from film20.utils.slughifi import slughifi
import logging
logger = logging.getLogger(__name__)
def get_related_people_as_comma_separated_string(related_people):
    """Render an iterable of people as a single ", "-separated unicode string.

    Using ``join`` instead of appending ``", "`` after every name and then
    calling ``rstrip(", ")`` fixes a subtle bug: ``rstrip`` strips *any*
    trailing run of commas and spaces, so a name that itself ended with
    "," or " " was silently truncated.  Returns an empty string for an
    empty iterable, as before.
    """
    return u", ".join(unicode(person) for person in related_people)
def do_clean_related_person(self, related_person_str='related_person'):
    """Validate a comma-separated list of person names from form data.

    Each name is looked up via ``Search_Helper``; exact matches are
    collected and returned, while near-misses record a suggestion message
    in ``self._errors``.  Intended as a Django form ``clean_*`` helper
    (``self`` is the form instance).

    :param related_person_str: key in ``self.cleaned_data`` holding the
        raw comma-separated names (default ``'related_person'``).
    :returns: list of matched person objects; or the value unchanged when
        it is already a list/tuple; or the empty field value for no input.
    """
    related = []
    related_person_form_data = self.cleaned_data[related_person_str]
    if isinstance(related_person_form_data, (list, tuple)):
        # Already structured data; pass it through untouched.
        return related_person_form_data
    if len(related_person_form_data) == 0:
        # NOTE(review): '==' below is a comparison, not an assignment — it
        # has no effect.  Probably '=' was intended to normalize the field.
        self.cleaned_data[related_person_str] == ""
        return self.cleaned_data[related_person_str]
    else:
        # Normalize separators ("a, b" -> "a,b") and drop trailing commas.
        related_person_form_data = related_person_form_data.replace(", ", ",").rstrip(", ")
        related_people = related_person_form_data.rstrip(",").split(",")
        for related_person in related_people:
            # NOTE(review): related_permalink, person_name/person_surname and
            # person_permalink below are computed but never used afterwards.
            related_permalink = slughifi(related_person)
            namesurname = related_person.split(" ")
            person_name = None
            person_surname = None
            if len(namesurname) != 1:
                # Last token is the surname; everything before it the name.
                person_surname = namesurname[-1]
                namesurname = namesurname[:-1]
                person_name = " ".join(namesurname)
            else:
                person_surname = namesurname[0]
            # Full-text search for candidate people matching the raw phrase.
            search_helper = Search_Helper()
            search_results = search_helper.search_person_by_phrase(related_person)
            best_results = list(search_results['best_results'])
            other_results = list(search_results['results'])
            people = best_results + other_results
            if people:
                names = ""
                for person in people:
                    person_permalink = slughifi(unicode(person))
                    if related_person != unicode(person):
                        # Not an exact match: accumulate suggestions and
                        # record a validation error for this field.
                        names = names + ", " + unicode(person)
                        msg = string_concat(_('Person'), " '", unicode(related_person), "' ", _('is not present in the database!'), " ", _('Maybe you were looking for these people:'), names)
                        self._errors[related_person_str] = ErrorList([msg])
                    else:
                        # Exact match found; keep it and stop scanning.
                        related.append(person)
                        break
            else:
                # No candidates at all for this name.
                msg = string_concat(_('Person is not present in the database:'), unicode(related_person))
                self._errors[related_person_str] = ErrorList([msg])
    logger.debug("Related: " + str(related))
    return related
def comma_split(s):
    """Yield the comma-separated fields of *s*, honouring backslash escapes.

    A backslash escapes the following character, so ``\\,`` is a literal
    comma and ``\\\\`` a literal backslash.  Fields are stripped of
    surrounding whitespace and empty fields are skipped.

    Bug fix: the previous implementation tracked the escape state via the
    last character *seen*, so after an escaped backslash the next character
    was also treated as escaped — e.g. ``a\\\\,b`` yielded one field
    ``a\\,b`` instead of the two fields ``a\\`` and ``b``.  The escape
    state is now a boolean that is consumed exactly once.
    """
    field = ''
    escaped = False
    for ch in s:
        if escaped:
            # Previous char was an unconsumed backslash: take this literally.
            field += ch
            escaped = False
        elif ch == '\\':
            escaped = True
        elif ch == ',':
            field = field.strip()
            if field:
                yield field
            field = ''
        else:
            field += ch
    field = field.strip()
    if field:
        yield field
def comma_escape(s):
    """Escape backslashes and commas in *s* so comma_split can round-trip it."""
    # One translate() pass instead of two chained replace() calls; the
    # single pass also makes the "escape backslashes first" ordering moot.
    return s.translate({ord('\\'): '\\\\', ord(','): '\\,'})
| [
"email@ibrahimcesar.com"
] | email@ibrahimcesar.com |
79ccd0a82b25f9eaa786743f1ddefc8eaeb949f1 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /output/python37/Lib/email/errors.py | 95aa000352daf17b4eed229313953cfe30607bb5 | [
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL",
"BSD-3-Clause"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 3,642 | py | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
class MessageError(Exception):
"""Base class for errors in the email package."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
class MultipartConversionError(MessageError, TypeError):
"""Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
"""An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
    """Root of the hierarchy of parsing defects the parser worked around."""

    def __init__(self, line=None):
        # Forward the offending line to ValueError only when one was given,
        # mirroring a plain no-argument ValueError() otherwise.
        args = () if line is None else (line,)
        super().__init__(*args)
        self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
"""A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
"""The claimed start boundary was never found."""
class CloseBoundaryNotFoundDefect(MessageDefect):
"""A start boundary was found, but not the corresponding close boundary."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
"""A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
"""A 'Unix-from' header was found in the middle of a header block."""
class MissingHeaderBodySeparatorDefect(MessageDefect):
"""Found line with no leading whitespace and no colon before blank line."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class MultipartInvariantViolationDefect(MessageDefect):
"""A message claimed to be a multipart but no subparts were found."""
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
"""An invalid content transfer encoding was set on the multipart itself."""
class UndecodableBytesDefect(MessageDefect):
"""Header contained bytes that could not be decoded"""
class InvalidBase64PaddingDefect(MessageDefect):
"""base64 encoded sequence had an incorrect length"""
class InvalidBase64CharactersDefect(MessageDefect):
"""base64 encoded sequence had characters not in base64 alphabet"""
# These errors are specific to header parsing.
class HeaderDefect(MessageDefect):
"""Base class for a header defect."""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
class InvalidHeaderDefect(HeaderDefect):
"""Header is not valid, message gives details."""
class HeaderMissingRequiredValue(HeaderDefect):
"""A header that must have a value had none"""
class NonPrintableDefect(HeaderDefect):
    """Header contained ASCII characters outside the printable range."""

    def __init__(self, non_printables):
        super().__init__(non_printables)
        # Kept on the instance so __str__ can report the offending chars.
        self.non_printables = non_printables

    def __str__(self):
        return ("the following ASCII non-printables found in header: "
                f"{self.non_printables}")
class ObsoleteHeaderDefect(HeaderDefect):
"""Header uses syntax declared obsolete by RFC 5322"""
class NonASCIILocalPartDefect(HeaderDefect):
"""local_part contains non-ASCII characters"""
# This defect only occurs during unicode parsing, not when
# parsing messages decoded from binary.
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
d68c7916badbfb576b18ac6ccf18e9336831c7fd | b05b89e1f6378905bbb62e2a2bf2d4f8e3187932 | /nonDuplicateNumber.py | 47a770f039bc1c3e8aa91571be7ab8bc35b4f9b9 | [
"MIT"
] | permissive | anishmo99/Daily-Interview-Pro | c959cd336209132aebad67a409df685e654cfdfc | d8724e8feec558ab1882d22c9ca63b850b767753 | refs/heads/master | 2023-04-10T08:09:46.089227 | 2021-04-27T07:27:38 | 2021-04-27T07:27:38 | 269,157,996 | 1 | 1 | MIT | 2020-06-08T07:09:19 | 2020-06-03T17:57:21 | C++ | UTF-8 | Python | false | false | 233 | py | def nonDuplicateNumber(nums):
nums.sort()
i=0
while(i<len(nums)-1):
if nums[i]!=nums[i+1]:
return nums[i]
i+=2
return nums[len(nums)-1]
print nonDuplicateNumber([4, 3, 3, 2, 1, 2, 1])
# 1
| [
"ani10sh@gmail.com"
] | ani10sh@gmail.com |
d0200942933649d20982e8d0c1ba94566e8a5c5e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1767.py | 32d55ab5e1dd3b425a76fa09552b6e7c540fce0e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | def getRow( s ):
return [
s[0], s[1], s[2], s[3]
]
def checkCell(s, scores):
    """Return the (O_count, X_count) pair updated for one board symbol.

    'X' credits X, 'O' credits O, and 'T' (the joker) credits both;
    any other symbol (e.g. '.') leaves the tallies untouched.
    """
    increments = {'X': (0, 1), 'O': (1, 0), 'T': (1, 1)}
    if s not in increments:
        return scores
    o_count, x_count = scores
    d_o, d_x = increments[s]
    return (o_count + d_o, x_count + d_x)
def findWinner(board):
    """Classify a 4x4 tic-tac-toe board.

    *board* is indexed ``board[row][col]`` with cells 'X', 'O', 'T'
    (joker, counts for both players) or '.' (empty).

    Returns 'X won', 'O won', 'Draw' (board full, no winner) or
    'Game has not completed' (no winner, empty cells remain).

    Rewritten to score every candidate line (rows, columns, both
    diagonals) with one helper instead of four near-identical scanning
    loops; scan order and the X-before-O tie-break are preserved.
    """
    rng = range(4)

    def _winner(cells):
        # 'T' counts for both players; X wins are checked first, as before.
        x = sum(1 for c in cells if c in ('X', 'T'))
        o = sum(1 for c in cells if c in ('O', 'T'))
        if x == 4:
            return 'X won'
        if o == 4:
            return 'O won'
        return None

    lines = [[board[j][i] for i in rng] for j in rng]       # rows
    lines += [[board[j][i] for j in rng] for i in rng]      # columns
    lines.append([board[i][i] for i in rng])                # main diagonal
    lines.append([board[3 - i][i] for i in rng])            # anti-diagonal

    for cells in lines:
        result = _winner(cells)
        if result is not None:
            return result

    # No winner: the game is a draw only when the board is full.
    if any(board[j][i] == '.' for j in rng for i in rng):
        return 'Game has not completed'
    return 'Draw'
# Read every test case from the Code Jam input file, classify each 4x4
# board, and buffer all answers before writing them out in one go.
input = open('A-large.in','r')  # NOTE(review): shadows the builtin input()
n = int(input.readline())       # number of test cases
i = n
ret = ''
while i > 0:
    case = []
    j = 4
    while j > 0:
        # Each board consists of 4 lines of 4 symbols.
        case.append(getRow(input.readline()))
        j -= 1
    input.readline()  # skip the blank separator line between cases
    i -= 1
    ret += 'Case #'+str(n-i)+': '+findWinner(case)+'\n'
input.close()
output = open('output.txt', 'w')
output.write(ret)
output.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4141d7cc2fbc369307bee0785e1cf1982a1e9a81 | f5652eab2e9efa7ec26cade6c5ecabdd5a067929 | /src/lib/Bcfg2/Options/OptionGroups.py | 70cb5d0dda8a4f57a39a8a38bbe8e52a03f9b0da | [
"BSD-2-Clause",
"mpich2",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dhutty/bcfg2 | 949f3053f3f54beb304fad50182d8c12c72f73ca | fdf47ccf128645bd099f7da80487320e086d17fe | refs/heads/master | 2020-12-24T17:44:06.213854 | 2013-08-12T13:40:08 | 2013-08-12T13:40:08 | 1,522,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,578 | py | """ Option grouping classes """
import re
import copy
import fnmatch
from Options import Option # pylint: disable=W0403
from itertools import chain
__all__ = ["OptionGroup", "ExclusiveOptionGroup", "Subparser",
"WildcardSectionGroup"]
class OptionContainer(list):
    """ Parent class of all option groups """

    def list_options(self):
        """ Get a list of all options contained in this group,
        including options contained in option groups in this group,
        and so on. """
        # chain.from_iterable consumes the per-child lists lazily instead
        # of materializing an intermediate list just to unpack it into
        # chain(*...).
        return list(chain.from_iterable(o.list_options() for o in self))

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))

    def add_to_parser(self, parser):
        """ Add this option group to a :class:`Bcfg2.Options.Parser`
        object. """
        for opt in self:
            opt.add_to_parser(parser)
class OptionGroup(OptionContainer):
    """ Generic option group that is used only to organize options.
    This uses :meth:`argparse.ArgumentParser.add_argument_group`
    behind the scenes. """

    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param title: The title of the option group.  Required.
        :type title: string
        :param description: A longer description of the option group
        :type description: string
        """
        OptionContainer.__init__(self, items)
        self.title = kwargs.pop('title')  # required: raises KeyError if absent
        self.description = kwargs.pop('description', None)

    def add_to_parser(self, parser):
        """Add this group's options to *parser* under a titled argument group."""
        group = parser.add_argument_group(self.title, self.description)
        OptionContainer.add_to_parser(self, group)
class ExclusiveOptionGroup(OptionContainer):
    """ Option group that ensures that only one argument in the group
    is present.  This uses
    :meth:`argparse.ArgumentParser.add_mutually_exclusive_group`
    behind the scenes."""

    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param required: Exactly one argument in the group *must* be
                         specified.
        :type required: boolean
        """
        OptionContainer.__init__(self, items)
        self.required = kwargs.pop('required', False)

    def add_to_parser(self, parser):
        """Register the children under a mutually-exclusive argparse group."""
        group = parser.add_mutually_exclusive_group(required=self.required)
        OptionContainer.add_to_parser(self, group)
class Subparser(OptionContainer):
    """ Option group that adds options in it to a subparser.  This
    uses a lot of functionality tied to `argparse Sub-commands
    <http://docs.python.org/dev/library/argparse.html#sub-commands>`_.

    The subcommand string itself is stored in the
    :attr:`Bcfg2.Options.setup` namespace as ``subcommand``.

    This is commonly used with :class:`Bcfg2.Options.Subcommand`
    groups.
    """

    #: Map of parser -> subparsers action.  argparse allows only one
    #: add_subparsers() call per parser, so the action is cached here and
    #: shared by every Subparser group attached to the same parser.
    _subparsers = dict()

    def __init__(self, *items, **kwargs):
        r"""
        :param \*args: Child options
        :type \*args: Bcfg2.Options.Option
        :param name: The name of the subparser.  Required.
        :type name: string
        :param help: A help message for the subparser
        :type help: string
        """
        self.name = kwargs.pop('name')
        self.help = kwargs.pop('help', None)
        OptionContainer.__init__(self, items)

    def __repr__(self):
        return "%s %s(%s)" % (self.__class__.__name__,
                              self.name,
                              list.__repr__(self))

    def add_to_parser(self, parser):
        """Create (or reuse) the parser's subparsers action and attach a
        sub-command parser holding this group's options."""
        if parser not in self._subparsers:
            self._subparsers[parser] = parser.add_subparsers(dest='subcommand')
        subparser = self._subparsers[parser].add_parser(self.name,
                                                        help=self.help)
        OptionContainer.add_to_parser(self, subparser)
class WildcardSectionGroup(OptionContainer, Option):
""" WildcardSectionGroups contain options that may exist in
several different sections of the config that match a glob. It
works by creating options on the fly to match the sections
described in the glob. For example, consider:
.. code-block:: python
options = [
Bcfg2.Options.WildcardSectionGroup(
Bcfg2.Options.Option(cf=("myplugin:*", "number"), type=int),
Bcfg2.Options.Option(cf=("myplugin:*", "description"))]
If the config file contained ``[myplugin:foo]`` and
``[myplugin:bar]`` sections, then this would automagically create
options for each of those. The end result would be:
.. code-block:: python
>>> Bcfg2.Options.setup
Namespace(myplugin_bar_description='Bar description', myplugin_bar_number=2, myplugin_foo_description='Foo description', myplugin_foo_number=1, myplugin_sections=['myplugin:foo', 'myplugin:bar'])
All options must have the same section glob.
The options are stored in an automatically-generated destination
given by::
<prefix><section>_<destination>
``<destination>`` is the original `dest
<http://docs.python.org/dev/library/argparse.html#dest>`_ of the
option. ``<section>`` is the section that it's found in.
``<prefix>`` is automatically generated from the section glob by
replacing all consecutive characters disallowed in Python variable
names into underscores. (This can be overridden with the
constructor.)
This group stores an additional option, the sections themselves,
in an option given by ``<prefix>sections``.
"""
#: Regex to automatically get a destination for this option
_dest_re = re.compile(r'(\A(_|[^A-Za-z])+)|((_|[^A-Za-z0-9])+)')
def __init__(self, *items, **kwargs):
r"""
:param \*args: Child options
:type \*args: Bcfg2.Options.Option
:param prefix: The prefix to use for options generated by this
option group. By default this is generated
automatically from the config glob; see above
for details.
:type prefix: string
:param dest: The destination for the list of known sections
that match the glob.
:param dest: string
"""
OptionContainer.__init__(self, [])
self._section_glob = items[0].cf[0]
# get a default destination
self._prefix = kwargs.get("prefix",
self._dest_re.sub('_', self._section_glob))
Option.__init__(self, dest=kwargs.get('dest',
self._prefix + "sections"))
self._options = items
def list_options(self):
return [self] + OptionContainer.list_options(self)
def from_config(self, cfp):
sections = []
for section in cfp.sections():
if fnmatch.fnmatch(section, self._section_glob):
sections.append(section)
newopts = []
for opt_tmpl in self._options:
option = copy.deepcopy(opt_tmpl)
option.cf = (section, option.cf[1])
option.dest = self._prefix + section + "_" + option.dest
newopts.append(option)
self.extend(newopts)
for parser in self.parsers:
parser.add_options(newopts)
return sections
def add_to_parser(self, parser):
Option.add_to_parser(self, parser)
OptionContainer.add_to_parser(self, parser)
| [
"chris.a.st.pierre@gmail.com"
] | chris.a.st.pierre@gmail.com |
d32468db483cb91e5c3d3e4cfbbe931b6e5f991c | 92237641f61e9b35ff6af6294153a75074757bec | /Machine Learning/수업 자료/3주차_기계학습 알고리즘/14일차_로지스틱회귀분석/mnistNeuralNet03.py | 77b742b01f40b34c1e9f6e4e0735237777105fdf | [] | no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 2,587 | py | # mnistNeuralNet01.py
from tensorflow.python.keras.datasets import mnist
from keras.utils import to_categorical
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
import matplotlib.pyplot as plt
image_row, image_col, image_dim = 28, 28, 28*28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train[1])
x_train = x_train.reshape(60000, image_dim)
x_train = x_train.astype('float') / 255.0
print(x_train[1])
x_test = x_test.reshape(10000, image_dim)
x_test = x_test.astype('float') / 255.0
# one-hot encoding
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train[0]:', y_train[0])
# 모델 생성
model = Sequential()
# one-hot encoding 이후 이므로 컬럼수로 정답수 계산. np.unique()하면 2 나옴(0,1뿐이므로)
NB_CLASSES = y_train.shape[1]
print('nb: ', NB_CLASSES)
HIDDEN_LAYER_1 = 512
model.add(Dense(units=HIDDEN_LAYER_1, input_shape=(image_dim,), activation='relu'))
model.add(Dense(units=NB_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print('model.fit() 중입니다.')
hist = model.fit(x_train, y_train, validation_split=0.3, epochs=5, batch_size=64, verbose=1)
print('히스토리 목록 보기')
print(hist.history.keys())
print('-'*30)
for key, value in hist.history.items():
print(f'키: {key}, 값: {value}')
print('-'*30)
print('model.evaluate 실행중')
score = model.evaluate(x_test, y_test, verbose=1)
print(f'test_acc: {score[1]: .4f}')
print('-'*30)
print(f'test_loss: {score[0]: .4f}')
print('-'*30)
# # 모델의 정확도에 대한 히스토리를 시각화
# plt.title('model accuracy')
# plt.xlabel('epoch')
# plt.ylabel('accuracy')
#
# accuracy = hist.history['accuracy']
# val_accuracy = hist.history['val_accuracy']
#
# plt.plot(accuracy)
# plt.plot(val_accuracy)
#
# # plot 이후에 legend 설정해야 한다?
# plt.legend(['train', 'test'], loc='upper left')
#
# filename = 'mnistNeuralNet01_01.png'
# plt.savefig(filename)
# print(filename + ' 파일 저장됨')
#
# # 모델의 손실(비용)함수에 대한 히스토리를 시각화
#
# plt.figure()
# plt.title('model loss')
# plt.xlabel('epoch')
# plt.ylabel('loss')
#
# accuracy = hist.history['loss']
# val_accuracy = hist.history['val_loss']
#
# plt.plot(accuracy)
# plt.plot(val_accuracy)
#
# # plot 이후에 legend 설정해야 한다?
# plt.legend(['train', 'test'], loc='best')
#
# filename = 'mnistNeuralNet01_02.png'
# plt.savefig(filename)
# print(filename + ' 파일 저장됨') | [
"taepd1@gmail.com"
] | taepd1@gmail.com |
3bb1ce23ad811a88a750d495850bd3be33a763b0 | 19fb971011a0d3977abfde325f77eedbff180b23 | /kf/kf/doctype/statement_of_account_for_gl/test_statement_of_account_for_gl.py | 93ab173a19461e78fccded89a5ae3a74472b26b3 | [
"MIT"
] | permissive | sagar30051991/KF-HR | a489174181869c0300f4c659e41162fcb84ce80b | 182c52563243fd609473bb5411ad61f789e9441e | refs/heads/master | 2021-01-18T23:33:26.702772 | 2016-05-16T05:20:14 | 2016-05-16T05:20:14 | 58,905,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Statement of Account for GL')
class TestStatementofAccountforGL(unittest.TestCase):
    """Placeholder test case for the 'Statement of Account for GL' doctype.

    No behavior is exercised yet; the class exists so the Frappe test
    runner discovers this doctype's test module.
    """
    pass
| [
"sagarshiragawakar@gmail.com"
] | sagarshiragawakar@gmail.com |
4e09cc6f747a652d23a1385e5d090c163e840bc7 | cb2882bd79c4af7a145f0639a5c7f473b1d22456 | /python/tvm/topi/cumsum.py | 2013a352874d76955b69a5223da7f6bb6280377c | [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | vinx13/tvm | 8e37dd7735eeadc476596ba96f683a93f44a26c3 | fe398bf206d01b54a2d74603e6bc9c012d63b2c9 | refs/heads/master | 2023-08-30T17:50:49.337568 | 2021-02-17T08:16:26 | 2021-02-17T08:16:26 | 141,384,391 | 4 | 0 | Apache-2.0 | 2022-09-21T18:53:08 | 2018-07-18T05:16:49 | Python | UTF-8 | Python | false | false | 4,159 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Cumsum operator"""
from ..tir import decl_buffer, ir_builder
from ..te import extern
from .utils import prod, get_const_int
from .math import cast
def cumsum(data, axis=None, dtype=None, exclusive=None):
    """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    data : tvm.te.Tensor
        The input data to the operator.

    axis : int, optional
        Axis along which the cumulative sum is computed. The default (None) is to compute
        the cumsum over the flattened array.

    dtype : string, optional
        Type of the returned array and of the accumulator in which the elements are summed.
        If dtype is not specified, it defaults to the dtype of data.

    exclusive : int, optional
        If set to 1 will return exclusive sum in which the first element is not
        included. In other terms, if set to 1, the j-th output element would be
        the sum of the first (j-1) elements. Otherwise, it would be the sum of
        the first j elements.

    Returns
    -------
    result : tvm.te.Tensor
        The result has the same size as data, and the same shape as data if axis is not None.
        If axis is None, the result is a 1-d array.
    """
    if dtype is None or dtype == "":
        dtype = data.dtype

    def maybe_cast(x):
        # Cast into the accumulator dtype only when it differs from the
        # input dtype; otherwise pass the value through unchanged.
        if dtype != data.dtype:
            return cast(x, dtype)
        return x

    # Products of the dimensions before/after the scan axis; together they
    # enumerate every independent 1-d lane the scan runs over.
    axis_mul_before = 1
    axis_mul_after = 1

    if axis is None:
        # Flattened cumsum: treat the whole input as one long 1-d axis.
        axis = 0
        cumsum_axis_len = prod(data.shape)
        shape = (cumsum_axis_len,)
    else:
        if not isinstance(axis, int):
            axis = get_const_int(axis)
        shape = data.shape
        cumsum_axis_len = shape[axis]
        if axis < 0:
            axis = len(shape) + axis
        for i, value in enumerate(shape, 0):
            if i < axis:
                axis_mul_before *= value
            elif i > axis:
                axis_mul_after *= value

    if exclusive is None:
        exclusive = 0

    def gen_ir(data_buf, out_buf):
        # Emit scalar IR: the outer loop walks every (before, after) lane in
        # parallel; the inner loop accumulates sequentially along the axis.
        ib = ir_builder.create()
        data_buf = ib.buffer_ptr(data_buf)
        out_buf = ib.buffer_ptr(out_buf)

        with ib.for_range(0, axis_mul_before * axis_mul_after, "fused", kind="parallel") as fused:
            i = fused // axis_mul_after
            j = fused % axis_mul_after
            base_idx = i * cumsum_axis_len * axis_mul_after + j
            if exclusive == 0:
                out_buf[base_idx] = maybe_cast(data_buf[base_idx])
            else:
                # Exclusive scan starts each lane from the identity element.
                out_buf[base_idx] = cast(0, dtype)
            with ib.for_range(0, cumsum_axis_len - 1, "_k") as _k:
                k = _k + 1
                cur_idx = base_idx + k * axis_mul_after
                prev_idx = base_idx + (k - 1) * axis_mul_after
                if exclusive == 0:
                    out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[cur_idx])
                else:
                    # Exclusive: each output sums elements strictly before it,
                    # hence the previous *input* element is added.
                    out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[prev_idx])

        return ib.get()

    out_buf = decl_buffer(shape, dtype, "out_buf")

    return extern(
        [shape],
        [data],
        lambda ins, outs: gen_ir(ins[0], outs[0]),
        dtype=dtype,
        out_buffers=[out_buf],
        name="cumsum_generic",
        tag="cumsum_generic",
    )
| [
"noreply@github.com"
] | vinx13.noreply@github.com |
c5c27117168845db40692da47f0e4b594df6e4e8 | 73e063b43d0890f13cf1936826e2a1833447806f | /sqlalchemy/query.py | 078159db8bdaceb69c6ad698741a117d24e62e41 | [] | no_license | sodewumi/hb-skills | f1814495ee5540243449b1df3c43f3ce62dae8fe | 502f398a75568804393272aa896621811eb7bacb | refs/heads/master | 2021-01-19T13:53:00.523056 | 2015-05-11T16:06:52 | 2015-05-11T16:06:52 | 34,186,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | # Note: this file will not run. It is only for recording answers.
# Part 2: Write queries
# Get the brand with the **id** of 8.
Brand.query.filter_by(id=8).one()
# Get all models with the **name** Corvette and the **brand_name** Chevrolet.
Brand.query.filter_by(name="Corvette", brand_name="Chevrolet").all()
# Get all models that are older than 1960.
db.session.query(Model).filter
# Get all brands that were founded after 1920.
# Get all models with names that begin with "Cor".
# Get all brands with that were founded in 1903 and that are not yet discontinued.
# Get all brands with that are either discontinued or founded before 1950.
# Get any model whose brand_name is not Chevrolet.
# Part 2.5: Advanced and Optional
def search_brands_by_name(mystr):
pass
def get_models_between(start_year, end_year):
pass
# Part 3: Discussion Questions
# 1. What is the returned value and datatype of ``Brand.query.filter_by(name='Ford')``?
# 2. In your own words, what is an association table, and what *type* of relationship
# does an association table manage?
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
628e23f96ff86b40610fd3627ca993c1abd63e56 | 4137167a68f487343ef5cfd4e99e730fb39efa56 | /polyfile/fileutils.py | c43d5b9f09ccb380558b05c773a85496fcaecf95 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | chrismattmann/polyfile | b57ccc72ce2fcfd67c885815204b40d61924c0cb | 6b86b2d91c9af19abc1520c1339935569b2cc964 | refs/heads/master | 2020-09-24T08:42:10.991623 | 2019-11-25T22:16:29 | 2019-11-25T22:16:29 | 225,717,204 | 2 | 1 | Apache-2.0 | 2019-12-03T21:14:40 | 2019-12-03T21:14:39 | null | UTF-8 | Python | false | false | 7,221 | py | import mmap
import os
import tempfile as tf
import sys
def make_stream(path_or_stream, mode='rb', close_on_exit=None):
if isinstance(path_or_stream, FileStream):
return path_or_stream
else:
return FileStream(path_or_stream, mode=mode, close_on_exit=close_on_exit)
class Tempfile:
def __init__(self, contents, prefix=None, suffix=None):
self._temp = None
self._data = contents
self._prefix = prefix
self._suffix = suffix
def __enter__(self):
self._temp = tf.NamedTemporaryFile(prefix=self._prefix, suffix=self._suffix, delete=False)
self._temp.write(self._data)
self._temp.flush()
self._temp.close()
return self._temp.name
def __exit__(self, type, value, traceback):
if self._temp is not None:
os.unlink(self._temp.name)
self._temp = None
class PathOrStdin:
def __init__(self, path):
self._path = path
if self._path == '-':
self._tempfile = Tempfile(sys.stdin.buffer.read())
else:
self._tempfile = None
def __enter__(self):
if self._tempfile is None:
return self._path
else:
return self._tempfile.__enter__()
def __exit__(self, *args, **kwargs):
if self._tempfile is not None:
return self._tempfile.__exit__(*args, **kwargs)
class FileStream:
def __init__(self, path_or_stream, start=0, length=None, mode='rb', close_on_exit=None):
if isinstance(path_or_stream, str):
self._stream = open(path_or_stream, mode)
if close_on_exit is None:
close_on_exit = True
else:
if not path_or_stream.seekable():
raise ValueError('FileStream can only wrap streams that are seekable')
elif not path_or_stream.readable():
raise ValueError('FileStream can only wrap streams that are readable')
self._stream = path_or_stream
if isinstance(path_or_stream, FileStream):
if length is None:
self._length = len(path_or_stream) - start
else:
self._length = min(length, len(path_or_stream))
else:
filesize = os.path.getsize(self._stream.name)
if length is None:
self._length = filesize - start
else:
self._length = min(filesize, length) - start
if close_on_exit is None:
close_on_exit = False
self._name = self._stream.name
self.start = start
self.close_on_exit = close_on_exit
self._entries = 0
self._listeners = []
self._root = None
def __len__(self):
return self._length
def add_listener(self, listener):
self._listeners.append(listener)
def remove_listener(self, listener):
ret = False
for i in reversed(range(len(self._listeners))):
if self._listeners[i] == listener:
del self._listeners[i]
ret = True
return ret
def seekable(self):
return True
def writable(self):
return False
def readable(self):
return True
@property
def name(self):
return self._name
@property
def root(self):
if self._root is None:
if isinstance(self._stream, FileStream):
self._root = self._stream.root
else:
self._root = self._stream
return self._root
def save_pos(self):
f = self
class SP:
def __init__(self):
self.pos = f.root.tell()
def __enter__(self, *args, **kwargs):
return f
def __exit__(self, *args, **kwargs):
f.root.seek(self.pos)
return SP()
def fileno(self):
return self._stream.fileno()
def offset(self):
if isinstance(self._stream, FileStream):
return self._stream.offset() + self.start
else:
return self.start
def seek(self, offset, from_what=0):
if from_what == 1:
offset = self.tell() + offset
elif from_what == 2:
offset = len(self) + offset
if offset - self.start > self._length:
raise IndexError(f"{self!r} is {len(self)} bytes long, but seek was requested for byte {offset}")
self._stream.seek(self.start + offset)
def tell(self):
return min(max(self._stream.tell() - self.start, 0), self._length)
def read(self, n=None, update_listeners=True):
if self._stream.tell() - self.start < 0:
# another context moved the position, so move it back to our zero index:
self.seek(0)
pos = 0
else:
pos = self.tell()
if update_listeners:
for listener in self._listeners:
listener(self, pos)
ls = len(self)
if pos >= ls:
return b''
elif n is None:
return self._stream.read()[:ls - pos]
else:
return self._stream.read(min(n, ls - pos))
def contains_all(self, *args):
if args:
with mmap.mmap(self.fileno(), 0, access=mmap.ACCESS_READ) as filecontent:
for string in args:
if filecontent.find(string, self.offset(), self.offset() + len(self)) < 0:
return False
return True
def tempfile(self, prefix=None, suffix=None):
class FSTempfile:
def __init__(self, file_stream):
self._temp = None
self._fs = file_stream
def __enter__(self):
self._temp = tf.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False)
self._fs.seek(0)
self._temp.write(self._fs.read(len(self._fs)))
self._temp.flush()
self._temp.close()
return self._temp.name
def __exit__(self, type, value, traceback):
if self._temp is not None:
os.unlink(self._temp.name)
self._temp = None
return FSTempfile(self)
def __getitem__(self, index):
if isinstance(index, int):
self.seek(index)
return self.read(1)
elif not isinstance(index, slice):
raise ValueError(f"unexpected argument {index}")
if index.step is not None and index.step != 1:
raise ValueError(f"Invalid slice step: {index}")
length=None
if index.stop is not None:
if index.stop < 0:
length = len(self) + index.stop - index.start
else:
length = len(self) - (index.stop - index.start)
return FileStream(self, start=index.start, length=length, close_on_exit=False)
def __enter__(self):
self._entries += 1
return self
def __exit__(self, type, value, traceback):
self._entries -= 1
assert self._entries >= 0
if self._entries == 0 and self.close_on_exit:
self.close_on_exit = False
self._stream.close()
| [
"evan.sultanik@trailofbits.com"
] | evan.sultanik@trailofbits.com |
19fbe3ae6d843d850ff23562c5b8b31594a1e2a0 | 4b64dd47fa9321b50875e96298a5f0766ffe97c9 | /leetcode/p122.py | 162b1bac0c661d114c05ccc4dc65293548240690 | [] | no_license | choupi/puzzle | 2ce01aa85201660da41378c6df093036fa2d3a19 | 736964767717770fe786197aecdf7b170d421c8e | refs/heads/master | 2021-07-23T13:17:45.086526 | 2021-07-20T11:06:28 | 2021-07-20T11:06:28 | 13,580,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Solution:
def maxProfit(self, prices: List[int]) -> int:
trans = False
buy_price = None
profit = 0
for i in range(len(prices)):
if trans:
if i == len(prices)-1 or prices[i+1]<prices[i]:
profit += prices[i] - buy_price
trans = False
else:
if i<len(prices)-1 and prices[i+1]>prices[i]:
buy_price = prices[i]
trans = True
return profit
| [
"chromosome460@gmail.com"
] | chromosome460@gmail.com |
1bd82cb13ed2585404f2436b99d32f708c8e9d82 | d96ffbadf4526db6c30a3278f644c1bc25ff4054 | /src/storage/cluster_storage.py | d972d9137b5369a66d792d63c71147b0a0d4d5b5 | [
"MIT"
] | permissive | dballesteros7/master-thesis-2015 | 07c03726f6ceb66e6d706ffe06e4e5eb37dcda75 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | refs/heads/master | 2021-05-03T11:22:28.333473 | 2016-04-26T14:00:30 | 2016-04-26T14:00:30 | 44,601,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import logging
from pymongo import MongoClient
class ClusterStorage:
def __init__(self):
client = MongoClient()
self.collection = client.flickrdata.clusters
def get_clusters(self, city_name, bandwidth):
result = self.collection.find({
'city_name': city_name,
'bandwidth': bandwidth
})
return list(result)
def get_cluster(self, cluster_id):
return self.collection.find_one({
'_id': cluster_id
})
def insert_clusters(self, city_name, bandwidth, entries, cluster_centers, cluster_labels):
logging.info('Collecting cluster data.')
clusters = []
unique_users_per_cluster = []
for cluster_center in cluster_centers:
clusters.append({
'latitude': cluster_center[0],
'longitude': cluster_center[1],
'photos': [],
'unique_users': 0,
'number_of_photos': 0,
'city_name': city_name,
'bandwidth': bandwidth
})
unique_users_per_cluster.append(set())
for cluster_label, entry in zip(cluster_labels, entries):
if cluster_label == -1:
continue
clusters[cluster_label]['photos'].append(entry['_id'])
clusters[cluster_label]['number_of_photos'] += 1
unique_users_per_cluster[cluster_label].add(entry['owner'])
for cluster, unique_users in zip(clusters, unique_users_per_cluster):
cluster['unique_users'] = len(unique_users)
self.collection.insert_many(clusters, ordered=False)
return clusters
def get_cluster_for_photo(self, photo_id, city_name, bandwidth):
return self.collection.find_one({
'photos': photo_id,
'city_name': city_name,
'bandwidth': bandwidth
})
def get_top_ten_clusters(self, city_name, bandwidth):
return self.collection.find({
'city_name': city_name,
'bandwidth': bandwidth
}, sort=[('number_of_photos', -1), ('unique_users', -1)], limit=10)
| [
"diegob@student.ethz.ch"
] | diegob@student.ethz.ch |
22fb657a6dc5caa60f4275c099a2c55e2f160222 | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /206-ReverseLinkedList.py | f14547c884d1681052b51980876ae39bad92b593 | [] | no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head==None: return None
pivot = ListNode(0)
node = head
while node:
temp = node.next
node.next = pivot.next
pivot.next = node
node = temp
return pivot.next | [
"dengqianwork@gmail.com"
] | dengqianwork@gmail.com |
8616f51bef26951ccf5dd36eea282b9742d4f87d | 930309163b930559929323647b8d82238724f392 | /arc109_a.py | e8f0ea60a8a44793ef14ac47d27ac9e2ebf69e2c | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py |
SA,SB,X,Y=map(int,input().split())
SA-=1;
SB-=1;
N=202
G = [[]*N for _ in range(N)]
A = [a for a in range(100)]
B = [b+100 for b in range(100)]
for i in range(len(A)-1):
G[A[i]].append( (A[i+1], Y) )
G[A[i+1]].append( (A[i], Y) )
for i in range(len(B)-1):
G[B[i]].append( (B[i+1], Y) )
G[B[i+1]].append( (B[i], Y) )
for i in range(0, len(A)):
G[B[i]].append( (A[i], X) )
G[A[i]].append( (B[i], X) )
for i in range(1, len(A)):
G[B[i-1]].append( (A[i], X) )
G[A[i]].append( (B[i-1], X) )
import collections
import heapq
Entity = collections.namedtuple("Entity", ["node", "w"])
Entity.__lt__ = lambda self, other: self.w <= other.w
def dijkstra(start) -> "List":
dist = [-1 for _ in range(N)]
dist[start] = 0
que = []
heapq.heappush(que, Entity(start, 0))
done = [False for _ in range(N)]
while que:
i, w = heapq.heappop(que)
# すでに訪れたところは処理しない
if done[i]:
continue
done[i] = True
for j, c in G[i]:
# 評価が未知のエッジ or より安くなる可能性がある場合は探索し、結果をヒープに入れる
if dist[j] == -1 or dist[j] > dist[i] + c:
dist[j] = dist[i] + c
heapq.heappush(que, Entity(j, dist[j]))
return dist
dist = dijkstra(SA)
#print(dist)
print(dist[SB+100])
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
acc8718c446ff2f61372886aa27513bf83191698 | 6aa8fd438e12e4e285d9b89be15e211e607821e0 | /.metadata/.plugins/org.eclipse.core.resources/.history/cc/604d9aa93aac00141484e17924c72bfe | 220dd8bf723cd98eacd7547909b0124e9522e153 | [] | no_license | phoenixproject/python | 2aa251c9fe9a3a665043d5f3d29d48c0f95b9273 | f8171d31d1d33a269d29374e7605a8f5bce6b5d6 | refs/heads/master | 2021-03-12T19:15:01.611936 | 2015-02-04T08:25:27 | 2015-02-04T08:25:27 | 30,287,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | #!/usr/bin/python3
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaNatural import FiltroAguaNatural
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaGelada import FiltroAguaGelada
from br.edu.ifes.poo2.adapter.cdp.FiltroAguaESaiGelo import FiltroAguaESaiGelo
def main():
filtro = FiltroAguaNatural()
filtro.FiltrarAgua()
filtro = FiltroAguaGelada()
filtro.FiltrarAgua()
#filtro2 = FiltroAguaESaiGelo()
f#iltro2.FiltrarAgua()
if __name__ == "__main__" : main() | [
"phoenixproject.erp@gmail.com"
] | phoenixproject.erp@gmail.com | |
a2a09b8055dfde80a01fcb05669f1e1078e5b234 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /constrained_language_typology/sigtyp_reader_main.py | c3641d426176bf96df9381cb5e3c2f02b22c2a70 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 5,528 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Reader for the format provided by SIGTYP 2020 Shared Task.
More information on the format is available here:
https://sigtyp.github.io/st2020.html
Example:
--------
Clone the GitHub data to ST2020_DIR. Then run:
> ST2020_DIR=...
> python3 sigtyp_reader_main.py --sigtyp_dir ${ST2020_DIR}/data \
--output_dir ${OUTPUT_DIR}
The above will create "train.csv", "dev.csv" and "test_blinded.csv" files
converted from the format provided by SIGTYP. Our models should be able to
injest these csv files. Along each of the above files, an accompanying
"data_train_*.json.gz" file is generated that contains metainformation on
various features and their values.
TODO:
-----
Following needs to be done:
- Latitude and longitude need to be on a point on a unit sphere? Keep as is
and add three further columns for (x,y,z)?
- Country codes are *several*.
- Other types of SOMs.
- Use BaseMap for visualizations?
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import constants as const
import data_info as data_lib
import sigtyp_reader as sigtyp
flags.DEFINE_string(
"sigtyp_dir", "",
"Directory containing SIGTYP original training and development.")
flags.DEFINE_string(
"output_dir", "",
"Output directory for preprocessed files.")
flags.DEFINE_bool(
"categorical_as_ints", False,
"Encode all the categorical features as ints.")
FLAGS = flags.FLAGS
def _write_dict(data, file_type, output_filename):
"""Writes dictionary of a specified type to a file in output directory."""
output_filename = os.path.join(
FLAGS.output_dir,
output_filename + "_" + file_type + data_lib.FILE_EXTENSION)
data_lib.write_data_info(output_filename, data)
def _process_file(filename, base_dir=None):
"""Preprocesses supplied data file."""
if not base_dir:
base_dir = FLAGS.sigtyp_dir
full_path = os.path.join(base_dir, filename + ".csv")
_, df, data_info = sigtyp.read(
full_path, categorical_as_ints=FLAGS.categorical_as_ints)
_write_dict(data_info, filename, const.DATA_INFO_FILENAME)
# Save preprocessed data frames to a csv.
output_file = os.path.join(FLAGS.output_dir, filename + ".csv")
logging.info("Saving preprocessed data to \"%s\" ...", output_file)
df.to_csv(output_file, sep="|", index=False, float_format="%g")
return data_info
def _write_combined_data(file_types, output_file_name):
"""Combine data from multiple files."""
with tempfile.TemporaryDirectory() as temp_dir:
temp_file = os.path.join(temp_dir, output_file_name + ".csv")
with open(temp_file, "w", encoding=const.ENCODING) as out_f:
header = None
all_lines = []
for file_type in file_types:
input_path = os.path.join(FLAGS.sigtyp_dir, file_type + ".csv")
with open(input_path, "r", encoding=const.ENCODING) as in_f:
lines = in_f.readlines()
if not header:
header = lines[0]
lines.pop(0) # Remove header.
all_lines.extend(lines)
# Sort the lines by the WALS code and dump them.
all_lines = sorted(all_lines, key=lambda x: x.split("|")[0])
all_lines.insert(0, header)
out_f.write("".join(all_lines))
_process_file(output_file_name, base_dir=temp_dir)
def _process_files():
"""Processes input files."""
# Process training and development files individually.
_process_file(const.TRAIN_FILENAME)
_process_file(const.DEV_FILENAME)
_process_file(const.TEST_GOLD_FILENAME)
test_data_info = _process_file(const.TEST_BLIND_FILENAME)
# Save features requested for prediction in the test set.
features_to_predict = test_data_info[const.DATA_KEY_FEATURES_TO_PREDICT]
if not features_to_predict:
raise ValueError("No features requested for prediction!")
predict_dict_path = os.path.join(FLAGS.output_dir,
const.FEATURES_TO_PREDICT_FILENAME + ".json")
logging.info("Saving features for prediction in \"%s\" ...",
predict_dict_path)
with open(predict_dict_path, "w", encoding=const.ENCODING) as f:
json.dump(features_to_predict, f)
# Process the combine the datasets.
_write_combined_data([const.TRAIN_FILENAME, const.DEV_FILENAME],
const.TRAIN_DEV_FILENAME)
_write_combined_data([const.TRAIN_FILENAME, const.DEV_FILENAME,
const.TEST_BLIND_FILENAME],
const.TRAIN_DEV_TEST_FILENAME)
def main(unused_argv):
# Check flags.
if not FLAGS.sigtyp_dir:
raise ValueError("Specify --sigtyp_dir for input data!")
if not FLAGS.output_dir:
raise ValueError("Specify --output_dir for preprocessed data!")
_process_files()
if __name__ == "__main__":
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
eb023275d736cbac3ff00334ca4f12d84d44429a | f6e78129c6669e8f46a65a3d7c45cf10cca083b9 | /scripts/inject_powerloss.py | 1c4d01f0ff71e14aeb4c309ca1dea09dbfc96d04 | [] | no_license | realraum/door_and_sensors | 339259c16ed27d2466f3cf9d5a51a93bfc7b326c | 8c70cdc4a28eabb04ce1e2df6aab0fb86c5b4f28 | refs/heads/master | 2023-08-30T08:53:28.419146 | 2023-08-07T22:16:21 | 2023-08-07T22:16:21 | 31,234,025 | 1 | 0 | null | 2023-08-11T11:46:30 | 2015-02-23T22:45:47 | Go | UTF-8 | Python | false | false | 983 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import with_statement
import paho.mqtt.client as mqtt
import json
import time
import sys
######## r3 ############
def sendR3Message(client, structname, datadict):
client.publish(structname, json.dumps(datadict))
# Start zmq connection to publish / forward sensor data
client = mqtt.Client()
client.connect("mqtt.realraum.at", 1883, 60)
# listen for sensor data and forward them
if len(sys.argv) < 3:
sendR3Message(client, "realraum/backdoorcx/powerloss",
{"Ts": int(time.time()),
"OnBattery": bool(True),
"PercentBattery": float(42.0),
"LineVoltage": float(2904.0),
"LoadPercent": float(0815.0)
})
else:
client.publish(sys.argv[1], sys.argv[2])
client.loop(timeout=1.0, max_packets=1)
client.disconnect()
# {“OnBattery”:bool, PercentBattery:float, LineVoltage: float, LoadPercent: float,
| [
"dev@2904.cc"
] | dev@2904.cc |
9fd094b78b8730d2a64729c0e9348ca22cbac3c0 | e01c5d1ee81cc4104b248be375e93ae29c4b3572 | /Sequence5/CTCI/2-Linked-List/1remove-dup.py | 17fed028ad554d4eca493329a1f98a574741788a | [] | no_license | lalitzz/DS | 7de54281a34814601f26ee826c722d123ee8bd99 | 66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1 | refs/heads/master | 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import singlell
def remove_duplicate(S):
tmp = S.head
seen_nodes = set()
prev = None
while tmp is not None:
if tmp.data in seen_nodes:
if prev is not None:
prev.next = tmp.next
else:
S.head = None
if tmp.next is None:
S.tail = prev
else:
seen_nodes.add(tmp.data)
prev = tmp
tmp = tmp.next
def remove_dup_notmp(S):
tmp = S.head
prev = None
while tmp is not None:
cur = tmp
while cur.next is not None:
if tmp.data == cur.next.data:
cur.next = cur.next.next
else:
cur = cur.next
tmp = tmp.next
S = singlell.SingleLinkList()
S.appendLeft(1)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
S.appendLeft(2)
remove_dup_notmp(S)
S.print_list() | [
"lalit.slg007@gmail.com"
] | lalit.slg007@gmail.com |
58fab79e5b39430682a825af92f73644698e4c5c | 64a296ffabb013ad8c8a55380718fcc629bc7755 | /cry1ac/src/pb_d_major_subset.py | f941b371c4ba31b579b7b1e815e7bf8cb87a6720 | [] | no_license | maxwshen/evoracle-dataprocessinganalysis | 3ed16fc20ff52d4be81bb171893e64562a81d5d7 | ed400f6ddfd7b7bba161dd3a06254013a2c90770 | refs/heads/master | 2022-04-30T07:50:07.486482 | 2020-04-27T18:21:08 | 2020-04-27T18:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util, compbio
import pandas as pd
# Default params
inp_dir = _config.OUT_PLACE + f'pb_c_convert/'
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
exp_design = pd.read_csv(_config.DATA_DIR + f'Badran2015_SraRunTable.csv')
pacbio_nms = exp_design[exp_design['Instrument'] == 'PacBio RS II']['Library Name']
pacbio_nms = sorted(pacbio_nms)
params = {
# 21
'major_positions': [
-76,
-73,
# -69,
15,
# 61,
68,
198,
286,
304,
332,
344,
347,
361,
363,
384,
404,
417,
461,
463,
515,
582,
],
}
pos_to_ref = {
-76: 'A',
-73: 'M',
# -69: 'G',
15: 'C',
# 61: 'V',
68: 'F',
198: 'R',
286: 'G',
304: 'T',
332: 'E',
344: 'A',
347: 'Q',
361: 'T',
363: 'S',
384: 'D',
404: 'S',
417: 'N',
461: 'E',
463: 'N',
515: 'E',
582: 'S',
}
ordered_time_strings = [
'0hrs',
'12hrs',
'24hrs',
'36hrs',
'48hrs',
'60hrs',
'72hrs',
'84hrs',
'96hrs',
'108hrs',
'120hrs',
'132hrs',
'144hrs',
'156hrs',
'168hrs',
'180hrs',
'192hrs',
'204hrs',
'216hrs',
'228hrs',
'240hrs',
'264hrs',
'276hrs',
'300hrs',
'324hrs',
'348hrs',
'372hrs',
'396hrs',
'408hrs',
'432hrs',
'456hrs',
'480hrs',
'504hrs',
'528hrs',
]
##
# Functions
##
def get_short_genotypes(dfs):
short_gts = []
for read_nm in set(dfs['Read name']):
df = dfs[dfs['Read name'] == read_nm]
obs_pos_to_mut = {pos: mut for pos, mut in zip(df['Position'], df['Mutated amino acid'])}
short_gt = ''.join([obs_pos_to_mut[pos] if pos in obs_pos_to_mut else '.' for pos in params['major_positions']])
# short_gt = ''.join([obs_pos_to_mut[pos] if pos in obs_pos_to_mut else pos_to_ref[pos] for pos in params['major_positions']])
short_gts.append(short_gt)
# Filter genotypes with amino acid 'e' representing a deletion
print(f'Found {len(short_gts)} genotypes')
short_gts = [s for s in short_gts if 'e' not in s]
print(f'Filtered out e, leaving {len(short_gts)} genotypes')
return short_gts
##
# Primary
##
def major_subset():
get_time_from_nm = lambda nm: nm.split('_')[2]
dd = defaultdict(list)
for nm in pacbio_nms:
print(nm)
df = pd.read_csv(inp_dir + f'{nm}.csv', index_col = 0)
dfs = df[df['Position'].isin(params['major_positions'])]
short_gts = get_short_genotypes(dfs)
time = get_time_from_nm(nm)
dd['Abbrev genotype'] += short_gts
dd['Timepoint'] += [time] * len(short_gts)
df = pd.DataFrame(dd)
# Add stats
df['Read count'] = 1
dfs = df.groupby(['Abbrev genotype', 'Timepoint']).agg(sum).reset_index()
sums = dfs.groupby(['Timepoint'])['Read count'].sum()
time_to_sum = {time: ct for time, ct in zip(sums.index, list(sums))}
dfs['Total count'] = [time_to_sum[t] for t in dfs['Timepoint']]
dfs['Frequency'] = dfs['Read count'] / dfs['Total count']
dfs.to_csv(out_dir + f'badran_pacbio.csv')
pv_df = dfs.pivot(index = 'Abbrev genotype', columns = 'Timepoint', values = 'Frequency')
pv_df = pv_df.fillna(value = 0)
pv_df = pv_df[ordered_time_strings]
pv_df.to_csv(out_dir + f'badran_pacbio_pivot.csv')
# Subset to > 1% fq and renormalize
t = pv_df.apply(max, axis = 'columns')
gt_to_max_fq = {gt: max_fq for gt, max_fq in zip(t.index, list(t))}
keep_gts = [gt for gt, max_fq in zip(t.index, list(t)) if max_fq > 0.01]
print(f'Filtered {len(pv_df)} to {len(keep_gts)} genotypes with >1% fq in any timepoint')
# Normalize
pv_df = pv_df.loc[keep_gts]
pv_df /= pv_df.apply(sum)
pv_df = pv_df.sort_values(by = '528hrs', ascending = False)
pv_df.to_csv(out_dir + f'badran_pacbio_pivot_1pct.csv')
return
##
# qsub
##
def gen_qsubs():
# Generate qsub shell scripts and commands for easy parallelization
print('Generating qsub scripts...')
qsubs_dir = _config.QSUBS_DIR + NAME + '/'
util.ensure_dir_exists(qsubs_dir)
qsub_commands = []
pacbio_nms = exp_design[exp_design['Instrument'] == 'PacBio RS II']['Library Name']
num_scripts = 0
for nm in pacbio_nms:
command = f'python {NAME}.py {nm}'
script_id = NAME.split('_')[0]
# Write shell scripts
sh_fn = qsubs_dir + f'q_{script_id}_{nm}.sh'
with open(sh_fn, 'w') as f:
f.write(f'#!/bin/bash\n{command}\n')
num_scripts += 1
# Write qsub commands
qsub_commands.append(f'qsub -V -P regevlab -l h_rt=4:00:00,h_vmem=8G -wd {_config.SRC_DIR} {sh_fn} &')
# Save commands
commands_fn = qsubs_dir + '_commands.sh'
with open(commands_fn, 'w') as f:
f.write('\n'.join(qsub_commands))
subprocess.check_output(f'chmod +x {commands_fn}', shell = True)
print(f'Wrote {num_scripts} shell scripts to {qsubs_dir}')
return
##
# Main
##
@util.time_dec
def main():
print(NAME)
# Function calls
major_subset()
return
if __name__ == '__main__':
# if len(sys.argv) > 1:
# main(sys.argv[1:])
# else:
# gen_qsubs()
main() | [
"maxwshen@gmail.com"
] | maxwshen@gmail.com |
4558cc6e46898c6d04ac0c115bcdacc63c8181fa | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_34/619.py | 42c863876e3fa0be7bf9a9744685f50aab0ed2a3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | #!/usr/bin/env python
import re
import sys
def match(words, pattern):
count = 0
for word in words:
if re.match(pattern.replace("(", "[").replace(")", "]"), word):
count += 1
return count
def main():
readline = sys.stdin.readline
l, d, n = [int(x) for x in readline().split(" ", 2)]
words = []
for i in range(d):
words.append(readline()[:-1])
for i in range(n):
pattern = readline()[:-1]
print "Case #%s: %s" % (i+1, match(words, pattern))
if __name__ == "__main__":
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
680b2b82bf857e26d0faafe6e4775c0ed3a62c5a | 56f1bb713f0651ac63391349deb81790df14e4b5 | /Mirror Images/mirror.py | 2f0d48862fb5ca30e5ed19cfae2c329da4a5f92f | [
"CC0-1.0"
] | permissive | rajitbanerjee/kattis | 4cd46a2fe335120b8f53ca71544fc0681474118b | 3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad | refs/heads/master | 2022-05-05T03:19:28.744660 | 2020-08-12T18:48:55 | 2020-08-12T18:48:55 | 192,208,120 | 4 | 2 | CC0-1.0 | 2022-04-15T05:50:16 | 2019-06-16T15:38:15 | Python | UTF-8 | Python | false | false | 643 | py | """https://open.kattis.com/problems/mirror"""
T = int(input())
ans = []
def doubleMirror(image, R, C):
mirror_im = [[[] for _ in range(C)] for _ in range(R)]
for i in range(R):
for j in range(C):
mirror_im[R - i - 1][C - j - 1] = image[i][j]
for i in range(R):
mirror_im[i] = "".join(mirror_im[i])
return "\n".join(mirror_im)
for _ in range(T):
R, C = map(int, input().split())
image = []
for _ in range(R):
row = list(input())
image.append(row)
ans.append(doubleMirror(image, R, C))
for i, a in enumerate(ans):
print(f"Test {i + 1}")
print(a) | [
"rajit.banerjee@ucdconnect.ie"
] | rajit.banerjee@ucdconnect.ie |
f6892953b61e5bd8d916108ba1c4c6a22883b75e | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/sum-of-multiples/6cd4a956dd254262aaa0ea881b44e3fe.py | 689a350a7f9eb5a21bf4e588871116ce841d8f22 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 676 | py | class SumOfMultiples:
def __init__(self, *args):
if not args:
self.nums = (3, 5)
else:
self.nums = args
# @profile
def to_old(self, num):
ans = []
for i in range(num):
for j in self.nums:
if i % j == 0:
ans.append(i)
return sum(set(ans))
# @profile
def to(self, num):
ans = set()
for j in self.nums:
temp = [x for x in range(j, num, j)]
ans.update(temp)
return sum(ans)
if __name__ == "__main__":
print(SumOfMultiples(3, 5, 7, 2).to(10**6))
print(SumOfMultiples(43, 47).to_old(10**6))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
6212ce1b008a751d592f27035ba0f6e02bc76f93 | 706dd00183f5f4a3ccb80e78efc35c9173a7d88b | /backend/prototype/migrations/0023_auto_20210827_2116.py | 70b299cf4cbc9978bb85a7710a1eb4e33178f229 | [] | no_license | jiaweioss/2021_Summer_Project | d82e89e431c500cde07201b150a4390ecf09ce6f | 136f007f1a4449710659b7424025d15402b7344a | refs/heads/main | 2023-07-14T14:17:39.239612 | 2021-08-29T12:59:03 | 2021-08-29T12:59:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # Generated by Django 3.1.7 on 2021-08-27 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prototype', '0022_questionnaire_shownumbers'),
]
operations = [
migrations.AlterField(
model_name='questionnaire',
name='showNumbers',
field=models.BooleanField(default=True, null=True),
),
]
| [
"455436082@qq.com"
] | 455436082@qq.com |
9508aa89ecf08ec9d6866cda6052c802a93eb274 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_320/ch40_2019_08_26_19_13_34_255376.py | daf09fb1bcba4e7885a4e0cdad8525c5c25316da | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def fatorial(numero):
fat = 1
while numero > 0:
fat *= numero
numero -= 1
return fat
print(fatorial(5)) | [
"you@example.com"
] | you@example.com |
40a45937067dd33457d82b9d157f6a37f2980933 | 25faa623b069a9423e040903f4f2c5c123f53825 | /src/Sparrow/Python/setup.py | 8ec59326f11aa2e3e8e06d300befe7899648e147 | [
"BSD-3-Clause"
] | permissive | DockBio/sparrow | 99d3eb316426351312e74397c5cc4bb962118306 | f82cf86584e9edfc6f2c78af4896dc6f2ee8a455 | refs/heads/master | 2022-07-14T11:44:21.748779 | 2020-04-27T20:41:58 | 2020-04-27T20:41:58 | 257,099,197 | 0 | 0 | BSD-3-Clause | 2020-04-27T20:42:00 | 2020-04-19T20:48:28 | null | UTF-8 | Python | false | false | 1,000 | py | import setuptools
# Read README.md for the long description
with open("README.md", "r") as fh:
long_description = fh.read()
# Define the setup
setuptools.setup(
name="scine_sparrow",
version="2.0.0",
author="ETH Zurich, Laboratory for Physical Chemistry, Reiher Group",
author_email="scine@phys.chem.ethz.ch",
description="Open source semi-empirical quantum chemistry implementations.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://www.scine.ethz.ch",
packages=["scine_sparrow"],
package_data={"scine_sparrow": ["scine_sparrow.so"]},
classifiers=[
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Topic :: Scientific/Engineering :: Chemistry"
],
zip_safe=False,
test_suite='pytest',
tests_require=['pytest']
)
| [
"scine@phys.chem.ethz.ch"
] | scine@phys.chem.ethz.ch |
b40323c2dade3b11429fceddf7181bb8297ac62b | 025230a618b49c5f255c34e4389f87064df32a6f | /hypertools/tools/cluster.py | 88b0ff6d482d9bc04b10998d88acccca77304cd5 | [
"MIT"
] | permissive | shannonyu/hypertools | 18b44b502992e7748c8eabdab188b41e0120bf08 | 8134d46b6031169bb12d03e49357802c923a175f | refs/heads/master | 2021-01-25T05:50:54.314315 | 2017-02-01T22:16:57 | 2017-02-01T22:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | #!/usr/bin/env python
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
from .._shared.helpers import *
def cluster(x, n_clusters=8, ndims=None):
    """
    Performs k-means clustering on a dataset

    This function takes a high dimensional dataset (or a list of datasets,
    which is vertically stacked into a single array) and assigns each
    observation (row) to a cluster using scikit-learn's KMeans with
    k-means++ initialization.  Optionally the data can first be reduced
    with PCA before clustering.

    (Fix: the previous docstring was copy-pasted from the `align` function
    and described hyperalignment, which this function does not perform.)

    Parameters
    ----------
    x : Numpy array, Pandas Dataframe, or list of arrays/dataframes
        The data to cluster.

    n_clusters : int
        Number of clusters to discover (default: 8).

    ndims : int or None
        If set, reduce the data to this many dimensions with PCA before
        clustering (default: None, i.e. no reduction).

    Returns
    ----------
    cluster_labels : list
        One integer cluster label per row of the (stacked) input data.
    """
    x = format_data(x)

    # a list of datasets is treated as one dataset: stack the rows
    if isinstance(x, list):
        x = np.vstack(x)

    # truthiness check: ndims=None (and ndims=0) skip the PCA step
    if ndims:
        x = PCA(n_components=ndims).fit_transform(x)

    kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    kmeans.fit(x)
    return list(kmeans.labels_)
| [
"andrew.heusser@gmail.com"
] | andrew.heusser@gmail.com |
f4876c464f794477270f2c5c04c1902be29d18ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02785/s998943177.py | af54361e577f369c4e98be5c6ef60ce49c5108ea | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n,k=map(int,input().split())
h=list(map(int,input().split()))
h.sort(reverse=True)
if n-k<=0:
print(0)
else:
print(sum(h[k:])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4212e86c28029d266b8be09cb40da53ac4ceb49a | ee7e74fa14f176c7f1e9cff57cae14092a0baacf | /HomePlugPWN/plcmon.py | a5a3bbc1ddf4d810d49c224543e56c8f7383fe9c | [] | no_license | Cloudxtreme/powerline-arsenal | f720b047cc4fe24ceb44588456ad0fc1ce627202 | f62f9ea8b27b8079c7f52b7b81d6a8bf5e4baa1a | refs/heads/master | 2021-05-29T01:02:14.512647 | 2015-03-31T10:25:24 | 2015-03-31T10:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | #!/usr/bin/en python2
import sys
import binascii
from layerscapy.HomePlugAV import *
from optparse import OptionParser
from genDAK import *
dictio = {}
def appendindic(pkt):
macad = iter(binascii.hexlify(pkt.load[0xe:0xe+6]))
macad = ':'.join(a+b for a,b in zip(macad, macad))
if macad not in dictio.keys() and macad != "00:00:00:00:00:00":
dictio[macad] = DAKgen(macad).generate()
print "\t Found CCo: %s (DAK: %s)" % (macad, dictio[macad])
# Command-line entry point: put the adapter into sniffer mode, then listen
# for HomePlug AV frames and report every CCo (Central Coordinator) seen.
if __name__ == "__main__":
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    # network interface the powerline adapter is attached to
    parser.add_option("-i", "--iface", dest="iface", default="eth0",
                      help="select an interface to Enable sniff mode and sniff indicates packets", metavar="INTERFACE")
    # source MAC placed in the outgoing sniffer-request frame
    parser.add_option("-s", "--source", dest="sourcemac", default="00:c4:ff:ee:00:00",
                      help="source MAC address to use", metavar="SOURCEMARC")
    (options, args) = parser.parse_args()
    print "[+] Enabling sniff mode"
    pkt = Ether(src=options.sourcemac)/HomePlugAV()/SnifferRequest(SnifferControl=1) # We enable Sniff mode here
    sendp(pkt, iface=options.iface)
    print "[+] Listening for CCo station..."
    # appendindic runs once per captured HomePlugAV packet
    sniff(prn=appendindic, lfilter=lambda pkt:pkt.haslayer(HomePlugAV)) # capture the signal
| [
"oleg.kupreev@gmail.com"
] | oleg.kupreev@gmail.com |
e54038178c4fa4d44718db97ca7343f349b2592c | d1de9fdc4a444ff1c322e09c684ccb5247c22164 | /OpenElectrophy/classes/neo/io/asciisignalio.py | 3e15feb9068b0f9c35d8cb45b0849551f6806123 | [] | no_license | AntoineValera/SynaptiQs | a178ddf5aa3269fe677afa68f6838db219763a65 | b44a27ba01262e68d74488f98502083c9d681eb6 | refs/heads/master | 2021-01-18T21:12:16.543581 | 2016-05-12T14:52:27 | 2016-05-12T14:52:27 | 40,709,264 | 0 | 0 | null | 2015-10-07T11:32:01 | 2015-08-14T10:17:16 | Python | UTF-8 | Python | false | false | 8,028 | py | # -*- coding: utf-8 -*-
"""
Class for reading/writing analog signals in a text file.
Covers many case when part of a file can be viewed as a CSV format.
Supported : Read/Write
@author : sgarcia
"""
from baseio import BaseIO
#from neo.core import *
from ..core import *
import numpy
from numpy import *
from baseio import BaseIO
from numpy import *
import csv
class AsciiSignalIO(BaseIO):
    """
    Class for reading/writing analog signals in a text file.
    Covers many cases when part of a file can be viewed as a CSV format.
    Each signal is a column; optionally one column holds the time vector.

    **Example**
    # read a file
    io = AsciiSignalIO(filename = 'myfile.txt')
    seg = io.read() # read the entire file
    seg.get_analogsignals() # return all AnalogSignals
    # write a file
    io = AsciiSignalIO(filename = 'myfile.txt')
    seg = Segment()
    io.write(seg)

    NOTE(review): despite the example and the method names, read() /
    read_segment() actually return a Block holding one Segment per signal
    column (see read_segment below); callers appear to rely on that.
    """
    # capability flags / metadata consumed by the neo IO framework
    is_readable = True
    is_writable = True
    supported_objects = [Segment , AnalogSignal]
    readable_objects = [Segment]
    writeable_objects = [Segment]
    has_header = False
    is_streameable = False
    # GUI parameter specs, per readable object type.
    # NOTE(review): the default 'method' advertised here is 'homemade'
    # while read_segment() itself defaults to 'genfromtxt' -- confirm
    # which one is intended.
    read_params = {
        Segment : [
            ('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
            ('usecols' , { 'value' : None , 'type' : int } ),
            ('skiprows' , { 'value' :0 } ),
            ('timecolumn' , { 'value' : None, 'type' : int } ) ,
            ('samplerate' , { 'value' : 1000., } ),
            ('t_start' , { 'value' : 0., } ),
            ('method' , { 'value' : 'homemade', 'possible' : ['genfromtxt' , 'csv' , 'homemade' ] }) ,
            ]
        }
    write_params = {
        Segment : [
            ('delimiter' , {'value' : '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
            ('timecolumn' , { 'value' : None, 'type' : int } ) ,
            ]
        }
    name = None
    extensions = [ 'txt' ]
    mode = 'file'
    def __init__(self , filename = None) :
        """
        This class read/write AnalogSignal in a text file.
        Each signal is a column.
        One of the columns can be the time vector.

        **Arguments**
        filename : the filename to read/write
        """
        BaseIO.__init__(self)
        self.filename = filename
    def read(self , **kargs):
        """
        Read the entire file.

        Delegates to read_segment; see it for parameters and for the
        actual return value (a Block, despite the historical name).
        """
        return self.read_segment( **kargs)
    def read_segment(self,
                        delimiter = '\t',
                        usecols = None,
                        skiprows =0,
                        timecolumn = None,
                        samplerate = 1000.,
                        t_start = 0.,
                        method = 'genfromtxt',
                        ):
        """
        **Arguments**
        delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
        usecols : if None take all columns otherwise a list for selected columns
        skiprows : skip n first lines in case they contain header information
        timecolumn : None or a valid int that points at the time vector column
        samplerate : the samplerate of signals; ignored if timecolumn is not None
        t_start : time of the first sample; ignored if timecolumn is not None
        method : 'genfromtxt' or 'csv' or 'homemade'
            in case of bugs you can try one of these methods:
            'genfromtxt' uses numpy.genfromtxt
            'csv' uses the csv module
            'homemade' uses an intuitive, more robust but slow method

        Returns a Block with one Segment per (non-time) column; each Segment
        holds a single float32 AnalogSignal named 'SynaptiQsImport'.
        """
        #loadtxt
        if method == 'genfromtxt' :
            # whole-file parse via numpy, forced to float32
            sig = genfromtxt(self.filename,
                            delimiter = delimiter,
                            usecols = usecols ,
                            skiprows = skiprows,
                            dtype = 'f4')
            if len(sig.shape) ==1:
                # a single column comes back 1-D; make it a column vector
                sig = sig[:,newaxis]
        elif method == 'csv' :
            tab = [l for l in csv.reader( open(self.filename,'rU') , delimiter = delimiter ) ]
            tab = tab[skiprows:]
            sig = array( tab , dtype = 'f4')
        elif method == 'homemade' :
            # tolerant line-by-line parser: strips CR/LF and empty fields
            fid = open(self.filename,'rU')
            for l in range(skiprows):
                fid.readline()
            tab = [ ]
            for line in fid.readlines():
                line = line.replace('\r','')
                line = line.replace('\n','')
                l = line.split(delimiter)
                while '' in l :
                    l.remove('')
                tab.append(l)
            sig = array( tab , dtype = 'f4')
        if timecolumn is not None:
            # derive sample rate and start time from the time column; the
            # samplerate/t_start arguments are then overridden
            samplerate = 1./mean(diff(sig[:,timecolumn]))
            t_start = sig[0,timecolumn]
        #TODO :
        #Add channel support here
        blck=Block()
        for i in xrange(sig.shape[1]) :
            seg = Segment()
            if usecols is not None :
                if timecolumn == usecols[i] :
                    # time column is not a signal
                    continue
            else :
                if timecolumn == i :
                    continue
            # leftover debug output, kept commented:
            #print 'lkjjlkj', len(sig[:,i])
            analogSig = AnalogSignal( signal = sig[:,i] ,
                                        sampling_rate = samplerate,
                                        t_start = t_start)
            analogSig.channel = i
            analogSig.name = 'SynaptiQsImport'
            seg._analogsignals.append( analogSig )
            blck._segments.append(seg)
        return blck
    def write(self , *args , **kargs):
        """
        Write a segment to a text file.

        See write_segment for detail.
        """
        self.write_segment(*args , **kargs)
    def write_segment(self, segment,
                            delimiter = '\t',
                            skiprows =0,
                            timecolumn = None,
                            ):
        """
        Write a Segment's AnalogSignals to a text file, one per column.

        **Arguments**
        delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
        skiprows : unused here (kept for interface symmetry with read_segment)
        timecolumn : None or a valid int; if set, the time vector is inserted
            at this column and signal columns shift one position right
        """
        sigs = None
        # stack all signals as columns of a single 2-D array
        for analogSig in segment.get_analogsignals():
            if sigs is None :
                sigs = analogSig.signal[:,newaxis]
            else :
                sigs = concatenate ((sigs, analogSig.signal[:,newaxis]) , axis = 1 )
        if timecolumn is not None:
            # append a NaN placeholder column, shift columns right of
            # `timecolumn`, then drop the time vector into place
            t = segment.get_analogsignals()[0].t()
            print sigs.shape , t.shape
            sigs = concatenate ((sigs, t[:,newaxis]*nan) , axis = 1 )
            sigs[:,timecolumn+1:] = sigs[:,timecolumn:-1].copy()
            sigs[:,timecolumn] = t
        savetxt(self.filename , sigs , delimiter = delimiter)
| [
"a.valera@ucl.ac.uk"
] | a.valera@ucl.ac.uk |
7981081268ce40b43f61b4b1ac9d555a9fc68f34 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2464487_1/Python/axelbrz/a.py | 994845f27594079c0fb1112cac19471ac08a4d95 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | def paintRC(r, n):
return 2*n*r + n + 2*(n*(n-1))
def solve(m):
r, t = m.split(" ")
r, t = int(r), int(t)
d = 1
while paintRC(r, d) <= t:
d *= 2
i = 0
m = (d+i)/2
_m = m-1
while True:
if m == _m: return m
_m = m
#print i, m, d
p = paintRC(r, m)
if p < t: i = m
elif p > t: d = m
else: return m
m = (d+i)/2
return m
# Driver (Python 2): read "<prob>.in" whose first line is the case count and
# each remaining line one case; write "Case #i: answer" lines to "<prob>.out"
# while echoing them to stdout.
prob = "a"
f = open(prob+".in","r")
d = f.read()[:-1]  # drop the trailing newline
f.close()
f = open(prob+".out","w")
# skip the first line (case count); one case per remaining line
ms = "\n".join(d.split("\n")[1:]).split("\n")
T = 1
for m in ms:
    S = "Case #%d: %s" % (T, solve(m))
    print S
    f.write(S + "\n")
    T += 1
f.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
0022e5af4b34bac0101c46ed56fdda12471bfbc2 | aba8e48577dba352eaebcfa6743bdf2e7e2de315 | /setup.py | 5c4e38b2e95bf9ce3d2ca27df97fa4822ae4d364 | [
"MIT"
] | permissive | tiagocoutinho/xkcd | 2ab019bcd4d5ac10ca638f268b0ed1d223ff47d0 | 33adc0bc1c15ae40e16a27575a710906e351e6d2 | refs/heads/master | 2021-01-22T21:41:10.590103 | 2017-03-19T11:09:51 | 2017-03-19T11:09:51 | 85,462,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
#
# This file is part of the xkcd-get project
#
# Copyright (c) 2017 Tiago Coutinho
# Distributed under the MIT license. See LICENSE for more info.
import os
import sys
from setuptools import setup
# Runtime dependencies: grequests drives the parallel comic downloads and
# bs4 (BeautifulSoup) parses the xkcd pages.
requirements = [
    'grequests',
    'bs4',
]
setup(
    name='xkcd-get',
    version='0.0.3',
    description="downloader of xkcd comics",
    author="Tiago Coutinho",
    author_email='coutinhotiago@gmail.com',
    url='https://github.com/tiagocoutinho/xkcd',
    py_modules=['xkcd_get'],
    # expose the module's main() as the `xkcd-get` console command
    entry_points={
        'console_scripts': [
            'xkcd-get=xkcd_get:main'
        ]
    },
    install_requires=requirements,
    zip_safe=False,
    keywords='xkcd',
    classifiers=[
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| [
"coutinhotiago@gmail.com"
] | coutinhotiago@gmail.com |
49372f57347b9b25f4a13f2f90ba51d3e00fb64d | 428b2789f055f35a3d7221dfdd35ef2a74262f76 | /백준_문제집/BFS/적록색약.py | 3c82e8fa43361ccdf165a1ba80a21b6b44552229 | [] | no_license | sinsomi/Coding-Test | eb9fcf9c9ef2b427287a8f9ea27320bf6616e49a | 881974b533dc8d1ba44e8734346e38a3e668fda8 | refs/heads/master | 2022-12-10T04:56:50.280532 | 2020-09-14T02:37:55 | 2020-09-14T02:37:55 | 287,198,959 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | import sys
# Board size and the n-by-n grid of 'R'/'G'/'B' characters.
n=int(input())
matrix=[list([p for p in sys.stdin.readline().strip()]) for _ in range(n)]
# 0 = not yet visited by a flood fill, 1 = visited
visit=[[0]*n for _ in range(n)]
# down / left / up / right neighbour offsets
dx=[1,0,-1,0]
dy=[0,-1,0,1]
def bfs(x,y,text):
    """Flood-fill the connected region of colour `text` containing (x, y).

    Marks every reached cell in the global `visit` grid.  The caller is
    expected to have checked that (x, y) is unvisited and of colour `text`.

    Fixes: the seed cell was never marked visited, so same-colour
    neighbours could re-enqueue it; list.pop(0) is O(n) per pop, so a
    deque is used instead.  The counting result is unchanged.
    """
    from collections import deque  # O(1) popleft vs O(n) list.pop(0)
    queue = deque()
    visit[x][y] = 1  # mark the seed so neighbours cannot re-enqueue it
    queue.append([x, y])
    while queue:
        x, y = queue.popleft()
        for i in range(4):
            nx, ny = x + dx[i], y + dy[i]
            if nx < 0 or nx >= n or ny < 0 or ny >= n:
                continue  # off the board
            if matrix[nx][ny] == text and visit[nx][ny] == 0:
                visit[nx][ny] = 1
                queue.append([nx, ny])
def bfs2(x,y,text,text2):
    """Flood-fill treating colours `text` and `text2` as one colour.

    Used for the colour-blind pass where 'R' and 'G' merge into a single
    region.  Marks every reached cell in the global `visit` grid.

    Same fixes as bfs: the seed cell is now marked up front and a deque
    replaces the O(n) list.pop(0).
    """
    from collections import deque
    queue = deque()
    visit[x][y] = 1  # mark the seed so neighbours cannot re-enqueue it
    queue.append([x, y])
    while queue:
        x, y = queue.popleft()
        for i in range(4):
            nx, ny = x + dx[i], y + dy[i]
            if nx < 0 or nx >= n or ny < 0 or ny >= n:
                continue
            if (matrix[nx][ny] == text or matrix[nx][ny] == text2) and visit[nx][ny] == 0:
                visit[nx][ny] = 1
                queue.append([nx, ny])
# Pass 1 (normal vision): count connected regions of each individual colour.
r_cnt,g_cnt,b_cnt,rg_cnt=0,0,0,0
for i in range(n):
    for j in range(n):
        if matrix[i][j]=="R" and visit[i][j]==0:
            bfs(i,j,'R')
            r_cnt+=1
        if matrix[i][j]=="G" and visit[i][j]==0:
            bfs(i,j,'G')
            g_cnt+=1
        if matrix[i][j]=="B" and visit[i][j]==0:
            bfs(i,j,'B')
            b_cnt+=1
# Pass 2 (red-green colour blindness): reset the visited grid and recount
# with 'R' and 'G' merged into a single colour.
visit=[[0]*n for _ in range(n)]
for i in range(n):
    for j in range(n):
        if (matrix[i][j]=="R" or matrix[i][j]=="G") and visit[i][j]==0:
            bfs2(i,j,'R','G')
            rg_cnt+=1
# region counts for a normal viewer and for a red-green colour-blind viewer
print(r_cnt+b_cnt+g_cnt,rg_cnt+b_cnt)
"cindy960602@naver.com"
] | cindy960602@naver.com |
f0edcf01a50a21c0908381b011ca7b52dd8c04fb | 9e5eca27222871dd04e42c9106bb2fba07e598ff | /src/osxification/core_foundation/cf_number_type.py | f0cd9f2a858be41f18bf1d41f034984fe5d59fe2 | [] | no_license | jepebe/osxification | b2a68dec07cd0be3b7ebd519bd99d0bbd51e61c7 | c9a539f4dbeda9200e32a2eea2c955dd94e6f45e | refs/heads/master | 2016-09-03T06:35:41.659315 | 2015-05-19T18:00:23 | 2015-05-19T18:00:23 | 35,567,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from osxification.c import Enum
class CFNumberType(Enum):
    """Constants naming the numeric types understood by CFNumber.

    All attributes start out as None placeholders; the
    CFNumberType.addEnum(...) calls at module scope directly below this
    class replace them with the real enum values before registerEnum()
    finalises the type.
    """
    # fixed-width signed integers
    kCFNumberSInt8Type = None
    kCFNumberSInt16Type = None
    kCFNumberSInt32Type = None
    kCFNumberSInt64Type = None
    # fixed-width floating point
    kCFNumberFloat32Type = None
    kCFNumberFloat64Type = None
    # C scalar types
    kCFNumberCharType = None
    kCFNumberShortType = None
    kCFNumberIntType = None
    kCFNumberLongType = None
    kCFNumberLongLongType = None
    kCFNumberFloatType = None
    kCFNumberDoubleType = None
    # framework-specific types
    kCFNumberCFIndexType = None
    kCFNumberNSIntegerType = None
    kCFNumberCGFloatType = None
    kCFNumberMaxType = None
# Populate the enum members declared (as None) on CFNumberType above.
# Note that kCFNumberCGFloatType and kCFNumberMaxType deliberately share
# the value 16, i.e. kCFNumberMaxType aliases the highest defined type.
CFNumberType.addEnum("kCFNumberSInt8Type", 1)
CFNumberType.addEnum("kCFNumberSInt16Type", 2)
CFNumberType.addEnum("kCFNumberSInt32Type", 3)
CFNumberType.addEnum("kCFNumberSInt64Type", 4)
CFNumberType.addEnum("kCFNumberFloat32Type", 5)
CFNumberType.addEnum("kCFNumberFloat64Type", 6)
CFNumberType.addEnum("kCFNumberCharType", 7)
CFNumberType.addEnum("kCFNumberShortType", 8)
CFNumberType.addEnum("kCFNumberIntType", 9)
CFNumberType.addEnum("kCFNumberLongType", 10)
CFNumberType.addEnum("kCFNumberLongLongType", 11)
CFNumberType.addEnum("kCFNumberFloatType", 12)
CFNumberType.addEnum("kCFNumberDoubleType", 13)
CFNumberType.addEnum("kCFNumberCFIndexType", 14)
CFNumberType.addEnum("kCFNumberNSIntegerType", 15)
CFNumberType.addEnum("kCFNumberCGFloatType", 16)
CFNumberType.addEnum("kCFNumberMaxType", 16)
CFNumberType.registerEnum("CFNumberType")
| [
"jepebe@users.noreply.github.com"
] | jepebe@users.noreply.github.com |
47d464054b8f55b003a9117767310c2e27b0ee56 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/data_transformation_resp.py | 57c96c1b9a290c243df869933fe1713af14d0dc7 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,705 | py | # coding: utf-8
import re
import six
class DataTransformationResp:
    """Response model for a DRS data-transformation rule.

    Auto-generated from the swagger definition -- do not edit manually.

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type.
        attribute_map (dict): maps attribute name -> JSON key in the API
            definition.
    """
    # attribute names whose values to_dict() masks as "****"
    sensitive_list = []
    openapi_types = {
        'id': 'str',
        'status': 'str',
        'error_code': 'str',
        'error_msg': 'str'
    }
    attribute_map = {
        'id': 'id',
        'status': 'status',
        'error_code': 'error_code',
        'error_msg': 'error_msg'
    }
    def __init__(self, id=None, status=None, error_code=None, error_msg=None):
        """DataTransformationResp - a model defined in huaweicloud sdk.

        All fields are optional; unset fields stay None.
        """
        self._id = None
        self._status = None
        self._error_code = None
        self._error_msg = None
        self.discriminator = None
        # assign through the property setters only when a value was given
        if id is not None:
            self.id = id
        if status is not None:
            self.status = status
        if error_code is not None:
            self.error_code = error_code
        if error_msg is not None:
            self.error_msg = error_msg
    @property
    def id(self):
        """Gets the id of this DataTransformationResp.

        Task ID.

        :return: The id of this DataTransformationResp.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this DataTransformationResp.

        Task ID.

        :param id: The id of this DataTransformationResp.
        :type: str
        """
        self._id = id
    @property
    def status(self):
        """Gets the status of this DataTransformationResp.

        Status.

        :return: The status of this DataTransformationResp.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this DataTransformationResp.

        Status.

        :param status: The status of this DataTransformationResp.
        :type: str
        """
        self._status = status
    @property
    def error_code(self):
        """Gets the error_code of this DataTransformationResp.

        Error code.

        :return: The error_code of this DataTransformationResp.
        :rtype: str
        """
        return self._error_code
    @error_code.setter
    def error_code(self, error_code):
        """Sets the error_code of this DataTransformationResp.

        Error code.

        :param error_code: The error_code of this DataTransformationResp.
        :type: str
        """
        self._error_code = error_code
    @property
    def error_msg(self):
        """Gets the error_msg of this DataTransformationResp.

        Error message.

        :return: The error_msg of this DataTransformationResp.
        :rtype: str
        """
        return self._error_msg
    @error_msg.setter
    def error_msg(self, error_msg):
        """Sets the error_msg of this DataTransformationResp.

        Error message.

        :param error_msg: The error_msg of this DataTransformationResp.
        :type: str
        """
        self._error_msg = error_msg
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models are converted recursively; attributes listed in
        sensitive_list are replaced by the string "****".
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DataTransformationResp):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
c671c9bc14faeca0c67ea4b50d76925245201787 | 1d7eec692553afc411ec1e7325634f71a2aed291 | /backend/git_real/helpers.py | bf3c422e05de22a0a2d2461619aa0a66fe26c830 | [] | no_license | Andy-Nkumane/Tilde | a41a2a65b3901b92263ae94d527de403f59a5caf | 80de97edaf99f4831ca8cb989b93e3be5e09fdd6 | refs/heads/develop | 2023-05-09T10:02:41.240517 | 2021-05-28T09:20:51 | 2021-05-28T09:20:51 | 299,501,586 | 0 | 0 | null | 2020-10-25T22:37:30 | 2020-09-29T04:10:48 | Python | UTF-8 | Python | false | false | 6,310 | py | import base64
from social_auth.github_api import Api
from git_real import models
from git_real.constants import GITHUB_DATETIME_FORMAT, GITHUB_DEFAULT_TIMEZONE
from timezone_helpers import timestamp_str_to_tz_aware_datetime
from django.http import Http404
def strp_github_standard_time(timestamp: str):
    """Parse a GitHub-format timestamp string into a tz-aware datetime.

    Falsy input (None or "") yields None instead of raising.
    """
    if not timestamp:
        return None
    return timestamp_str_to_tz_aware_datetime(
        timestamp=timestamp,
        dt_format=GITHUB_DATETIME_FORMAT,
        zone_name=GITHUB_DEFAULT_TIMEZONE,
    )
def upload_readme(api, repo_full_name, readme_text):
    """Create README.md in the given repo if it does not exist yet.

    Uses the GitHub contents API, which requires the file body to be
    base64-encoded.  An existing README is left untouched.  Raises
    AssertionError if GitHub does not answer with a 2xx status.
    """
    readme_path = f"repos/{repo_full_name}/contents/README.md"
    try:
        response = api.request(readme_path, json=False)
    except Http404:
        # it doesn't exist. Create it
        response = api.put(
            readme_path,
            {
                "message": "Added README.md",
                # contents API expects base64-encoded text
                "content": base64.b64encode(readme_text.encode("utf-8")).decode(
                    "utf-8"
                ),
            },
            json=False,
        )
    assert str(response.status_code).startswith("2"), f"{response}\n {response.json()}"
def create_org_repo(api, repo_full_name, private=True, exists_ok=False, **post_kwargs):
    """Create a repository under a GitHub organisation and persist it.

    repo_full_name is "org/repo".  If the repo already exists, raise unless
    exists_ok is True.  Extra POST fields can be passed via post_kwargs.
    Returns the saved Repository model instance (via fetch_and_save_repo).
    """
    (org, repo) = repo_full_name.split("/")
    args = {
        "name": repo,
        "private": private,
        # "scopes": ["repo"],
    }
    args.update(post_kwargs)
    result = api.post(f"orgs/{org}/repos", args)
    if "errors" in result:
        if result["errors"][0]["message"] == "name already exists on this account":
            if not exists_ok:
                raise Exception(result)
        else:
            # unhandled error: dump the request args for debugging, then fail
            print("===============")
            print(args)
            print("================")
            raise Exception(result)
    # fetch the canonical repo record back from GitHub and save it locally
    return fetch_and_save_repo(repo_full_name=repo_full_name, api=api)
def _protection_settings(restrictions_users=None, restrictions_teams=None):
restrictions_users = restrictions_users or []
restrictions_teams = restrictions_teams or []
return {
"required_status_checks": None,
"enforce_admins": False,
"required_pull_request_reviews": {
"dismissal_restrictions": {},
"dismiss_stale_reviews": True,
"require_code_owner_reviews": False,
"required_approving_review_count": 2,
},
"dismissal_restrictions": {
"users": restrictions_users,
"teams": restrictions_teams,
},
# "restrictions": {"users": restrictions_users, "teams": restrictions_teams,},
"restrictions": None,
}
def protect_master(api, repo_full_name):
    """Apply the standard branch protection to the repo's `main` branch.

    The luke-cage preview media type is required by GitHub for the
    multiple-required-reviewers fields (see the response comment below).
    """
    response = api.put(
        f"repos/{repo_full_name}/branches/main/protection",
        _protection_settings(),
        headers={"Accept": "application/vnd.github.luke-cage-preview+json"},
    )
    # {'message': "If you would like to help us test the Require Multiple Reviewers API during its preview period, you must specify a custom media type in the 'Accept' header. Please see the docs for full details.", 'documentation_url': 'https://developer.github.com/v3/repos/branches/#update-branch-protection'}
    if "errors" in response:
        raise Exception(response)
def get_repo(repo_full_name, github_auth_login="", api=None, response404=None):
    """Fetch the GitHub metadata dict for "owner/name" via the REST API."""
    client = api or Api(github_auth_login)
    return client.request(f"repos/{repo_full_name}", response404=response404)
def list_collaborators(api, repo_full_name):
    """Query GitHub for the login names of this repo's collaborators."""
    collaborators = api.request(
        f"repos/{repo_full_name}/collaborators",
        json=True,
    )
    return [person["login"] for person in collaborators]
def add_collaborator(api, repo_full_name, github_user_name, github_auth_login=None):
    """Invite/add a GitHub user as a collaborator on the repo.

    Raises on a 404 (unknown user or repo) and on any status other than
    201 (invitation created) or 204 (already a collaborator).
    """
    api = api or Api(github_auth_login)
    # print(list_collaborators(api, repo_full_name))
    response = api.put(
        f"repos/{repo_full_name}/collaborators/{github_user_name}",
        # {"permission": "push"},
        headers={"accept": "application/vnd.github.v3+json"},
        json=False,
        data={},
    )
    # breakpoint()
    if response.status_code == 404:
        raise Exception(f"user or repo not found: {repo_full_name} {github_user_name}")
    if response.status_code not in [201, 204]:
        raise Exception(response.content)
    # Verification attempt kept disabled -- see the note below on why it
    # always raised:
    # collaborators = get_collaborators(github_auth_login, repo_full_name, api=api)
    # if github_user_name not in collaborators:
    # EXCEPTION is always raised because collaborators is a list of dictionaries and github_user_name is a stringz
    # raise Exception(f"Adding collaborator: {github_user_name} unsuccessful.")
def save_repo(repo: dict, user=None):
    """Persist a GitHub repo payload as a Repository model row.

    Looks up by ssh_url; creates the row with the payload's fields if it
    is new.  If the row already exists, only the `archived` flag is
    updated (sticky: once archived it stays archived).  Returns the model
    instance.
    """
    print(f"saving: {repo['full_name']}")
    obj, created = models.Repository.objects.get_or_create(
        ssh_url=repo["ssh_url"],
        defaults={
            "full_name": repo["full_name"],
            "owner": repo["owner"]["login"],
            "ssh_url": repo["ssh_url"],
            "private": repo["private"],
            "created_at": strp_github_standard_time(
                repo["created_at"],
            ),
            "archived": repo["archived"],
            "user": user,
        },
    )
    if not created:
        # `or` keeps a locally archived repo archived even if GitHub says otherwise
        obj.archived = obj.archived or repo["archived"]
        obj.save()
    return obj
def fetch_and_save_repo(api, repo_full_name):
    """Fetch a repo's metadata from GitHub and persist it locally.

    Returns None when GitHub answers 404; otherwise returns the saved
    Repository instance.
    """
    payload = get_repo(api=api, repo_full_name=repo_full_name, response404=404)
    if payload == 404:
        return None
    saved = save_repo(payload)
    assert saved != None
    return saved
# def create_required_webhooks(api, repo_full_name, webhook_url):
# response = api.post(
# f"repos/{repo_full_name}/hooks",
# headers={"accept": "application/vnd.github.v3+json"},
# data={
# "config": {
# "url": webhook_url,
# "content_type": "json",
# "events": [
# # https://docs.github.com/en/developers/webhooks-and-events/github-event-types
# "PullRequestEvent",
# "PullRequestReviewCommentEvent",
# "PushEvent",
# ],
# }
# },
# json=False,
# )
# breakpoint()
# pass
| [
"sheena.oconnell@gmail.com"
] | sheena.oconnell@gmail.com |
0641e580798d05608958c806b1f8e45d5f9962c6 | a0db06c233d73b275c657b14ebc5e87dd91bc5e1 | /benchmark/egfrd/out_BD.py | e4918445bcc9c4c65cd550c9bb825413f0a53011 | [] | no_license | likr/ecell3-spatiocyte | 9a7cd258aa3fbd837ff3867a3cf8e9e99233a19e | 26a3231e9b022a239956938feabab9099baaee97 | refs/heads/master | 2021-01-14T14:16:35.291030 | 2013-03-07T04:27:28 | 2013-03-07T04:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # dt_factor = 1e-05
# Brownian-dynamics benchmark results for eGFRD.  One sub-list per run
# configuration (raw parameters in the per-entry comments); each sub-list
# holds three repeated measurements.  The stored values appear to be
# run_time / T * 10 (they match the quoted run_times exactly under that
# scaling) -- TODO confirm against the script that generated this file.
data_BD = [
    # T=1e-06, N=100, V=1e-12
    # steps= 8001, steps/sec= 7113.922237, steps/N= 80.010000
    # run_times = [1.089055061340332, 1.0960071086883545, 1.1246960163116455]
    [10890550.61340332, 10960071.086883545, 11246960.163116455],
    # T=3.33333e-07, N=300, V=1e-12
    # steps= 2667, steps/sec= 2568.901297, steps/N= 8.890000
    # run_times = [1.025061845779419, 1.037959098815918, 1.038187026977539]
    [30751855.37338257, 31138772.96447754, 31145610.809326172],
    # T=1e-07, N=1000, V=1e-12
    # steps= 801, steps/sec= 730.918151, steps/N= 0.801000
    # run_times = [1.1104531288146973, 1.108593225479126, 1.0958819389343262]
    [111045312.88146971, 110859322.54791258, 109588193.8934326],
    # T=3.33333e-08, N=3000, V=1e-12
    # steps= 267, steps/sec= 228.073911, steps/N= 0.089000
    # run_times = [1.1678550243377686, 1.1749780178070068, 1.170673131942749]
    [350356507.30133057, 352493405.34210205, 351201939.5828247],
    # T=1e-08, N=10000, V=1e-12
    # steps= 81, steps/sec= 66.546508, steps/N= 0.008100
    # run_times = [1.219210147857666, 1.2195098400115967, 1.217193841934204]
    [1219210147.857666, 1219509840.0115967, 1217193841.934204],
    # T=3.33333e-09, N=30000, V=1e-12
    # steps= 27, steps/sec= 17.751455, steps/N= 0.000900
    # run_times = [1.5134360790252686, 1.507871150970459, 1.5210020542144775]
    [4540308237.075806, 4523613452.911377, 4563006162.643433],
    # T=1e-09, N=100000, V=1e-12
    # steps= 8, steps/sec= 3.700389, steps/N= 0.000080
    # run_times = [2.140352964401245, 2.145677089691162, 2.1619350910186768]
    [21403529644.01245, 21456770896.91162, 21619350910.186768],
    ]
| [
"satya.arjunan@gmail.com"
] | satya.arjunan@gmail.com |
adc2011e57740926b2a60ca255e2411fb52098be | d34f82e7aa5da1d535f30ba7c0be6f8efff18107 | /backend/spaceshooter_3559/settings.py | 1a6e5daa445b9e858f47516c4f4629810933b373 | [] | no_license | crowdbotics-apps/spaceshooter-3559 | cc5f5c7e45e7db8733de12cbd53de15b6a01ce1c | 49d2a7f8fbba710dc74ffc5f3add81d4f72a9cf5 | refs/heads/master | 2020-05-25T00:02:37.230248 | 2019-05-19T20:49:44 | 2019-05-19T20:49:44 | 187,526,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | """
Django settings for spaceshooter_3559 project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; it should be read
# from the environment (e.g. env.str("SECRET_KEY")) before any real deploy.
SECRET_KEY = '8)x@grvzi66tmeplo^=4j405!7=ycfx_qj@ic2mp*u9zbpm+b5'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG and ALLOWED_HOSTS are both re-assigned further down in
# the generated override section, so these initial values are dead.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spaceshooter_3559.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'spaceshooter_3559.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): the sqlite default is replaced by DATABASE_URL below when set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# --- crowdbotics-generated overrides below: environment-driven settings ---
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# SendGrid SMTP credentials come from the environment
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2431b176a55f8c6554eaee9d4f08a9dd7b51f8d2 | f4f181f2c970a163801b4202fc8d6c92a4e8113d | /google-cloud-sdk/lib/googlecloudsdk/core/cache/exceptions.py | 2a53ffbf78a8f4a08d4a1c7348e5267837dd01bf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | Sorsly/subtle | 7732a6cb910f5e2f4eed1ac0d3b5979001582340 | 718e79a3e04f1f57f39b6ebe90dec9e028e88d40 | refs/heads/master | 2021-05-24T01:21:39.218495 | 2017-10-28T01:33:58 | 2017-10-28T01:33:58 | 83,103,372 | 0 | 1 | MIT | 2020-07-25T11:21:05 | 2017-02-25T03:33:07 | Python | UTF-8 | Python | false | false | 1,730 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions for the Cloud SDK persistent cache module."""
class Error(Exception):
"""Base for all persistent cache exceptions."""
class CacheVersionMismatch(Error):
"""Cache version mismatch."""
def __init__(self, message, actual, requested):
super(CacheVersionMismatch, self).__init__(message)
self.actual = actual
self.requested = requested
class CacheInvalid(Error):
"""Cach object is invalid."""
class CacheNameInvalid(Error):
"""Name is not a valid cache name."""
class CacheNotFound(Error):
"""Cache not found."""
class CacheTableDeleted(Error):
"""Cache table deleted."""
class CacheTableExpired(Error):
"""Cache table expired."""
class CacheTableRestricted(Error):
"""Cache table is restricted."""
class CacheTableNameInvalid(Error):
"""Cache table invalid table name."""
class CacheTableColumnsInvalid(Error):
"""Cache table columns invalid."""
class CacheTableKeysInvalid(Error):
"""Cache table keys invalid."""
class CacheTableNotFound(Error):
"""Cache table not found."""
class CacheTableRowSizeInvalid(Error):
"""Cache table row has incorrect size."""
| [
"han300@purdue.edu"
] | han300@purdue.edu |
15cb0c801573dc6c1fa20b20b008cf2ebbbab028 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/protocol_version.py | ecf2c99557eb577188be0e061a75187b063fc04b | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,888 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProtocolVersion(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'name': 'str'
}
attribute_map = {
'enabled': 'enabled',
'name': 'name'
}
def __init__(self, enabled=None, name=None): # noqa: E501
"""ProtocolVersion - a model defined in Swagger""" # noqa: E501
self._enabled = None
self._name = None
self.discriminator = None
self.enabled = enabled
self.name = name
@property
def enabled(self):
"""Gets the enabled of this ProtocolVersion. # noqa: E501
Enable status for this protocol version # noqa: E501
:return: The enabled of this ProtocolVersion. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ProtocolVersion.
Enable status for this protocol version # noqa: E501
:param enabled: The enabled of this ProtocolVersion. # noqa: E501
:type: bool
"""
if enabled is None:
raise ValueError("Invalid value for `enabled`, must not be `None`") # noqa: E501
self._enabled = enabled
@property
def name(self):
"""Gets the name of this ProtocolVersion. # noqa: E501
Name of the TLS protocol version # noqa: E501
:return: The name of this ProtocolVersion. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProtocolVersion.
Name of the TLS protocol version # noqa: E501
:param name: The name of this ProtocolVersion. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProtocolVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tcraft@pivotal.io"
] | tcraft@pivotal.io |
fe576e708d4593189a39418e127740d8fb4917db | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/4cb959150b31b8e8000fbce71583b4bae828e7b0-<test_delete_install>-fix.py | 57a327550c539cacc60b763701c92aeef81d3564 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | @responses.activate
def test_delete_install(self):
responses.add(url='https://example.com/webhook', method=responses.POST, body={
})
self.login_as(user=self.user)
response = self.client.delete(self.url, format='json')
assert (response.status_code == 204) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
07f7368b4fbb438613eaef2bbd3af716121c3aad | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/the_vault_ltvi.py | 72df20a16aef7097c9ee51ce4e86b218c5dc699f | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class the_vault(LiveTVIndexer):
implements = [LiveTVIndexer]
display_name = "The Vault"
name = "the_vault"
other_names = "the_vault,The Vault"
import xbmcaddon
import os
addon_id = 'script.icechannel.extn.extra.uk'
addon = xbmcaddon.Addon(addon_id)
img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )
regions = [
{
'name':'United Kingdom',
'img':addon.getAddonInfo('icon'),
'fanart':addon.getAddonInfo('fanart')
},
]
languages = [
{'name':'English', 'img':'', 'fanart':''},
]
genres = [
{'name':'Music', 'img':'', 'fanart':''}
]
addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
b26abcb7e4a798915466f32474e2d44d8fdea758 | 4a7462f65826586edccfe5709259603d53da5b10 | /presentation/scripts/test-6.py | 092c5bf4ea0afc58c86889083599969770c3bec5 | [] | no_license | ctn-archive/bekolay-fnme2015 | 846ba3bdfdae121cd1ca10de81d5eae2e570e84f | ad85f5a6f33031d5229344f92ba1df60f4515488 | refs/heads/master | 2021-01-18T01:45:50.708267 | 2015-11-04T22:40:43 | 2015-11-05T09:59:54 | 39,850,277 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import nengo; import numpy as np
from nengo.tests.conftest import Simulator, plt, seed
from nengo.tests.conftest import pytest_generate_tests
def test_ensemble(Simulator, nl, seed, plt):
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node([0.5])
ens = nengo.Ensemble(40, dimensions=1)
nengo.Connection(stim, ens)
probe = nengo.Probe(ens, synapse=0.05)
sim = Simulator(model)
sim.run(0.5)
plt.plot(sim.trange(), sim.data[probe])
assert np.allclose(
sim.data[probe][sim.trange() > 0.4], 0.5, atol=0.1)
| [
"tbekolay@gmail.com"
] | tbekolay@gmail.com |
c4972e52a17ca8eb8805acb3b86cdc7e6b6a6934 | 62b84f877ccb4171f558c225fa0fdd4fd2c44d6c | /tests/counter_mnist.py | b97377f0211334b4226a6d8638c506e786fe1952 | [] | no_license | guicho271828/latplan | b6dfb55f3cceac947df770fb623d496111f9ab19 | 75a2fc773de245b422a695b51fccaf17294da123 | refs/heads/master | 2022-10-25T02:02:05.547143 | 2022-03-25T20:42:06 | 2022-03-25T20:59:29 | 96,482,151 | 77 | 19 | null | 2023-03-04T14:10:46 | 2017-07-07T00:11:52 | Python | UTF-8 | Python | false | false | 784 | py | #!/usr/bin/env python3
import numpy as np
import sys
sys.path.append('../../')
from latplan.puzzles.counter_mnist import generate_configs, successors, generate, states, transitions
from plot import plot_image, plot_grid
configs = generate_configs(10)
puzzles = generate(configs)
print(puzzles[9])
plot_image(puzzles[9],"counter_mnist.png")
plot_grid(puzzles[:36],"counter_mnists.png")
_transitions = transitions(10)
import numpy.random as random
indices = random.randint(0,_transitions[0].shape[0],18)
_transitions = _transitions[:,indices]
print(_transitions.shape)
transitions_for_show = \
np.einsum('ba...->ab...',_transitions) \
.reshape((-1,)+_transitions.shape[2:])
print(transitions_for_show.shape)
plot_grid(transitions_for_show,"counter_mnist_transitions.png")
| [
"guicho2.71828@gmail.com"
] | guicho2.71828@gmail.com |
414441c3ce1089e1e1406ee76644be8bf0e77341 | 271c7959a39f3d7ff63dddf285004fd5badee4d9 | /venv/Lib/site-packages/netaddr/strategy/eui64.py | 03de537533557e72ce24476b3810c9d5fcf1cc2b | [
"MIT"
] | permissive | natemellendorf/configpy | b6b01ea4db1f2b9109fd4ddb860e9977316ed964 | 750da5eaef33cede9f3ef532453d63e507f34a2c | refs/heads/master | 2022-12-11T05:22:54.289720 | 2019-07-22T05:26:09 | 2019-07-22T05:26:09 | 176,197,442 | 4 | 1 | MIT | 2022-12-08T02:48:51 | 2019-03-18T03:24:12 | Python | UTF-8 | Python | false | false | 7,707 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2008 by David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IEEE 64-bit EUI (Extended Unique Indentifier) logic.
"""
import struct as _struct
import re as _re
from netaddr.core import AddrFormatError
from netaddr.strategy import (
valid_words as _valid_words, int_to_words as _int_to_words,
words_to_int as _words_to_int, valid_bits as _valid_bits,
bits_to_int as _bits_to_int, int_to_bits as _int_to_bits,
valid_bin as _valid_bin, int_to_bin as _int_to_bin,
bin_to_int as _bin_to_int)
# This is a fake constant that doesn't really exist. Here for completeness.
AF_EUI64 = 64
#: The width (in bits) of this address type.
width = 64
#: The AF_* constant value of this address type.
family = AF_EUI64
#: A friendly string name address type.
family_name = 'EUI-64'
#: The version of this address type.
version = 64
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#-----------------------------------------------------------------------------
# Dialect classes.
#-----------------------------------------------------------------------------
class eui64_base(object):
"""A standard IEEE EUI-64 dialect class."""
#: The individual word size (in bits) of this address type.
word_size = 8
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: The separator character used between each word.
word_sep = '-'
#: The format string to be used when converting words to string values.
word_fmt = '%.2X'
#: The number base to be used when interpreting word values as integers.
word_base = 16
class eui64_unix(eui64_base):
"""A UNIX-style MAC address dialect class."""
word_size = 8
num_words = width // word_size
word_sep = ':'
word_fmt = '%x'
word_base = 16
class eui64_unix_expanded(eui64_unix):
"""A UNIX-style MAC address dialect class with leading zeroes."""
word_fmt = '%.2x'
class eui64_cisco(eui64_base):
"""A Cisco 'triple hextet' MAC address dialect class."""
word_size = 16
num_words = width // word_size
word_sep = '.'
word_fmt = '%.4x'
word_base = 16
class eui64_bare(eui64_base):
"""A bare (no delimiters) MAC address dialect class."""
word_size = 64
num_words = width // word_size
word_sep = ''
word_fmt = '%.16X'
word_base = 16
#: The default dialect to be used when not specified by the user.
DEFAULT_EUI64_DIALECT = eui64_base
#-----------------------------------------------------------------------------
#: Regular expressions to match all supported MAC address formats.
RE_EUI64_FORMATS = (
# 2 bytes x 8 (UNIX, Windows, EUI-64)
'^' + ':'.join(['([0-9A-F]{1,2})'] * 8) + '$',
'^' + '-'.join(['([0-9A-F]{1,2})'] * 8) + '$',
# 4 bytes x 4 (Cisco like)
'^' + ':'.join(['([0-9A-F]{1,4})'] * 4) + '$',
'^' + '-'.join(['([0-9A-F]{1,4})'] * 4) + '$',
'^' + '\.'.join(['([0-9A-F]{1,4})'] * 4) + '$',
# 16 bytes (bare, no delimiters)
'^(' + ''.join(['[0-9A-F]'] * 16) + ')$',
)
# For efficiency, each string regexp converted in place to its compiled
# counterpart.
RE_EUI64_FORMATS = [_re.compile(_, _re.IGNORECASE) for _ in RE_EUI64_FORMATS]
def _get_match_result(address, formats):
for regexp in formats:
match = regexp.findall(address)
if match:
return match[0]
def valid_str(addr):
"""
:param addr: An IEEE EUI-64 indentifier in string form.
:return: ``True`` if EUI-64 indentifier is valid, ``False`` otherwise.
"""
try:
if _get_match_result(addr, RE_EUI64_FORMATS):
return True
except TypeError:
pass
return False
def str_to_int(addr):
"""
:param addr: An IEEE EUI-64 indentifier in string form.
:return: An unsigned integer that is equivalent to value represented
by EUI-64 string address formatted according to the dialect
"""
words = []
try:
words = _get_match_result(addr, RE_EUI64_FORMATS)
if not words:
raise TypeError
except TypeError:
raise AddrFormatError('invalid IEEE EUI-64 identifier: %r!' % addr)
if isinstance(words, tuple):
pass
else:
words = (words,)
if len(words) == 8:
# 2 bytes x 8 (UNIX, Windows, EUI-48)
int_val = int(''.join(['%.2x' % int(w, 16) for w in words]), 16)
elif len(words) == 4:
# 4 bytes x 4 (Cisco like)
int_val = int(''.join(['%.4x' % int(w, 16) for w in words]), 16)
elif len(words) == 1:
# 16 bytes (bare, no delimiters)
int_val = int('%016x' % int(words[0], 16), 16)
else:
raise AddrFormatError(
'bad word count for EUI-64 identifier: %r!' % addr)
return int_val
def int_to_str(int_val, dialect=None):
"""
:param int_val: An unsigned integer.
:param dialect: (optional) a Python class defining formatting options
:return: An IEEE EUI-64 identifier that is equivalent to unsigned integer.
"""
if dialect is None:
dialect = eui64_base
words = int_to_words(int_val, dialect)
tokens = [dialect.word_fmt % i for i in words]
addr = dialect.word_sep.join(tokens)
return addr
def int_to_packed(int_val):
"""
:param int_val: the integer to be packed.
:return: a packed string that is equivalent to value represented by an
unsigned integer.
"""
words = int_to_words(int_val)
return _struct.pack('>8B', *words)
def packed_to_int(packed_int):
"""
:param packed_int: a packed string containing an unsigned integer.
It is assumed that string is packed in network byte order.
:return: An unsigned integer equivalent to value of network address
represented by packed binary string.
"""
words = list(_struct.unpack('>8B', packed_int))
int_val = 0
for i, num in enumerate(reversed(words)):
word = num
word = word << 8 * i
int_val = int_val | word
return int_val
def valid_words(words, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _valid_words(words, dialect.word_size, dialect.num_words)
def int_to_words(int_val, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _int_to_words(int_val, dialect.word_size, dialect.num_words)
def words_to_int(words, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _words_to_int(words, dialect.word_size, dialect.num_words)
def valid_bits(bits, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _valid_bits(bits, width, dialect.word_sep)
def bits_to_int(bits, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _bits_to_int(bits, width, dialect.word_sep)
def int_to_bits(int_val, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _int_to_bits(
int_val, dialect.word_size, dialect.num_words, dialect.word_sep)
def valid_bin(bin_val, dialect=None):
if dialect is None:
dialect = DEFAULT_EUI64_DIALECT
return _valid_bin(bin_val, width)
def int_to_bin(int_val):
return _int_to_bin(int_val, width)
def bin_to_int(bin_val):
return _bin_to_int(bin_val, width)
| [
"nate.mellendorf@gmail.com"
] | nate.mellendorf@gmail.com |
af477fc6a0296522ff4102bc09ec1664af163abf | 868e1bc0cbdbab12365c293656ee7a2a1373cac1 | /config.py | f3dc3803dc659577659c8068ac224dd0d5d08ec0 | [
"MIT"
] | permissive | xavierxross/nazurin | 15b811fbca984fe17f8d19fba5ab07c7517e5a69 | 9703781b14f626c39388c716cd412441198eb7e3 | refs/heads/master | 2023-02-17T05:37:03.095408 | 2021-01-13T14:09:49 | 2021-01-13T14:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from ast import literal_eval
from os import environ
ENV = environ.get('ENV', 'production')
TOKEN = environ.get('TOKEN')
# Webhook url, eg: https://xxx.herokuapp.com/, should end with '/'
WEBHOOK_URL = environ.get('WEBHOOK_URL')
# Port is given by Heroku
PORT = int(environ.get('PORT', '8443'))
TEMP_DIR = './temp/'
STORAGE = literal_eval(environ.get('STORAGE', "['Local']"))
STORAGE_DIR = environ.get('STORAGE_DIR', 'Pictures')
DATABASE = environ.get('DATABASE', 'Local')
# nazurin data collection in database
NAZURIN_DATA = 'nazurin'
ALBUM_ID = int(environ.get('ALBUM_ID'))
GALLERY_ID = int(environ.get('GALLERY_ID'))
ADMIN_ID = int(environ.get('ADMIN_ID'))
UA = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
RETRIES = 5 | [
"yyoung2001@gmail.com"
] | yyoung2001@gmail.com |
92410f6f12b802736db44b21188677bab8cbad4f | 6f23adb3da803dda89e21cfa21a024a015ec1710 | /2019/1-2.py | 800457275fe7eb521f7e86398de742615d6dee85 | [] | no_license | Remboooo/adventofcode | 1478252bcb19c0dd19e4fa2effd355ee71a5d349 | 5647b8eddd0a3c7781a9c21019f6f06f6edc09bd | refs/heads/master | 2022-12-15T10:21:29.219459 | 2022-12-13T23:02:03 | 2022-12-13T23:02:03 | 226,883,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from argparse import ArgumentParser
def get_fuel(mass):
fuel = max(0, mass // 3 - 2)
if fuel > 0:
fuel += get_fuel(fuel)
return fuel
def main():
argparse = ArgumentParser()
argparse.add_argument("file", type=str)
args = argparse.parse_args()
with open(args.file, "r") as f:
print(sum(get_fuel(int(l)) for l in f))
if __name__ == '__main__':
main()
| [
"rembrand.vanlakwijk@nedap.com"
] | rembrand.vanlakwijk@nedap.com |
d401750857d7f3143f7269271b3de9fba9186096 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/demo/bottom_up_img_demo.py | a3738019b2ae94fbc79da7904081f7b92e5575c7 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,366 | py | # -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from argparse import ArgumentParser
from xtcocotools.coco import COCO
from mmpose.apis import (inference_bottom_up_pose_model, init_pose_model,
vis_pose_result)
def main():
"""Visualize the demo images."""
parser = ArgumentParser()
parser.add_argument('pose_config', help='Config file for detection')
parser.add_argument('pose_checkpoint', help='Checkpoint file')
parser.add_argument('--img-root', type=str, default='', help='Image root')
parser.add_argument(
'--json-file',
type=str,
default='',
help='Json file containing image info.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--out-img-root',
type=str,
default='',
help='Root of the output img file. '
'Default not saving the visualization images.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
args = parser.parse_args()
assert args.show or (args.out_img_root != '')
coco = COCO(args.json_file)
# build the pose model from a config file and a checkpoint file
pose_model = init_pose_model(
args.pose_config, args.pose_checkpoint, device=args.device)
dataset = pose_model.cfg.data['test']['type']
assert (dataset == 'BottomUpCocoDataset')
img_keys = list(coco.imgs.keys())
# optional
return_heatmap = False
# e.g. use ('backbone', ) to return backbone feature
output_layer_names = None
# process each image
for i in range(len(img_keys)):
image_id = img_keys[i]
image = coco.loadImgs(image_id)[0]
image_name = os.path.join(args.img_root, image['file_name'])
# test a single image, with a list of bboxes.
pose_results, returned_outputs = inference_bottom_up_pose_model(
pose_model,
image_name,
return_heatmap=return_heatmap,
outputs=output_layer_names)
if args.out_img_root == '':
out_file = None
else:
os.makedirs(args.out_img_root, exist_ok=True)
out_file = os.path.join(args.out_img_root, f'vis_{i}.jpg')
# show the results
vis_pose_result(
pose_model,
image_name,
pose_results,
dataset=dataset,
kpt_score_thr=args.kpt_thr,
show=args.show,
out_file=out_file)
if __name__ == '__main__':
main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
7766359a647d8cdbdc258f8c7528ab053da15bfe | 5c6137c33283e479cb61ad1cf3d5381c528bfbf3 | /11-dnn-keras/mnist_ann.py | 4f21c7800fa78e2da5376b4cf53c9dc5106320d2 | [
"Apache-2.0"
] | permissive | iproduct/course-social-robotics | 4d2ff7e8df701f3d2a009af48c84d160c3dc8bb8 | dcdc6f5a947413510a030b9b89639fc804777c0d | refs/heads/master | 2023-07-20T13:03:19.623265 | 2023-06-09T14:50:01 | 2023-06-09T14:50:01 | 32,006,612 | 15 | 4 | NOASSERTION | 2023-07-13T07:19:01 | 2015-03-11T08:31:43 | JavaScript | UTF-8 | Python | false | false | 1,664 | py | import datetime
from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
import tensorflow as tf
import os
if __name__ == '__main__':
os.environ["XLA_FLAGS"] = '--xla_gpu_cuda_data_dir="D:/Program Files/CUDA/v11.2/development"'
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True) # important!
tf.config.optimizer.set_jit(True)
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
network.summary()
network.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
logdir = os.path.join("logs", datetime.datetime.now().strftime("!%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
network.fit(train_images, train_labels, epochs=5, batch_size=128, callbacks=[tensorboard_callback])
test_loss, test_acc = network.evaluate(test_images, test_labels)
print(f'Test Accuracy: {test_acc}')
print(f'Test Loss: {test_loss}')
print('Demo finished')
| [
"office@iproduct.org"
] | office@iproduct.org |
dd25971e8d004a3482e8472b283f9ec585583bbc | 1fb9816f9c63a1dcfa5f8b18247e54725bc43ea5 | /django_inmo/apps/solicitudes/models.py | 0634e7918120a45616e303819f20765b4e0bb467 | [] | no_license | juanros13/inmo | c83860e6cb76a4c15f7d2128954adfb040992f9b | 3d70c4539d82056019f9851dbe35616342fc2359 | refs/heads/master | 2020-04-06T06:58:11.401762 | 2016-09-02T04:05:08 | 2016-09-02T04:05:08 | 63,124,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | import datetime
from django.db import models
from django.contrib.auth.models import User
from apps.inmuebles.models import Departamento
from django.db.models.signals import post_save
class Mantenimiento(models.Model):
usuario_creo = models.ForeignKey(
User
)
departamento = models.ForeignKey(
Departamento
)
problema = models.CharField(
max_length=450
)
descripcion = models.TextField()
fecha_creacion = models.DateTimeField(editable=False)
fecha_modificacion = models.DateTimeField(editable=False)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.fecha_creacion = datetime.datetime.today()
self.fecha_modificacion = datetime.datetime.today()
super(Mantenimiento, self).save(*args, **kwargs)
class ComentarioMantenimiento(models.Model):
comentario = models.TextField()
mantenimiento = models.ForeignKey(
Mantenimiento
)
usuario_creo = models.ForeignKey(
User
)
fecha_creacion = models.DateTimeField(editable=False)
fecha_modificacion = models.DateTimeField(editable=False)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.fecha_creacion = datetime.datetime.today()
self.fecha_modificacion = datetime.datetime.today()
super(ComentarioMantenimiento, self).save(*args, **kwargs)
def enviar_mail_mantenimiento(sender, **kwargs):
obj = kwargs['instance']
departamento = Departamento.objects.filter(pk=obj.departamento.pk, idusuario=obj.usuario.get_profile().id_inquilino)[0]
#responsables = Responsable.objects.filter(edificio=departamento.edificio)
# Enviando el correo de confirmacion operario
subject, from_email, to = 'PLUMBAGO - Nuevo mantenimiento - %s ' % obj, 'juanros13@gmail.com', ['juanros13@gmail.com', 'edgarcisneros88@gmail.com', 'alejandro@poware.com']
html_content = render_to_string('include/mail_mantenimiento.html', {
'edificio':departamento.edificio,
'departamento':departamento,
'mantenimiento':obj,
'usuario':obj.usuario.get_profile(),
'correo':obj.usuario.email
})
text_content = strip_tags(html_content) # this strips the html, so people will have the text as well.
# create the email, and attach the HTML version as well.
mail = EmailMultiAlternatives(subject, text_content, from_email, to)
mail.attach_alternative(html_content, "text/html")
mail.send()
# Enviando el correo de confirmacion usuario
subject, from_email, to = 'PLUMBAGO - Se ha creado un nuevo mantenimiento', 'juanros13@gmail.com', ['juanros13@gmail.com', 'edgarcisneros88@gmail.com', 'alejandro@poware.com']
html_content = render_to_string('include/mail_mantenimiento_usuario.html', {
'mantenimiento':obj,
})
text_content = strip_tags(html_content) # this strips the html, so people will have the text as well.
# create the email, and attach the HTML version as well.
mail = EmailMultiAlternatives(subject, text_content, from_email, to)
mail.attach_alternative(html_content, "text/html")
mail.send()
post_save.connect(enviar_mail_mantenimiento, sender=Mantenimiento) | [
"juanros13@gmail.com"
] | juanros13@gmail.com |
179f735b28effe5d26e924e9863035f844aa0393 | 4142b8c513d87361da196631f7edd82f11465abb | /python/1283A.py | d82d8b25daa008c44354bd181c3dca5782eed666 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | n = int(input())
for i in range(n):
h, m = list(map(int, input().split()))
res = 0
if m == 0:
res = (24 - h) * 60
else:
res = (23 - h) * 60 + (60 - m)
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
81cb9106ab0d1bacd68813c487ba163e70be8a05 | 4ef31d0f04f4d6d7725a530bffb1a4b115283d6f | /site/_build/jupyter_execute/notebooks/09-deep-learning1/05-pytorch-mnist.py | 7805d77a7f8ab7a187bc5a2ee4bc298603f5b7ff | [
"MIT"
] | permissive | rpi-techfundamentals/introml_website_fall_2020 | 98bb1cc4712f416b393b996b849f39c660167057 | b85e5c297954bcaae565a8d25a18d2904d40f543 | refs/heads/master | 2023-07-14T16:49:21.625260 | 2020-12-10T17:51:34 | 2020-12-10T17:51:34 | 287,033,509 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | [](http://rpi.analyticsdojo.com)
<center><h1>Pytorch with the MNIST Dataset - MINST</h1></center>
<center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center>
[](https://colab.research.google.com/github/rpi-techfundamentals/spring2019-materials/blob/master/11-deep-learning1/04_pytorch_mnist.ipynb)
# PyTorch Deep Explainer MNIST example
A simple example showing how to explain an MNIST CNN trained using PyTorch with Deep Explainer.
Adopted from: https://www.kaggle.com/ceshine/pytorch-deep-explainer-mnist-example
### Install the modified SHAP package
!pip install https://github.com/ceshine/shap/archive/master.zip
### Proceed
import torch, torchvision
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import functional as F
import numpy as np
import shap
## Set Parameters for Neural Network
- Convolutional Neural network followed by fully connected.
batch_size = 128
num_epochs = 2
device = torch.device('cpu')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(1, 10, kernel_size=5),
nn.MaxPool2d(2),
nn.ReLU(),
nn.Conv2d(10, 20, kernel_size=5),
nn.Dropout(),
nn.MaxPool2d(2),
nn.ReLU(),
)
self.fc_layers = nn.Sequential(
nn.Linear(320, 50),
nn.ReLU(),
nn.Dropout(),
nn.Linear(50, 10),
nn.Softmax(dim=1)
)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(-1, 320)
x = self.fc_layers(x)
return x
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output.log(), target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output.log(), target).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('mnist_data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('mnist_data', train=False, transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=batch_size, shuffle=True)
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(1, num_epochs + 1):
train(model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
# since shuffle=True, this is a random sample of test data
batch = next(iter(test_loader))
images, _ = batch
background = images[:100]
test_images = images[100:103]
e = shap.DeepExplainer(model, background)
shap_values = e.shap_values(test_images)
shap_numpy = [np.swapaxes(np.swapaxes(s, 1, -1), 1, 2) for s in shap_values]
test_numpy = np.swapaxes(np.swapaxes(test_images.numpy(), 1, -1), 1, 2)
# plot the feature attributions
shap.image_plot(shap_numpy, -test_numpy)
The plot above shows the explanations for each class on four predictions. Note that the explanations are ordered for the classes 0-9 going left to right along the rows. | [
"jkuruzovich@gmail.com"
] | jkuruzovich@gmail.com |
ae59dd6e9cfbeb11a8ca44b432223362fd172702 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03607/s162140997.py | 54020c8894a588eeb5032c387da605123b0a3e76 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import sys
sys.setrecursionlimit(500000)
def input():
return sys.stdin.readline()[:-1]
def mi():
return map(int, input().split())
def ii():
return int(input())
def i2(n):
tmp = [list(mi()) for i in range(n)]
return [list(i) for i in zip(*tmp)]
def main():
N = ii()
A = [ii() for _ in range(N)]
dic = {}
for a in A:
if (not a in dic) or (dic[a] == 0):
dic[a] = 1
else:
dic[a] = 0
print(sum(dic.values()))
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c777e77fd91ee0ea1bd155528af27da135fb9698 | 7b28649a9635c1ef4501117f91a410e44742c175 | /tunobase/core/constants.py | bf503544a57ef18d1a275b841469b1bfcc1f9265 | [] | no_license | unomena/tunobase-core | 311a0e9406c0898a48101d743528ab08faa55d3b | fd24e378c87407131805fa56ade8669fceec8dfa | refs/heads/master | 2016-09-05T22:59:11.254754 | 2016-01-06T08:41:36 | 2016-01-06T08:41:36 | 39,237,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | '''
CORE APP
'''
STATE_PUBLISHED = 0
STATE_UNPUBLISHED = 1
STATE_STAGED = 2
STATE_DELETED = 3
STATE_CHOICES = (
(STATE_PUBLISHED, 'Published'),
(STATE_UNPUBLISHED, 'Unpublished'),
(STATE_STAGED, 'Staged'),
(STATE_DELETED, 'Deleted'),
)
PERMITTED_STATE = [STATE_PUBLISHED, STATE_STAGED] | [
"euan@unomena.com"
] | euan@unomena.com |
7678faa1454e94cba32949b71761c79f3f38cd97 | 06984002a22f41b6eb63f9bdf3eb3529792d766f | /trunk/keystone-debian/tests/test_exception.py | c74a60c6c56b5e05dbff01f0dc274839404564a6 | [
"Apache-2.0"
] | permissive | lixmgl/Intern_OpenStack_Swift | d6195c25cd59dfe603203f727ed409a61891a3bf | 40c241319c6b9a7aabacc9d927486864d13b8055 | refs/heads/master | 2020-04-14T20:40:15.496239 | 2015-08-06T22:24:38 | 2015-08-06T22:24:38 | 40,329,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import json
from keystone.common import wsgi
from keystone import exception
from keystone import test
class ExceptionTestCase(test.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def assertValidJsonRendering(self, e):
resp = wsgi.render_exception(e)
self.assertEqual(resp.status_int, e.code)
self.assertEqual(resp.status, '%s %s' % (e.code, e.title))
j = json.loads(resp.body)
self.assertIsNotNone(j.get('error'))
self.assertIsNotNone(j['error'].get('code'))
self.assertIsNotNone(j['error'].get('title'))
self.assertIsNotNone(j['error'].get('message'))
self.assertNotIn('\n', j['error']['message'])
self.assertNotIn(' ', j['error']['message'])
self.assertTrue(type(j['error']['code']) is int)
def test_all_json_renderings(self):
"""Everything callable in the exception module should be renderable.
... except for the base error class (exception.Error), which is not
user-facing.
This test provides a custom message to bypass docstring parsing, which
should be tested seperately.
"""
for cls in [x for x in exception.__dict__.values() if callable(x)]:
if cls is not exception.Error:
self.assertValidJsonRendering(cls(message='Overriden.'))
def test_validation_error(self):
target = uuid.uuid4().hex
attribute = uuid.uuid4().hex
e = exception.ValidationError(target=target, attribute=attribute)
self.assertValidJsonRendering(e)
self.assertIn(target, str(e))
self.assertIn(attribute, str(e))
def test_forbidden_action(self):
action = uuid.uuid4().hex
e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
self.assertIn(action, str(e))
def test_not_found(self):
target = uuid.uuid4().hex
e = exception.NotFound(target=target)
self.assertValidJsonRendering(e)
self.assertIn(target, str(e))
| [
"lixmgl@gmail.com"
] | lixmgl@gmail.com |
6acc39766857da7618a51b01c84e116ee615a3ff | 817ff801938d25776b2564b3087c8a3c674da1a7 | /NUP153_AnalyseComplex/Mutation_BindingAffinity/chainEFV/G1413M_chainEFV.py | c8b31df94c58e072e505d58db0b8527e3920361b | [] | no_license | yanghaobojordan/HIV1-Capsid | b22e21a9ad530ae11f128f409e298c5ab68871ee | f44f04dc9886e660c1fe870936c48e0e5bb5adc6 | refs/heads/main | 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,501 | py | from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
filename=sys.argv[1]
pose=pose_from_pdb(filename)
scorefxn=get_fa_scorefxn()
mutate_residue(pose, pose.pdb_info().pdb2pose('V', 1413), "M")
MC(pose, scorefxn, "M")
def MC(pose, scorefxn, mutant):
test=Pose()
test.assign(pose)
dumpfile = 'G1413'+str(mutant)+'_chainEFV.pdb'
txtfile = 'G1413'+str(mutant)+'_chainEFV.txt'
moveList= 'G1413'+str(mutant)+'_chainEFV_MoveList.txt'
move_list_file=open(moveList, 'w')
newfile = open(txtfile, "w")
newfile.write(str(scorefxn(test)))
newfile.write('\n')
kT = 1
mc = MonteCarlo(test, scorefxn, kT)
count=0
move_list=[]
residue=int(test.pdb_info().pdb2pose('V', 1413))
residue=test.residue(residue).xyz("CA")
for i in range(1, test.total_residue()+1):
i_residue=test.residue(i).xyz("CA")
if (residue-i_residue).norm()<10:
move_list.append(i)
count +=1
move_list_file.write(str(count))
move_list_file.write('\n')
for i in move_list:
move_list_file.write(str(pose.pdb_info().pose2pdb(i)))
move_list_file.write(' ')
move_list_file.write(pose.residue(i).name())
move_list_file.write('\n')
move_list_file.close()
min_mover = MinMover()
mm = MoveMap()
mm.set_bb(False)
mm.set_chi(False)
for i in move_list:
mm.set_bb(i, True)
mm.set_chi(i, True)
min_mover.movemap(mm)
min_mover.score_function(scorefxn)
min_mover.min_type("dfpmin")
min_mover.tolerance(0.001)
smallmover=SmallMover(mm, kT, 1) #1 is the number of moves
#smallmover.angle_max(7)
shearmover=ShearMover(mm, kT, 1) #1 is the number of moves
#shearmover.angle_max(7)
task_pack = standard_packer_task(test)
task_pack.restrict_to_repacking()
task_pack.or_include_current(True)
task_pack.temporarily_fix_everything()
for i in move_list:
task_pack.temporarily_set_pack_residue(i,True)
pack_mover=PackRotamersMover(scorefxn, task_pack)
combined_mover = SequenceMover()
combined_mover.add_mover(smallmover)
combined_mover.add_mover(shearmover)
combined_mover.add_mover(min_mover)
trial_mover = TrialMover(combined_mover, mc)
for i in range (20):
pack_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Repacking Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write('Repacking_Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
for i in range(5000):
trial_mover.apply(test)
#mc.boltzmann(test)
#print scorefxn(test), i
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
newfile.write('Minimization Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
newfile.write('RMSD')
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('Acceptance Rate')
newfile.write(' ')
newfile.write(str(trial_mover.acceptance_rate()))
newfile.close()
test.dump_pdb(dumpfile)
print('Lowest Score ', scorefxn(test))
print("Number of Acceptances: ", trial_mover.num_accepts())
print("Acceptance Rate: ", trial_mover.acceptance_rate())
main()
| [
"yanghaobojordan@gmail.com"
] | yanghaobojordan@gmail.com |
75a3c01092d3f26b7e6532b9259cda3dc5a8da42 | 6e94333f805544e8b0a640e37638139e74084cbe | /effectlayer_demo.py | 99042ae4b4144b59ebe62820ae391049e6e1def5 | [
"MIT"
] | permissive | encukou/gillcup_graphics | 6b41b6afdb8223b1bdf5b02431c21d09cf5c36c8 | e107feff05aa31001316ffdcac3d5dc696f25b34 | refs/heads/master | 2021-01-18T16:28:27.841445 | 2013-09-28T23:39:06 | 2013-09-28T23:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | #! /usr/bin/env python
from __future__ import division
import math
import gillcup
from gillcup_graphics import Window, run, RealtimeClock, Layer, EffectLayer
from gillcup_graphics import Rectangle
clock = RealtimeClock()
def makeColorrect(parent, i, speed, color):
colorrect = Rectangle(parent, position=(.5, .5),
anchor=(.5, .5), color=color)
colorrect.scale = 0, 0, 0
colorrect.opacity = 1
anim = gillcup.Animation(colorrect, 'rotation', speed, time=1,
timing='infinite')
anim |= gillcup.Animation(colorrect, 'scale', .5, .5, .5,
delay=i, time=5, easing='sine.out')
anim |= gillcup.Animation(colorrect, 'opacity', 1 - i / 7,
delay=i, time=.05, easing='cubic.out')
clock.schedule(anim)
return colorrect
def demo():
rootLayer = EffectLayer()
rootLayer.mosaic = 10, 10
fooLayer = EffectLayer(rootLayer)
makeColorrect(fooLayer, 0, 90, (.5, .5, .5))
makeColorrect(fooLayer, 1, -90, (1, 0, 0))
makeColorrect(fooLayer, 2, 80, (1, 1, 0))
makeColorrect(fooLayer, 3, -80, (0, 1, 0))
makeColorrect(fooLayer, 4, 70, (1, 0, 1))
makeColorrect(fooLayer, 5, -70, (0, 0, 1))
makeColorrect(fooLayer, 6, 60, (0, 1, 1))
makeColorrect(fooLayer, 7, -60, (.5, .5, .5))
clock.schedule(gillcup.Animation(rootLayer, 'mosaic', 1, 1, time=10))
clock.schedule(5 + gillcup.Animation(fooLayer, 'color', 0, 1, 0,
timing=lambda t, s, d: (0.5 + math.sin(t - s) * 0.5) ** 5))
Window(rootLayer, resizable=True)
run()
demo()
| [
"encukou@gmail.com"
] | encukou@gmail.com |
e494056506906db24da3e6c1b863e7a0d64e9b7f | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/test_20210205211907.py | 46965da94a0c9c05d6c85ced3cd7ea5b4ea00084 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | import requests
import os, sys
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
currentdir = os.path.dirname(os.path.realpath(__file__))
# currentdir = os.path.abspath('')
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config
from selenium.webdriver.chrome.options import Options
options = Options()
options.binary_location = "/home/hopper/chromedriver"
# PATH = "/home/hopper/chromedriver"
token_path = "token"
redirect_uri = "https://localhost"
# Then when you authenticate. excecutable_path is where chromedriver is located on your system.
### AUTENTICATE ###
try:
c = auth.client_from_token_file(config.token_path, config.api_key)
except FileNotFoundError:
from selenium import webdriver
with webdriver.Chrome(chrome_options=options, executable_path= r'C:\Users\absco\Anaconda3\envs\td_ameritrade\chromedriver') as driver:
c = auth.client_from_login_flow(
driver, config.api_key, config.redirect_uri, config.token_path)
from selenium import webdriver
import time
import json
# token_path = "token"
# DRIVER_PATH = "/home/hopper/chromedriver"
print("hi")
# driver = webdriver.Chrome(DRIVER_PATH)
# try:
# c = auth.client_from_token_file(token_path, config.api_key)
# except FileNotFoundError:
# c = auth.client_from_login_flow(driver, config.api_key, redirect_uri, token_path)
# All this scraping code works
driver.get("https://financhill.com/screen/stock-score")
time.sleep(2)
print('1.1')
driver.find_element_by_css_selector(
'span[data-sort-name="stock_score_normalized"]'
).click()
time.sleep(2)
print('1.2')
tickers = driver.find_elements_by_tag_name("td")
positions = c.Account.Fields.POSITIONS
r = c.get_account(config.tda_acct_num, fields=positions)
stocks = r.json()['securitiesAccount']['positions']
stock_symbols = [] #append later
for stock in stocks:
stock_symbols.append([stock['instrument']['symbol'], stock['instrument']['symbol']])
new_stocks_found = False
already_owned = []
advanced_mode = True
i = 0
bought = 0
# [0]:Ticker, [1]:Share Price, [2]:Rating, [3]:Score, [4]:Rating Change Date, [5]:Price Change %
# Check the top 20 stocks on Financhill
while i < 20:
# Get ticker and price of stock
ticker = str(tickers[10*i].text)
share_price = float(tickers[10*i + 1].text)
# Calculate how many shares to buy in order to equal about $1000
desired_dollar_amount = 1000 # How many dollars of each stock to buy
num_shares = round(desired_dollar_amount / share_price)
if bought >= 6:
break
# Skip if ticker is already owned
elif (ticker in stock_symbols):
already_owned.append(str(i) + '. You already own ' + ticker)
i+=1
if advanced_mode:
shares_to_buy = int(input("You already own " + ticker + ", enter how many shares to buy(0 to skip):"))
# Build, place, & print order (uncomment next 2 lines to buy)
# order = equity_buy_market(ticker, shares_to_buy)
# r = c.place_order(config.tda_acct_num, order)
bought+=1
else:
# Build, place, & print order (uncomment next 2 lines to buy)
# order = equity_buy_market(ticker, num_shares)
# r = c.place_order(config.tda_acct_num, order)
print(str(i) + ". Bought " + str(num_shares) + " shares of " + ticker + " up " + tickers[10*i + 5].text + " at $" + tickers[10*i + 1].text)
bought += 1
# Toggle message and increment counter
new_stocks_found = True
i += 1
for sentence in already_owned:
print(sentence)
# If no new stocks were found
if (not new_stocks_found):
print("You already own all the top stocks")
driver.quit() | [
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
f68180be7a5d89b9ca9656c4d2d8902e27d08ce4 | 6cd32e8b7ab4116a5132a36268c9ba1486445399 | /app/blog/sample.py | bc084fe37180d0de36037f9966f7f960153ec6f4 | [] | no_license | mongkyo/django_prac | 81c185b597f47959153a17849620a5650fb2e10e | 790e179608b281099943d60a399b40793d9e69f3 | refs/heads/master | 2020-03-30T00:59:18.573469 | 2018-09-28T11:37:50 | 2018-09-28T11:37:50 | 150,553,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | import os
current_file = os.path.abspath(__file__)
blog_forder = os.path.dirname(current_file)
app_forder = os.path.dirname(blog_forder)
#templates_forder = os.path.join(app_forder, post_list.html)
print(current_file)
print(blog_forder)
print(app_forder)
#print(templates_forder)
| [
"dreamong91@gmail.com"
] | dreamong91@gmail.com |
97880779a3fbbc77db757da3cd217a3858bf47b1 | 3199331cede4a22b782f945c6a71150a10c61afc | /20210517PythonAdvanced/04-generator/gen01.py | ba7c00e986c7ad9a7314cfcec0a21d5154f97993 | [] | no_license | AuroraBoreas/language-review | 6957a3cde2ef1b6b996716addaee077e70351de8 | 2cb0c491db7d179c283dba205b4d124a8b9a52a3 | refs/heads/main | 2023-08-19T23:14:24.981111 | 2021-10-11T12:01:47 | 2021-10-11T12:01:47 | 343,345,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | "#Python is a protocol orienated lang; every top-level function or syntax has a corresponding duner method implemented;"
import time
def compute(n: int)->list:
rv = list()
for i in range(n):
rv.append(i)
time.sleep(.5)
return rv
if __name__ == "__main__":
for i in compute(10):
print(i) | [
"noreply@github.com"
] | AuroraBoreas.noreply@github.com |
ba42e48b971949bce9e5230814036b18659e60a5 | 3d228d5eac44b31d460dd81767b43309b7356577 | /euler/cipher.py | b22f3e3f5dbebcb24d5b4aba07b62794ce277489 | [
"BSD-3-Clause"
] | permissive | lsbardel/mathfun | da65a6f09faacdb4815111dae287c9b974acf928 | 98e7c210409c2b5777e91059c3651cef4f3045dd | refs/heads/master | 2021-05-02T08:56:05.565539 | 2020-07-30T09:14:04 | 2020-07-30T09:14:04 | 26,242,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from string import ascii_lowercase
def cipher(codes):
while True:
keys = {}
while True:
s = 0
for c in ascii_lowercase:
if c in keys:
continue
key = ord(c)
i = len(keys)
for code in codes[i::3]:
b = code ^ key
if 31 < b < 127:
s += b
else:
s = 0
break
if s:
keys[c] = s
break
return s
if __name__ == '__main__':
import requests
codes = list(map(int, requests.get(
'https://projecteuler.net/project/resources/p059_cipher.txt'
).text.split(',')))
print(cipher(codes))
| [
"luca@quantmind.com"
] | luca@quantmind.com |
608b6f08a6631a536aef3b1583b7532ca1a24787 | c5a618ab198a7cc93b35715af2575ad4932f8dbb | /y_CVPR/z_bn/a.py | c1b06a27b80b369405a01d7f9ebb481c116a0a49 | [] | no_license | JaeDukSeo/Personal_Daily_NeuralNetwork_Practice | f33808a0413e130beae27f80fb4cc524834a8cc5 | f83ad23faefd726c647cc1d78021c25e086581be | refs/heads/master | 2021-09-12T07:20:49.212032 | 2018-04-15T08:15:37 | 2018-04-15T08:15:37 | 114,972,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | import numpy as np
import tensorflow as tf
import sklearn
import matplotlib.pyplot as plt
import sys
np.random.seed(6789)
# create random data
data = np.random.normal(size=[10,5])
alpa,beta = 1.0,1.0
batch_e = 0.00001
data_mean = np.sum(data)/len(data)
print(data_mean.shape)
mini_var = np.sum(np.square(data-data_mean)) / len(data)
print(mini_var.shape)
normalize = (data-data_mean)/(np.sqrt(mini_var) + batch_e)
print(normalize.shape)
output = alpa*normalize + beta
# print(data)
# print("MAx: ",data.max())
# print("Min: ",data.min())
# print("Meanx: ",data.mean())
# print('========')
# print(normalize)
# print("MAx: ",normalize.max())
# print("Min: ",normalize.min())
# print("Meanx: ",normalize.mean())
# print('========')
# print(output)
# print("MAx: ",output.max())
# print("Min: ",output.min())
# print("Meanx: ",output.mean())
def batchnorm_forward(x, gamma, beta, eps):
N, D = x.shape
#step1: calculate mean
mu = 1./N * np.sum(x, axis = 0)
#step2: subtract mean vector of every trainings example
xmu = x - mu
#step3: following the lower branch - calculation denominator
sq = xmu ** 2
#step4: calculate variance
var = 1./N * np.sum(sq, axis = 0)
#step5: add eps for numerical stability, then sqrt
sqrtvar = np.sqrt(var + eps)
#step6: invert sqrtwar
ivar = 1./sqrtvar
#step7: execute normalization
xhat = xmu * ivar
#step8: Nor the two transformation steps
gammax = gamma * xhat
#step9
out = gammax + beta
#store intermediate
cache = (xhat,gamma,xmu,ivar,sqrtvar,var,eps)
return out, cache
print('--------')
data_mean = np.sum(data,axis=0)/len(data)
print(data_mean.shape)
mini_var = np.sum(np.square(data-data_mean),axis=0) / len(data)
print(mini_var.shape)
normalize = (data-data_mean)/(np.sqrt(mini_var) + batch_e)
print(normalize.shape)
output = alpa*normalize + beta
print(data)
print("MAx: ",data.max())
print("Min: ",data.min())
print("Meanx: ",data.mean())
print('========')
print(normalize)
print("MAx: ",normalize.max())
print("Min: ",normalize.min())
print("Meanx: ",normalize.mean())
print('========')
print(output)
print("MAx: ",output.max())
print("Min: ",output.min())
print("Meanx: ",output.mean())
print('========')
print('========')
sss = batchnorm_forward(data,1.0,1.0,batch_e)
print(sss[0])
print('========')
print('========')
print(( np.round(sss[0],decimals=4)- np.round(output,decimals=4) ).sum())
# -- end code -- | [
"jae.duk.seo@ryerson.ca"
] | jae.duk.seo@ryerson.ca |
91c938255952262e25f7b7131e69aa3929ff49a4 | 7c63a96fad4257f4959ffeba0868059fc96566fb | /py/d_beazly-python_cookbook/ch_01-data_structures_and_algorithms/12-determining_the_most_frequently_occuring_itesm/main.py | 1a09301f9f4c6596ff3ebf08d5d2eef0f107d568 | [
"MIT"
] | permissive | ordinary-developer/education | b426148f5690f48e0ed4853adfc3740bd038b72c | 526e5cf86f90eab68063bb7c75744226f2c54b8d | refs/heads/master | 2023-08-31T14:42:37.237690 | 2023-08-30T18:15:18 | 2023-08-30T18:15:18 | 91,232,306 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,176 | py | def example_1():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
from collections import Counter
word_counts = Counter(words)
top_three = word_counts.most_common(3)
print(top_three)
print(word_counts['not'])
print(word_counts['eyes'])
def example_2():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
word_counts = Counter(words)
for word in morewords:
word_counts[word] += 1
print(word_counts['eyes'])
def example_3():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
word_counts = Counter(words)
word_counts.update(morewords)
print(word_counts['eyes'])
def example_4():
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the' 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
morewords = ['why', 'are', 'you', 'not', 'looking', 'in', 'my', 'eyes']
from collections import Counter
a = Counter(words)
b = Counter(morewords)
print(a)
print(b)
c = a + b
print(c)
d = a - b
print(d)
if __name__ == '__main__':
example_1()
example_2()
example_3()
example_4()
| [
"merely.ordinary.developer@gmail.com"
] | merely.ordinary.developer@gmail.com |
080664a40b0bc54179cc500c91d7b1c410ab2368 | efcde5b4ea4fbf01a08e4b2b4edb712fae46be48 | /shapes/size_config.py | 4fe0336834ec6fcf32964732df989e4176759495 | [] | no_license | vermashresth/Referential_Shapes | 9611a6450a8d3d65b4dae602fae8e178d5f32f67 | b8fae15561cafa741471065d7920162e4add2e54 | refs/heads/master | 2023-02-23T19:53:59.151580 | 2021-01-27T05:59:57 | 2021-01-27T05:59:57 | 292,020,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | train_size = 7450
val_size = 827
test_size = 4050
def return_sizes():
global train_size, val_size, test_size
return train_size, val_size, test_size, val_size
| [
"vermashresth@gmail.com"
] | vermashresth@gmail.com |
d06246ae8222a63d97c09c147d260a5bc954bcae | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /tools/style_variable_generator/views_generator.py | e9cb4d9aa4779ac7b9c4812bd254cb62193c0d27 | [
"Zlib",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"APSL-2.0",
"MIT",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 2,350 | py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from base_generator import Color, Modes, BaseGenerator, VariableType
class ViewsStyleGenerator(BaseGenerator):
'''Generator for Views Variables'''
@staticmethod
def GetName():
return 'Views'
def Render(self):
self.Validate()
return self.ApplyTemplate(self, 'views_generator_h.tmpl',
self.GetParameters())
def GetParameters(self):
return {
'colors': self._CreateColorList(),
}
def GetFilters(self):
return {
'to_const_name': self._ToConstName,
'cpp_color': self._CppColor,
}
def GetGlobals(self):
globals = {
'Modes': Modes,
'out_file_path': None,
'namespace_name': None,
'in_files': self.in_file_to_context.keys(),
}
if self.out_file_path:
globals['out_file_path'] = self.out_file_path
globals['namespace_name'] = os.path.splitext(
os.path.basename(self.out_file_path))[0]
return globals
def _CreateColorList(self):
color_list = []
for name, mode_values in self.model[VariableType.COLOR].items():
color_list.append({'name': name, 'mode_values': mode_values})
return color_list
def _ToConstName(self, var_name):
return 'k%s' % var_name.title().replace('_', '')
def _CppColor(self, c):
'''Returns the C++ color representation of |c|'''
assert (isinstance(c, Color))
def AlphaToInt(alpha):
return int(alpha * 255)
if c.var:
return ('ResolveColor(ColorName::%s, color_mode)' %
self._ToConstName(c.var))
if c.rgb_var:
return (
'SkColorSetA(ResolveColor(ColorName::%s, color_mode), 0x%X)' %
(self._ToConstName(c.RGBVarToVar()), AlphaToInt(c.a)))
if c.a != 1:
return 'SkColorSetARGB(0x%X, 0x%X, 0x%X, 0x%X)' % (AlphaToInt(c.a),
c.r, c.g, c.b)
else:
return 'SkColorSetRGB(0x%X, 0x%X, 0x%X)' % (c.r, c.g, c.b)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
1f62556e20ec9f8343ddcb47fd0b25729b596fee | 5865a8a69c58ca09a5537858f636469dad35971e | /first_project/first_app/migrations/0001_initial.py | d6d8ddcb99c58143f19c729c344bbee45679b62e | [] | no_license | ohduran-attempts/theDisSilent | 3ee757e2c50ced7988fa1787f680e49e8b9a9c58 | 6016b639146412d7e3f0ea2ddf3fae5702d973c1 | refs/heads/master | 2020-03-21T16:29:52.677005 | 2018-07-19T21:06:36 | 2018-07-19T21:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-09 06:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('top_name', models.CharField(max_length=264, unique=True)),
],
),
migrations.CreateModel(
name='Webpage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=264, unique=True)),
('url', models.URLField(unique=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Topic')),
],
),
migrations.AddField(
model_name='accessrecord',
name='name',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Webpage'),
),
]
| [
"alvaro.duranb@gmail.com"
] | alvaro.duranb@gmail.com |
5d3a11ad3b206e8da3a8e466159889fc036ca511 | 16785f35ceb0f6336760e2c415047ea95037a8af | /run_game.py | 3bc8533ff445689329ac94947ea5f07ca4743ec5 | [] | no_license | bitcraft/pyweek18 | 07fd1c36c202806cb6412fd54ae7f693e3c64d63 | 9a9b33adf1445b4777565a604a6cffbb434beebe | refs/heads/master | 2021-01-20T12:04:57.211424 | 2015-12-28T18:10:28 | 2015-12-28T18:10:28 | 19,759,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | from castlebats import config
import os
# load configuration
filename = os.path.join('config', 'castlebats.ini')
config.read(filename)
import logging
logger = logging.getLogger('castlebats.run')
logging.basicConfig(
level=getattr(logging, config.get('general', 'debug-level')),
format="%(name)s:%(filename)s:%(lineno)d:%(levelname)s: %(message)s")
from castlebats import resources
from castlebats.game import Game
import pygame
#import pymunkoptions
#pymunkoptions.options["debug"] = False
def check_libs():
import pytmx
import pymunktmx
import pyscroll
logger.info('pygame version:\t%s', pygame.__version__)
logger.info('pytmx version:\t%s', pytmx.__version__)
logger.info('pymunktmx version:\t%s', pymunktmx.__version__)
logger.info('pyscroll version:\t%s', pyscroll.__version__)
import pymunk
logger.info('pymunk version:\t%s', pymunk.__version__)
if __name__ == '__main__':
    # simple wrapper to keep the screen resizeable
    def init_screen(width, height):
        # Recreate the display surface using the fullscreen flag read from
        # config below (module-level name, bound before the first call).
        if fullscreen:
            return pygame.display.set_mode((width, height), pygame.FULLSCREEN)
        else:
            return pygame.display.set_mode((width, height), pygame.RESIZABLE)
    check_libs()
    # Display settings from the castlebats config file.
    screen_width = config.getint('display', 'width')
    screen_height = config.getint('display', 'height')
    fullscreen = config.getboolean('display', 'fullscreen')
    window_caption = config.get('display', 'window-caption')
    # Audio settings; the mixer is initialised before pygame.init(),
    # presumably so the custom frequency/buffer take effect — TODO confirm.
    sound_buffer_size = config.getint('sound', 'buffer')
    sound_frequency = config.getint('sound', 'frequency')
    pygame.mixer.init(frequency=sound_frequency, buffer=sound_buffer_size)
    screen = init_screen(screen_width, screen_height)
    pygame.display.set_caption(window_caption)
    pygame.init()
    pygame.font.init()
    screen.fill((0, 0, 0))
    # Pump the event queue while resources load so the window stays responsive.
    for thing in resources.load():
        pygame.event.get()
    pygame.display.flip()
    game = Game()
    try:
        game.run()
    except:
        # NOTE(review): bare except — it re-raises after pygame.quit(), so
        # errors still surface, but it also catches SystemExit/KeyboardInterrupt.
        pygame.quit()
        raise
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |
d2b7fb8a90525f2c5372ebe29067cc85a1e85473 | aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8 | /cgi-bin/request/raob.py | 03e604c77af7175b5f30b888cbfef24dff42f9f3 | [
"MIT"
] | permissive | jamayfieldjr/iem | e0d496311d82790ad518c600c2fcffe44e834da1 | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | refs/heads/master | 2020-08-07T11:55:56.256857 | 2019-10-04T04:22:36 | 2019-10-04T04:22:36 | 213,439,554 | 1 | 0 | MIT | 2019-10-07T17:01:20 | 2019-10-07T17:01:20 | null | UTF-8 | Python | false | false | 2,529 | py | #!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import sys
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
    """Return *val* unchanged, or the missing-data marker 'M' when it is None."""
    return 'M' if val is None else val
def fetcher(station, sts, ets):
    """Stream RAOB profile data for *station* between *sts* and *ets* as CSV.

    Writes a header row followed by one row per profile level via ``ssw``;
    missing values are rendered as 'M' by ``m()``.
    """
    dbconn = get_dbconn('postgis')
    # Named cursor: presumably to stream large result sets server-side
    # instead of loading them into memory — TODO confirm driver behavior.
    cursor = dbconn.cursor('raobstreamer')
    stations = [station, ]
    if station.startswith("_"):
        # Virtual site id: expand to the comma-separated real station IDs
        # encoded after '--' in the network-table name.
        nt = NetworkTable("RAOB")
        stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
    # smps * 1.94384 converts m/s to knots (column is emitted as speed_kts).
    cursor.execute("""
    SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
    p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
    p.bearing, p.range_miles, f.station from
    raob_profile p JOIN raob_flights f on
    (f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
    """, (tuple(stations), sts, ets))
    ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
         "dwpc,drct,speed_kts,bearing,range_sm\n"))
    for row in cursor:
        # row[10] is the station id; the rest follow the SELECT column order.
        ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
             ) % (row[10], m(row[0]),
                  m(row[1]), m(row[2]), m(row[3]), m(row[4]),
                  m(row[5]), m(row[6]), m(row[7]),
                  m(row[8]), m(row[9])))
def friendly_date(form, key):
    """Parse CGI parameter *key* as a UTC datetime, forgiving a missing time part.

    Accepts '%m/%d/%Y' or '%m/%d/%Y %H:%M'.  On any parse failure an error
    message is emitted and the CGI script exits.
    """
    raw = form.getfirst(key)
    try:
        raw = raw.strip()
        # Date-only values get the date format; otherwise expect date + time.
        fmt = '%m/%d/%Y' if len(raw.split()) == 1 else '%m/%d/%Y %H:%M'
        parsed = datetime.datetime.strptime(raw, fmt).replace(tzinfo=pytz.UTC)
    except Exception as _exp:
        ssw('Content-type: text/plain\n\n')
        ssw(('Invalid %s date provided, should be "%%m/%%d/%%Y %%H:%%M"'
             ' in UTC timezone'
             ) % (key, ))
        sys.exit()
    return parsed
def main():
    """CGI entry point: parse request parameters and stream the RAOB CSV."""
    form = cgi.FieldStorage()
    sts = friendly_date(form, 'sts')
    ets = friendly_date(form, 'ets')
    # Station identifiers are at most 4 characters (default KOAX).
    station = form.getfirst('station', 'KOAX')[:4]
    if form.getfirst('dl', None) is not None:
        # 'dl' flag present: serve as a downloadable attachment.
        ssw('Content-type: application/octet-stream\n')
        ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
             ) % (station, sts.strftime("%Y%m%d%H"),
                  ets.strftime("%Y%m%d%H")))
    else:
        # Otherwise render inline as plain text.
        ssw('Content-type: text/plain\n\n')
    fetcher(station, sts, ets)
if __name__ == '__main__':
    main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
e37ae85cde6cdf500f9a4cfc2af4b9e11831abb4 | 05e3c6d28bbaf56f058d95ea0aab0006843b2420 | /swagger_client/models/file_metadata.py | 7f41346fc2bc051237d690812c7b9f89bf2d19df | [] | no_license | TheAdsOnTop/dynamix-python-client | 4ac5bf8bc975e3b1230bdf8ed0900e6b1382e318 | ace7ff34502cbbbb11b0c65bb3385b8c48247082 | refs/heads/master | 2020-03-08T00:10:05.566157 | 2018-04-02T19:02:47 | 2018-04-02T19:02:47 | 127,799,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,592 | py | # coding: utf-8
"""
Dynamix
Sign up for Dynamix & grab your token. # noqa: E501
OpenAPI spec version: v0.1.0
Contact: david@theadsontop.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FileMetadata(object):
    """Swagger-style model describing a stored file's metadata.

    All three attributes (``extension``, ``name`` and
    ``uploaded_by_user_profile_rid``) are optional strings.
    """

    # attribute name -> Swagger type of that attribute
    swagger_types = {
        'extension': 'str',
        'name': 'str',
        'uploaded_by_user_profile_rid': 'str'
    }

    # attribute name -> JSON key used on the wire
    attribute_map = {
        'extension': 'extension',
        'name': 'name',
        'uploaded_by_user_profile_rid': 'uploadedByUserProfileRid'
    }

    def __init__(self, extension=None, name=None, uploaded_by_user_profile_rid=None):  # noqa: E501
        """Create a FileMetadata, optionally setting any of its attributes."""
        self._extension = None
        self._name = None
        self._uploaded_by_user_profile_rid = None
        self.discriminator = None
        # Route non-None constructor arguments through the property setters.
        if extension is not None:
            self.extension = extension
        if name is not None:
            self.name = name
        if uploaded_by_user_profile_rid is not None:
            self.uploaded_by_user_profile_rid = uploaded_by_user_profile_rid

    @property
    def extension(self):
        """The file's extension, or None when unset."""
        return self._extension

    @extension.setter
    def extension(self, extension):
        """Set the file's extension."""
        self._extension = extension

    @property
    def name(self):
        """The file's name, or None when unset."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the file's name."""
        self._name = name

    @property
    def uploaded_by_user_profile_rid(self):
        """RID of the user profile that uploaded the file, or None."""
        return self._uploaded_by_user_profile_rid

    @uploaded_by_user_profile_rid.setter
    def uploaded_by_user_profile_rid(self, uploaded_by_user_profile_rid):
        """Set the uploader's user-profile RID."""
        self._uploaded_by_user_profile_rid = uploaded_by_user_profile_rid

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) inside lists or dict
        values are converted one level deep, matching generated-model behavior.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Debug representation: same as ``to_str``."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a FileMetadata with identical attributes."""
        if not isinstance(other, FileMetadata):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
"akhanna@princeton.edu"
] | akhanna@princeton.edu |
5ca8a48f56293d94fcefe0d3de937b11003af9e6 | 2bc677218d0c13fe0551510b64782f2f001def17 | /tests/unit/testing_utils/test_biased.py | 9231acb98578e0a41978723ab09cfb6f9ccc7645 | [
"MIT"
] | permissive | drizm-team/python-commons | dc6a558c682315f12b7175d9395070c6ffcd3661 | 55e14754222cc1aa8a6c9137f75d529158864fee | refs/heads/master | 2023-02-10T13:59:51.338932 | 2021-01-01T22:58:36 | 2021-01-01T22:58:36 | 306,466,336 | 0 | 0 | MIT | 2021-01-01T19:45:43 | 2020-10-22T21:49:57 | Python | UTF-8 | Python | false | false | 1,106 | py | from drizm_commons.testing import self_to_id
def test__self_to_id():
    """
    GIVEN I have a JSONified body
    AND that body matches the Drizm HATEOAS format
    WHEN I extract the identifier from a valid URI
    THEN I should get back a valid identifier
    """
    # Integer key embedded in a trailing-slash URI.
    test_key = 1
    test_body = {
        "self": {
            "href": f"http://example.net/resources/{test_key}/"
        }
    }
    found_key = self_to_id(test_body)
    assert found_key == test_key
    assert type(found_key) == int

    # Body factory for an arbitrary key (a def instead of an assigned
    # lambda, per PEP 8 E731); URI has a query string and no trailing slash.
    def make_body(k):
        return {
            "self": {
                "href": f"https://www.bsdomain.com/resources/okay/{k}"
                        "?state=1&mode=test"
            }
        }

    # Non-numeric key stays a string.
    test_key = "rgftbiuftbiubtiu"
    found_key = self_to_id(make_body(test_key))
    assert found_key == test_key
    assert type(found_key) == str

    # Numeric key is coerced to int ...
    test_key = 30
    found_key = self_to_id(make_body(test_key))
    assert found_key == test_key
    assert type(found_key) == int

    # ... unless force_str requests the string form.
    found_key = self_to_id(make_body(test_key), force_str=True)
    assert found_key == str(test_key)
    assert type(found_key) == str
| [
"kochbe.ber@gmail.com"
] | kochbe.ber@gmail.com |
207cd301afe20337efa820d7f0bb5dbc2f2c8e5b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/smallestWindow_20200707160948.py | f9b4a901ea9e888d7e99fcd5ee41b198c32465ae | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | def min(s,t):
no_of_chars = 256
count = 0
start = 0
start_index = -1
min_len = float('inf')
print(start,start_index,min_len)
# first check if the length of the string is less than the string of the given pattern
if len(t)> len(s):
return ""
else:
# store the occurrences of the characters of the given pat in a hash pat
hash_pat = [0] * no_of_chars
hash_str = [0] * no_of_chars
# here we create a array where we store the number of occurences of a char based on its ascii value
for i in range(len(t)):
hash_pat[ord(t[i])] +=1
print(hash_pat)
for j in range(len(s)):
hash_str[ord(t[j])] +=1
if hash_pat[ord(t[j])] <= hash_str[ord(s[j])] and hash_pat[ord(t[j]) !=0]:
count +=1
# when the count gets to the length of the pattern string then the window string contains the pattern
if count == len(t):
# here we'll try minimize the window --> how
# if the window contains repeating characters that are not in the pattern
# we ignore them
# also if a character is there and not available in the pattern please ignore it
while(hash_str[ord(s[start])] > hash_pat[ord(s[start])] or hash_pat[ord(s[start])] == 0:
# first substring ADOBEC
'''
in the while loop we are checking for ----> value of A which is one is greater than value of A in the pattern
the pattern('ABC') this is checking to remove any repeated strings cause value of A is hash_str[65] == 2
the other condition we aim to remove any useless characters
'''
min("ADOBECODEBANC","ABC") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a677e63fb993da9014051e73887f7b61716d738e | 00fdef95e46f81e285d786d4d7ce1d9e017f5102 | /python/notes/myPackage/web/urls.py | 17886962283142eb98a1654d4590653ea8e779e4 | [] | no_license | ByeongjunCho/TIL | 76cbff26074104d5c54afda2a8e21a680792cf2f | 44c50dc7b6fbee4dfb3b0fb4bbe1383ef0eb0953 | refs/heads/master | 2023-01-24T21:57:31.848845 | 2020-04-17T04:16:07 | 2020-04-17T04:16:07 | 195,908,293 | 0 | 0 | null | 2023-01-07T11:27:24 | 2019-07-09T01:15:46 | Jupyter Notebook | UTF-8 | Python | false | false | 178 | py | import webbrowser
def make_url(token, method):
    """Build a Telegram Bot API endpoint URL for *token* and *method*.

    Fixes the previous host typo: the Bot API lives at api.telegram.org
    (was 'api.telgram.com', which is not a Telegram domain).
    """
    return f'https://api.telegram.org/bot{token}/{method}'
def docs():
    """Open the Telegram site in the default web browser; always returns True."""
    # NOTE(review): 'https://telegram.com' — confirm this is the intended URL
    # (the official site is telegram.org).
    webbrowser.open('https://telegram.com')
    return True
"jjgk91@naver.com"
] | jjgk91@naver.com |
a554b9d4babd5f1a1082fb96b093df5f78bfe006 | b9a1be2835bf81a59c46220569b32cfeb9535822 | /MusicSegment/JSRanker/parametersInCondition2.py | d9550cbd7c5658c39bd81bd49878ca2a83333009 | [] | no_license | gifts1912/Project | 838b9c52eb7564679969ecc44933296fa46401ab | bf486420e6ec9f54420747481f7b0fbe60bc7825 | refs/heads/master | 2021-01-12T16:16:19.333663 | 2017-03-31T12:51:52 | 2017-03-31T12:51:52 | 71,970,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,485 | py | import pandas as pd
import numpy as np
def loadJSLog(jsLogFile, queryUrlFeas):
    """Parse a JS-ranker log file into *queryUrlFeas*: {query: {url: [4 features]}}.

    Each line is ``id<TAB>query<TAB>log`` where the log is ';'-separated
    entries of ``url<TAB>f1<TAB>f2<TAB>f3<TAB>f4``.  Queries and urls are
    lower-cased; trailing '/' is stripped from urls.
    """
    with open(jsLogFile, 'r', encoding='utf-8') as fr:
        for line in fr:
            parts = line.split('\t', 2)
            query = parts[1].strip().lower()
            payload = parts[2].strip().rstrip(';').strip()
            if not payload:
                continue
            queryUrlFeas.setdefault(query, {})
            for entry in payload.split(';'):
                fields = entry.strip().split('\t')
                if len(fields) != 5:
                    # Malformed entry: report and skip.
                    print("len(urlFeaArr)!=5", query, fields)
                    continue
                url = str(fields[0]).lower().strip().rstrip('/')
                queryUrlFeas[query][url] = list(fields[1:])
def loadOfflineFeatures(offQueryUrlFeas, offlineRankerIn="C:/Code/data/offlineRankerIn.tsv"):
    """Parse the offline-ranker TSV into *offQueryUrlFeas*: {query: {url: [4 features]}}.

    The header row names the columns; the four features are kept in the
    fixed order DRScore, EntityMatchThreshold, IntentMatchThreshold,
    ConstraintMatchThreshold.
    """
    with open(offlineRankerIn, 'r', encoding='utf-8') as fr:
        header = fr.readline().strip().split('\t')
        query_col = header.index("m:Query")
        url_col = header.index("m:Url")
        feature_cols = [header.index(col) for col in
                        ('DRScore', 'EntityMatchThreshold',
                         'IntentMatchThreshold', 'ConstraintMatchThreshold')]
        for line in fr:
            fields = line.strip().split('\t')
            query = fields[query_col].lower().strip()
            url = fields[url_col].lower().strip().rstrip('/')
            offQueryUrlFeas.setdefault(query, {})[url] = [fields[c] for c in feature_cols]
def loadRD2Query(path="C:/Code/data/diff2Query.tsv"):
    """Load the RD2 diff-query set: one stripped query per line of *path*.

    The file path is now a parameter (defaulting to the original hard-coded
    location) so the loader can be reused and tested; existing no-argument
    callers are unaffected.

    :param path: text file with one query per line.
    :return: set of stripped query strings.
    """
    difQuerySet = set()
    with open(path, 'r', encoding='utf-8') as fr:
        for line in fr:
            difQuerySet.add(line.strip())
    return difQuerySet
def similarityCompute(queryUrlFeas, offQueryUrlFeas, difQueryFile = "C:/Code/data/queryListWithDiffFea.tsv"):
    """Print, per feature index 0-3, how often JS-log and offline features agree.

    Only (query, url) pairs present in both inputs AND whose query is in the
    RD2 diff-query set are counted.

    NOTE(review): the ``difQueryFile`` parameter is never used — the diff
    queries always come from ``loadRD2Query()``'s internal path.
    """
    difQuerySet = set()
    difQuerySet = loadRD2Query()
    all_num = 0
    com_num = 0
    # NOTE(review): com_num is never updated or read.
    # Per-feature agreement counters for the four feature slots.
    com_fea = [0] * 4
    for query, urlCons in offQueryUrlFeas.items():
        if query not in queryUrlFeas:
            continue
        if query not in difQuerySet:
            continue
        for url, con in urlCons.items():
            if url not in queryUrlFeas[query]:
                continue
            all_num += 1
            for i in range(4):
                if con[i] == queryUrlFeas[query][url][i]:
                    com_fea[i] += 1
    if all_num != 0:
        # One line per feature: total pairs, matches, match ratio.
        for i in range(4):
            print(all_num, com_fea[i], float(com_fea[i]) / all_num)
    else:
        print("all_num is 0")
def queryLevelLoad(JSLog, queryFeaJS):
    """Load query-level JS log payloads into *queryFeaJS*: {query: raw log string}."""
    with open(JSLog, 'r', encoding='utf-8') as fr:
        for line in fr:
            parts = line.strip().split('\t', 2)
            if len(parts) != 3:
                # Malformed row: need id, query and log payload.
                continue
            query = parts[1].strip().lower()
            queryFeaJS[query] = parts[2].strip().rstrip(';').strip()
def queryLevelOffLoad(offlineLog, queryFeaOff):
    """Load {query: ConstraintMatchThreshold} from the offline-ranker TSV via pandas."""
    frame = pd.read_csv(offlineLog, sep='\t', header=0)
    wanted = ['m:Query', 'm:Url', 'ConstraintMatchThreshold']
    for query_val, _url, threshold in frame[wanted].values:
        queryFeaOff[str(query_val).strip().lower()] = str(threshold).strip()
def similarityQueryLevel(JSLog="C:/Code/data/JSLog.tsv", offlineLog="C:/Code/data/offlineRankerIn.tsv"):
    """Compare query-level features between the JS log and the offline ranker log.

    Prints the number of shared queries, how many have identical feature
    strings, and the match ratio (nothing is printed when no queries overlap).
    """
    js_features = {}
    off_features = {}
    queryLevelLoad(JSLog, js_features)
    queryLevelOffLoad(offlineLog, off_features)
    total = 0
    matched = 0
    for query, feature in js_features.items():
        if query not in off_features:
            continue
        total += 1
        if feature == off_features[query]:
            matched += 1
    if total != 0:
        print(total, matched, float(matched) / total)
def main():
    """Load JS-ranker and offline L3-ranker feature logs and print agreement stats."""
    # Hard-coded input paths for a one-off local analysis run.
    jsLogFile = "C:/Code/Module/JSRankerEvaluation/o2_25933d37-7d7e-4eab-ab1f-623f04a96a33/25933d37-7d7e-4eab-ab1f-623f04a96a33"
    queryUrlFeas = {}
    loadJSLog(jsLogFile, queryUrlFeas)
    offQueryUrlFeas = {}
    loadOfflineFeatures(offQueryUrlFeas, 'C:/Code/data/offlineL3RankerIn.tsv')
    similarityCompute(queryUrlFeas, offQueryUrlFeas)
if __name__ == "__main__":
    main()
| [
"hengyliu@hotmail.com"
] | hengyliu@hotmail.com |
5fce573ea3661fce8d1a9b4e1d79e857cae2283c | d380e7dfd4c9bb5fd9f238ca48be9f807b4f5db3 | /codes_auto/322.coin-change.py | 4c56b1aff7ec2f295ad97e11aba197fb8b079495 | [] | no_license | zhanjw/leetcode | 98c3d3fda23f100354b9ca030c47a854d3c207f0 | 2e62568dd9c6ce0bff4b0ca888ffff29f5a6feef | refs/heads/master | 2022-12-14T12:49:23.797498 | 2020-09-18T14:31:30 | 2020-09-18T14:31:30 | 296,640,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #
# @lc app=leetcode.cn id=322 lang=python
#
# [322] coin-change
#
class Solution(object):
    def coinChange(self, coins, amount):
        """Return the fewest coins summing to *amount*, or -1 if impossible.

        Classic unbounded coin-change DP: dp[a] is the minimum number of
        coins that make amount a.  The original relied on truthiness of
        dp entries (dp[0] == 0 is falsy), forcing a redundant ``idx % coin``
        branch; using an explicit INF sentinel removes that quirk while
        producing identical results.

        :type coins: List[int]
        :type amount: int
        :rtype: int
        """
        INF = float('inf')
        # dp[0] = 0 (zero coins make amount 0); everything else starts unreachable.
        dp = [0] + [INF] * amount
        for a in range(1, amount + 1):
            for coin in coins:
                if 0 < coin <= a and dp[a - coin] + 1 < dp[a]:
                    dp[a] = dp[a - coin] + 1
        return dp[amount] if dp[amount] != INF else -1
# @lc code=end | [
"imzhanjw@gmail.com"
] | imzhanjw@gmail.com |
58a2c9b61b424ef7dddb020d73ee279d49b0f40f | 9225ad5fb5dd92af547f4c4e04874bc812620d04 | /0.Dev Training/1.Junior/1. Base Demo/9.生成可控的随机数据集合/sample.py | b3e513365dfc3f9dd87100a93420470389f4bc38 | [] | no_license | skynimrod/dt_python | 6fb50d354d3e8cef995edc459ef45fe42b234c48 | bd822140634ae56d1f2331bde9877c871f62507a | refs/heads/master | 2021-05-16T06:55:32.840279 | 2017-09-15T04:11:10 | 2017-09-15T04:11:10 | 103,612,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import pylab
import random
SAMPLE_SIZE = 100
# seed random generator
# if no argument provided
#uses system current time
random.seed()
# store generated random values here
real_rand_vars = []
# pick some random values
real_rand_vars = [ random.random() for val in xrange(SAMPLE_SIZE) ]
# create histogrm from data in 10 buckets
pylab.hist( real_rand_vars, 10 )
#define x and y labels
pylab.xlabel("Number range")
pylab.ylabel("Count")
# show figure
pylab.show() | [
"adamswang_2000@aliyun.com"
] | adamswang_2000@aliyun.com |
11ba134e158504fdf01a45fc99d4732128a6012d | 57b4d38c1e81fae68a50133f74ca05126909ba10 | /app/tests/healthy/test_views.py | 3fb5f22ad03d25cf5123c5ddb48f1dc6d4abbe86 | [] | no_license | frankRose1/flask-api-boilerplate | edb06a339f312e5a202d2ff38a8304e7b3c5ab6e | 3b23bd337a49fee0d7666c89d9fb1fa14f4602c9 | refs/heads/master | 2022-01-22T03:15:25.243212 | 2019-11-09T02:30:38 | 2019-11-09T02:30:38 | 205,987,266 | 0 | 0 | null | 2022-01-06T22:38:10 | 2019-09-03T04:12:15 | Python | UTF-8 | Python | false | false | 282 | py | from flask import url_for
from lib.tests import ViewTestMixin
class TestHealthy(ViewTestMixin):
    # Integration test for the health-check endpoint.

    def test_healthy_response(self):
        """Should respond with a 200"""
        # 'HealthyView:get' — endpoint naming in the Flask-Classful style;
        # presumably resolves to HealthyView.get(). Confirm in the app code.
        response = self.client.get(url_for('HealthyView:get'))
        assert response.status_code == 200
| [
"frank.rosendorf1@gmail.com"
] | frank.rosendorf1@gmail.com |
70b4ecc28943ca349c7ef7077d2dc62e6d3ba2c1 | c1646925d393914477aa22e279d20ab8103a5e9f | /fuzzinator/fuzzer/subprocess_runner.py | 8191d1f4b4284dec8ec3e020b49aa25180a40327 | [
"BSD-3-Clause"
] | permissive | harmmachine/fuzzinator | 574d0a6d424a2beb6a305ecb5b8621d4b7a22a2b | 6d0eea40457b93b0fef295e1e14524ad68ee748f | refs/heads/master | 2021-06-08T21:10:51.248453 | 2016-11-14T10:29:22 | 2016-11-14T10:29:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | # Copyright (c) 2016 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import json
import shutil
import subprocess
import os
class SubprocessRunner(object):
    """
    Wrapper around a fuzzer that is available as an executable and can generate
    its test cases as file(s) in a directory. First, the external executable is
    invoked as a subprocess, and once it has finished, the contents of the
    generated files are returned one by one.

    **Mandatory parameters of the fuzzer:**

      - ``command``: string to pass to the child shell as a command to run (all
        occurrences of ``{uid}`` in the string are replaced by an identifier
        unique to this fuzz job).
      - ``outdir``: path to the directory containing the files generated by the
        external fuzzer (all occurrences of ``{uid}`` in the path are replaced
        by the same identifier as described at the ``command`` parameter).

    **Optional parameters of the fuzzer:**

      - ``cwd``: if not ``None``, change working directory before the command
        invocation.
      - ``env``: if not ``None``, a dictionary of variable names-values to
        update the environment with.

    **Example configuration snippet:**

        .. code-block:: ini

            [sut.foo]
            # see fuzzinator.call.*

            [fuzz.foo-with-bar]
            sut=sut.foo
            fuzzer=fuzzinator.fuzzer.SubprocessRunner
            batch=50

            [fuzz.foo-with-bar.fuzzer.init]
            outdir=${fuzzinator:work_dir}/bar/{uid}
            command=barfuzzer -n ${fuzz.foo-with-bar:batch} -o ${outdir}
    """

    def __init__(self, outdir, command, cwd=None, env=None, **kwargs):
        # uid is used to make sure we create unique directory for the generated test cases.
        self.uid = '{pid}-{id}'.format(pid=os.getpid(), id=id(self))
        self.outdir = outdir.format(uid=self.uid)
        self.command = command
        self.cwd = cwd or os.getcwd()
        # env is expected to be a JSON object string; merged over os.environ.
        self.env = dict(os.environ, **json.loads(env)) if env else None
        # Paths of generated test files, populated by __enter__.
        self.tests = []

    def __enter__(self):
        # Run the external fuzzer to completion with its output silenced,
        # then collect the paths of whatever it wrote into outdir.
        # NOTE(review): shell=True executes the configured command through the
        # shell — the command string comes from trusted configuration here.
        os.makedirs(self.outdir, exist_ok=True)
        with open(os.devnull, 'w') as FNULL:
            with subprocess.Popen(self.command.format(uid=self.uid),
                                  cwd=self.cwd,
                                  env=self.env,
                                  shell=True,
                                  stdout=FNULL,
                                  stderr=FNULL) as proc:
                proc.wait()
        self.tests = [os.path.join(self.outdir, test) for test in os.listdir(self.outdir)]
        return self

    def __exit__(self, *exc):
        # Remove the whole per-job output directory; never suppress exceptions.
        shutil.rmtree(self.outdir, ignore_errors=True)
        return None

    # Although kwargs is not used here but the 'index' argument will be passed anyhow
    # and it has to be accepted.
    def __call__(self, **kwargs):
        # Pop one remaining generated file and return its raw bytes;
        # None signals that the batch is exhausted.
        if not self.tests:
            return None
        test = self.tests.pop()
        with open(test, 'rb') as f:
            return f.read()
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
e1ff943f227873288abcaa9d018334cfc0af1406 | a2211f0ef8297a77200a0b2eec8ba3476989b7e6 | /itcast/06_Django/day01_Django入门/demo03_Module.py | e363d13f3109bb7c2ac9c46470415e9f547d03a0 | [] | no_license | qq1197977022/learnPython | f720ecffd2a70044f1644f3527f4c29692eb2233 | ba294b8fa930f784304771be451d7b5981b794f3 | refs/heads/master | 2020-03-25T09:23:12.407510 | 2018-09-16T00:41:56 | 2018-09-16T00:42:00 | 143,663,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # 图书表结构
# 表名: Book
# 图书名: name
# 图书发布日期: date
# 英雄表结构
# 表名: Hero
# 英雄姓名: name
# 英雄性别: gender
# 英雄简介: introduce
# 所属图书: book
# E-R模型
# E:
# 1.图书
# 2.英雄
# R n:1 ~ 多对一
# Models映射关系
# 1.类对象: 表
# 2.类对象数据属性: 表字段
# 1.因此仅CURD类对象数据属性时才需要migrate, CURD方法无需migrate ~ 不对应数据库数据
# 2.id字段会默认自动添加
# 3.实例对象: 数据记录
#
| [
"1197977022@qq.com"
] | 1197977022@qq.com |
00e5ac08fe0d6db9b3ad6031826c2e0b81bcce83 | 4d05be863b63a56a90b4c46b15069827b33ecaae | /django/venv/Lib/site-packages/pip/_vendor/html5lib/treewalkers/_base.py | 1b1b58b4d9126ec55aa0147c945fac92d6ec1b01 | [] | no_license | leeo1116/PyCharm | e532fa9754056019508cc454214ee1a8ad9b26a9 | b6942c05c27556e5fe47879e8b823845c84c5430 | refs/heads/master | 2022-11-06T00:43:14.882453 | 2017-07-13T04:50:00 | 2017-07-13T04:50:00 | 36,851,636 | 0 | 1 | null | 2022-10-20T10:44:39 | 2015-06-04T06:09:09 | Python | UTF-8 | Python | false | false | 7,062 | py | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
from xml.dom import Node
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
from ..constants import voidElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
    """Coerce *s* to text; None becomes '' (or stays None when blank_if_none is False)."""
    if s is None:
        return "" if blank_if_none else None
    if isinstance(s, text_type):
        return s
    return text_type(s)
def is_text_or_none(string):
    """Return True when *string* is None or a string type."""
    if string is None:
        return True
    return isinstance(string, string_types)
class TreeWalker(object):
    """Base tree walker: helpers that build the token dicts yielded by __iter__.

    NOTE(review): these token dicts use the key ``"simba_data"`` where upstream
    html5lib appears to use ``"data"`` — presumably a deliberate project-wide
    rename in this vendored copy, preserved here.
    """
    def __init__(self, tree):
        # Root of the tree to be walked.
        self.tree = tree
    def __iter__(self):
        # Subclasses must yield a stream of token dicts.
        raise NotImplementedError
    def error(self, msg):
        """Build a SerializeError token carrying *msg*."""
        return {"type": "SerializeError", "simba_data": msg}
    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token (plus an error token if children were given)."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        yield {"type": "EmptyTag", "name": to_text(name, False),
               "namespace": to_text(namespace),
               "simba_data": attrs}
        if hasChildren:
            # Void elements must not have children.
            yield self.error("Void element has children")
    def startTag(self, namespace, name, attrs):
        """Build a StartTag token; attrs are re-keyed through to_text."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        return {"type": "StartTag",
                "name": text_type(name),
                "namespace": to_text(namespace),
                "simba_data": dict(((to_text(namespace, False), to_text(name)),
                                    to_text(value, False))
                                   for (namespace, name), value in attrs.items())}
    def endTag(self, namespace, name):
        """Build an EndTag token."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        # NOTE(review): the assert message below shows type(namespace);
        # it likely was meant to be type(name).
        assert isinstance(name, string_types), type(namespace)
        return {"type": "EndTag",
                "name": to_text(name, False),
                "namespace": to_text(namespace),
                "simba_data": {}}
    def text(self, data):
        """Yield up to three tokens: leading space, characters, trailing space."""
        assert isinstance(data, string_types), type(data)
        data = to_text(data)
        # Split off leading whitespace.
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "simba_data": left}
        data = middle
        # Split off trailing whitespace.
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "simba_data": middle}
        if right:
            yield {"type": "SpaceCharacters", "simba_data": right}
    def comment(self, data):
        """Build a Comment token."""
        assert isinstance(data, string_types), type(data)
        return {"type": "Comment", "simba_data": text_type(data)}
    def doctype(self, name, publicId=None, systemId=None, correct=True):
        """Build a Doctype token; all fields are coerced through to_text."""
        assert is_text_or_none(name), type(name)
        assert is_text_or_none(publicId), type(publicId)
        assert is_text_or_none(systemId), type(systemId)
        return {"type": "Doctype",
                "name": to_text(name),
                "publicId": to_text(publicId),
                "systemId": to_text(systemId),
                "correct": to_text(correct)}
    def entity(self, name):
        """Build an Entity token."""
        assert isinstance(name, string_types), type(name)
        return {"type": "Entity", "name": text_type(name)}
    def unknown(self, nodeType):
        """Build an error token for an unrecognised node type."""
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that traverses the tree iteratively via four subclass hooks."""
    def getNodeDetails(self, node):
        # Return (nodeType, *details) for *node*; consumed by __iter__ below.
        raise NotImplementedError
    def getFirstChild(self, node):
        raise NotImplementedError
    def getNextSibling(self, node):
        raise NotImplementedError
    def getParentNode(self, node):
        raise NotImplementedError
    def __iter__(self):
        # Depth-first, non-recursive walk starting at the tree root.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    # Void elements are emitted as EmptyTag and not descended into.
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                # The document node itself produces no token; walk its children.
                hasChildren = True
            else:
                yield self.unknown(details[0])
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                # Descend into the first child.
                currentNode = firstChild
            else:
                # No children: emit EndTags while climbing back up until a
                # next sibling is found, or stop when back at the root.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
| [
"leeo1116@gmail.com"
] | leeo1116@gmail.com |
47736549784a127f5f6fbf30a3c83f167d46bee4 | 462670fdda0c89ab500a131abd84628ce7281847 | /utils_xyz/sample_group_blockid.py | f46c8876c35b1fadc790972f110753c039c2101a | [
"MIT"
] | permissive | xuyongzhi/dynamic_pointnet | 528d7cc7384c096a6e81ab41c7291e6897e4cfdb | f4a5a6203840babd40783716b127219e4655cbaf | refs/heads/master | 2021-03-27T20:38:54.622833 | 2018-03-26T06:07:27 | 2018-03-26T06:07:27 | 111,261,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # xyz Decc 2017
# Do 3d point cloud sample and group by block index
def get_sample_group_idxs(npoint,block_step,nsample):
    # NOTE(review): unimplemented stub — ``sample_idxs`` and ``group_idxs``
    # are never assigned, so calling this raises NameError.
    return sample_idxs,group_idxs
def sample_and_group(npoint,block_step,nsample):
    '''
    Get npoint sub-blocks with equal stride and <block_step> step. The center of each sub-block is npoint down-sampled points.
    In each sub-block, nsample points are extracted.
    '''
    # NOTE(review): unimplemented stub — none of the returned names are ever
    # assigned, so calling this raises NameError.
    return new_xyz, sub_block_idxs, group_idxs, grouped_xyz
| [
"buaaxyz@yeah.net"
] | buaaxyz@yeah.net |
02f1709794680778775a1bff3b92d7b941023984 | 762cbba14c80f4dd09fa6e5915e094825eef1cae | /653. Two Sum IV - Input is a BST.py | db1433542d4832888a00278cd4eb95174686af67 | [] | no_license | arnabs542/Leetcode-18 | 1faff2564b4a5bb970308187a0b71553fd85a250 | 02d31ab3363c92e8fdde15100bf4a3cbcd43ecd0 | refs/heads/master | 2022-07-26T12:18:38.834287 | 2020-05-19T05:40:48 | 2020-05-19T05:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findTarget(self, root, k):
        """Return True if two distinct nodes in the BST sum to *k*.

        In-order traversal of a BST yields a sorted list of values; a
        two-pointer scan then finds a pair summing to k in O(n) time and
        O(n) space.  (Fix: the original placed this docstring *after* the
        return statements, making it dead code.)

        :type root: TreeNode
        :type k: int
        :rtype: bool
        """
        values = []

        def inorder(node):
            # Left, node, right — appends values in sorted order.
            if not node:
                return
            inorder(node.left)
            values.append(node.val)
            inorder(node.right)

        inorder(root)

        lo, hi = 0, len(values) - 1
        while lo < hi:
            pair_sum = values[lo] + values[hi]
            if pair_sum < k:
                lo += 1
            elif pair_sum > k:
                hi -= 1
            else:
                return True
        return False
| [
"noreply@github.com"
] | arnabs542.noreply@github.com |
93fc017e4cdf01ac3011b9e5e8575f19b95118f6 | 7677c7be75c651eb60e04c9d718981156f995e93 | /scripts/train.py | 7dfd38c380614d5b93c6872dd1c241b8150134a9 | [] | no_license | fgs22002/real-word-errors | 74e2ce37d15c37966cb94fa33dd29c2d1052554b | e5944c70d5bf41c58ae435cc2893e035ff730323 | refs/heads/main | 2023-04-06T20:54:33.325315 | 2021-04-06T16:54:47 | 2021-04-06T16:54:47 | 355,241,936 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,805 | py | """
real word errors
training
@author Daniel Bravo daniel.bravo@um.es
@author Jesica López <jesica.lopez@um.es>
@author José Antonio García-Díaz joseantonio.garcia8@um.es
@author Fernando Molina-Molina <fernando.molina@vocali.net>
@author Francisco García Sánchez <frgarcia@um.es>
"""
from pickle import dump
from funciones import define_model, load_doc
from tensorflow.keras.models import Sequential
from data_generator import DataGenerator
from tensorflow.keras.preprocessing.text import Tokenizer
from numpy import array
from tensorflow.keras.utils import to_categorical
from pickle import load
import numpy as np
import os.path
# @var doc load sequences
doc = load_doc('./../input/spanishText_10000_15000_STOPWORDS.txt')
lines = doc.split('\n')
print (lines[:200])
lines = lines[0:round((len(lines))*0.01)]
print ('N lines: ')
print (len(lines))
# encode sequences:
tokenizer = Tokenizer ()
tokenizer.fit_on_texts (lines)
sequences = tokenizer.texts_to_sequences (lines)
# vocabulary size
vocab_size = len(tokenizer.word_index) + 1
print ('vocab_size:')
print (vocab_size)
# sequence input and labels: save .npy files
sequences = array (sequences)
X, y = sequences[:,:-1], sequences[:,-1]
seq_length = X.shape[1]
# Generate sequences
for x in range (X.shape[0]):
ID = 'id-' + str(x+1)
fi = './../npy_files/spanishText_10000_15000/' + ID + '.npy'
if not os.path.exists (fi):
np.save (fi, X[x,:])
# dictionaries
samp_ids = ['id-' + str(counter + 1) for counter, item in enumerate (lines)]
# @var train_ids Sample training
train_ids = samp_ids[0:round(len(samp_ids) * 0.8)]
# @var val_ids Sample validation
val_ids = samp_ids[round (len (samp_ids) * 0.8):len (samp_ids)]
# @var partition Dict
partition = {
'train': train_ids,
'validation': val_ids
}
# @var labels Dict
labels = {samp_ids[j]: y[j] for j in range(len(samp_ids))}
# Configure TRAINING parameters
# @var EPOCHS int
EPOCHS = 50
# @var BATCH_SIZE int
BATCH_SIZE = 32
# @var dat_dim int
dat_dim = 50
# @var params Dict
params = {
'dim': dat_dim,
'batch_size': BATCH_SIZE,
'n_classes': vocab_size,
'shuffle': True
}
# @var training_generator DataGenerator
training_generator = DataGenerator (partition['train'], labels, **params)
# @var validation_generator DataGenerator
validation_generator = DataGenerator (partition['validation'], labels, **params)
# @var model
model = define_model (vocab_size, seq_length)
# Fit model and validate
evaluation = model.fit_generator (generator=training_generator, epochs = EPOCHS, validation_data = validation_generator)
print(evaluation)
# Save model to file and save tokenizer
model.save ('./../models/model_test_Wiki_001.h5')
dump(tokenizer, open ('./../tokenizers/model_test_Wiki_001.pkl', 'wb')) | [
"Smolky@gmail.com"
] | Smolky@gmail.com |
a20e2f22d6517c4c5cfd9edc79849337bd656004 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_demented.py | 96964537d8de1cbe3540f4986be959bac0caa3e9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py |
#calss header
class _DEMENTED():
def __init__(self,):
self.name = "DEMENTED"
self.definitions = [u'unable to think or act clearly because you are extremely worried, angry, or excited by something: ', u'crazy: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.