blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
319afda57560172da3a9a019607006d6b877dc1b | bf681fbd7edbf4f8f1e0b20cbd09b362f777c9c3 | /python/bazel_pystyle_black_isort/python_lint.py | 27376fcf03eb86e135e50205d356355a6d6d6ac5 | [
"BSD-3-Clause"
] | permissive | EricCousineau-TRI/repro | 308d4a86f3c7da8be5811db2f3f68d39db60d7ed | 9800f45e07f511c9a355ee90333955451b55559a | refs/heads/master | 2023-08-31T13:49:23.540640 | 2023-08-25T19:18:33 | 2023-08-25T19:18:33 | 87,116,976 | 24 | 13 | NOASSERTION | 2023-03-25T01:40:55 | 2017-04-03T20:19:28 | Jupyter Notebook | UTF-8 | Python | false | false | 6,591 | py | """
Check Python code for semantic errors (bad imports, etc.).
"""
import argparse
import json
from os.path import isfile
import re
import sys
from textwrap import indent
# N.B. black's public API is a bit unstable.
import black
import isort
# Top-level package names treated as "PyTorch imports" by the preload check
# below (check_maybe_preload_pydrake_for_torch).
KNOWN_PYTORCH_PACKAGES = [
    "torch",
    "torchvision",
]
def eprint(*args):
    """Echo *args* to standard error, mirroring built-in print() formatting."""
    sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
# Compiled once at import time; the original recompiled this pattern on
# every call, which is wasted work when linting large files line by line.
_IMPORT_RE = re.compile(r"(\s*)(import|from)\s+([\w\.]+)\b.*")


def get_import_package(line):
    """
    If this is an import, return the top-level package and indentation.

    Args:
        line: One source line (no trailing newline expected).

    Returns:
        (prefix, package): the leading whitespace and the first dotted
        component of the imported module (e.g. ``"torch"`` for
        ``from torch.nn import Module``), or ``(None, None)`` when *line*
        is not an import statement.
    """
    m = _IMPORT_RE.fullmatch(line)
    if m is None:
        return None, None
    prefix, _, module = m.groups()
    package = module.split(".")[0]
    return prefix, package
def check_maybe_preload_pydrake_for_torch(text, filename):
    """
    Ensure `import maybe_preload_pydrake_for_torch` appears iff torch is used.

    Scans *text* line by line; inserts the preload import immediately before
    the first PyTorch import when missing, and removes it when no PyTorch
    package is imported at all.

    Returns:
        (messages, new_text): lint messages and the amended source text.
        Note *filename* is unused; messages report "(unknown)".
    """
    messages = []
    imports_preload_pydrake_at = None  # line index of the preload import
    imports_torch = False
    new_lines = []
    for i, line in enumerate(text.split("\n")):
        prefix, package = get_import_package(line)
        if package == "maybe_preload_pydrake_for_torch":
            imports_preload_pydrake_at = i
        elif package in KNOWN_PYTORCH_PACKAGES:
            # Only the *first* torch import triggers an insertion/message.
            if not imports_torch:
                imports_torch = True
                if imports_preload_pydrake_at is None:
                    # Reuse the torch import's indentation for the insert.
                    new_line = (
                        f"{prefix}import maybe_preload_pydrake_for_torch")
                    new_lines.append(new_line)
                    messages.append(
                        f"(unknown):{i + 1}: Please call "
                        f"`{new_line.strip()}` before importing "
                        f"`{package}`.")
        new_lines.append(line)
    if not imports_torch and imports_preload_pydrake_at is not None:
        messages.append(
            f"(unknown):{imports_preload_pydrake_at + 1}: torch is not used, "
            f"please remove this line")
        # Safe to index new_lines directly: no insertion happened (torch was
        # never imported), so indices match the original text.
        del new_lines[imports_preload_pydrake_at]
    return messages, "\n".join(new_lines)
def check_file(text, filename, use_black=False, isort_settings_file=None):
    """
    Lint (and possibly reformat) one file's contents.

    Args:
        text: Raw file contents.
        filename: Only used to detect notebooks via the ``.ipynb`` suffix;
            the file itself is never opened here.
        use_black: Also run ``black`` over the code.
        isort_settings_file: When not None, also run ``isort``; an empty
            string makes isort load its default configuration.

    Returns:
        (messages, new_text): lint messages and the replacement text.
    """
    if filename.endswith(".ipynb"):
        # Notebook: recurse into each code cell, then re-serialize the JSON.
        messages = []
        doc = json.loads(text)
        assert doc["metadata"]["kernelspec"]["language"] == "python"
        for i, cell in enumerate(doc["cells"]):
            cell_name = f"(unknown)/In[{i + 1}]"
            if cell["cell_type"] == "code":
                cell_text = "".join(cell["source"])
                try:
                    cell_messages, new_cell_text = check_file(
                        cell_text,
                        cell_name,
                        use_black=use_black,
                        isort_settings_file=isort_settings_file,
                    )
                except black.InvalidInput:
                    # Cell is not parseable Python; report it but keep as-is.
                    cell_messages = []
                    new_cell_text = cell_text
                    messages.append(f"{cell_name}: Could not parse!")
                cell["source"] = new_cell_text.rstrip().splitlines(keepends=True)
                messages += cell_messages
                if len(cell["outputs"]) > 0:
                    messages.append(f"{cell_name}: There should be no outputs!")
                    cell["outputs"] = []
        new_text = json.dumps(doc, indent=1) + "\n"
        return messages, new_text
    messages, new_text = check_maybe_preload_pydrake_for_torch(
        text, filename)
    new_text_orig = new_text
    formatters_used = []
    if use_black:
        formatters_used += ["black"]
        black_file_mode = black.FileMode(
            target_versions={black.TargetVersion.PY36},
            line_length=79,
        )
        # WARNING: This may create line-length problems:
        # https://github.com/psf/black/issues/208
        # https://github.com/psf/black/issues/1017
        try:
            new_text = black.format_file_contents(
                src_contents=new_text,
                fast=True,
                mode=black_file_mode,
            )
        except black.NothingChanged:
            pass
    if isort_settings_file is not None:
        # N.B. If isort_settings_file == "", then it will load a default file.
        if isort_settings_file != "":
            assert isfile(isort_settings_file), isort_settings_file
        formatters_used += ["isort"]
        isort_config = isort.Config(settings_file=isort_settings_file)
        new_text = isort.code(new_text, config=isort_config)
    # N.B. Check after applying both, as they may conflict between each other
    # and we don't care about intermediate results.
    if new_text != new_text_orig:
        messages.append(
            f"(unknown): Needs reformatting for {formatters_used}")
    return messages, new_text
def main():
    """CLI entry point: lint each file; with --fix, rewrite files in place."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix", action="store_true")
    parser.add_argument("--use_black", action="store_true")
    parser.add_argument("--isort_settings_file", type=str, default=None)
    parser.add_argument("files", type=str, nargs='*')
    args = parser.parse_args()
    # Reconstruct the flag set so the error message can show the user the
    # exact auto-fix command line.
    fix_args = ["--fix"]
    if args.use_black:
        fix_args += [f"--use_black"]
    if args.isort_settings_file is not None:
        fix_args += [f"--isort_settings_file={args.isort_settings_file}"]
    fix_args_str = " ".join(fix_args)
    messages = []
    for file in sorted(args.files):
        with open(file, "r", encoding="utf8") as f:
            text = f.read()
        new_messages, new_text = check_file(
            text,
            file,
            use_black=args.use_black,
            isort_settings_file=args.isort_settings_file,
        )
        if new_text != text:
            if args.fix:
                with open(file, 'w', encoding="utf8") as f:
                    f.write(new_text)
            else:
                # Only report when not fixing; --fix applies silently.
                messages += new_messages
                messages.append(
                    f"To fix, run:\n"
                    f"  bazel-bin/tools/lint/python_lint {fix_args_str} "
                    f"{file}\n")
    if messages:
        messages_str = "\n".join(messages)
        eprint()
        eprint(messages_str)
        eprint("You may need to build the tools first before using --fix:")
        eprint("  bazel build //tools/lint/...")
        eprint()
        if "maybe_preload_pydrake_for_torch" in messages_str:
            eprint("WARNING: If you need maybe_preload_pydrake_for_torch, be ")
            eprint("sure to make the necessary bazel targets depend on ")
            eprint("\"//tools:pytorch\", not just \"@pytorch\".")
            eprint()
        # Nonzero exit so CI fails when lint problems remain.
        sys.exit(1)
if __name__ == "__main__":
main()
| [
"eric.cousineau@tri.global"
] | eric.cousineau@tri.global |
354922c78bf1544cd2219eeed6a057b22c959414 | 9bb01fa882e713aa59345051fec07f4e3d3478b0 | /tests/cysparse_/sparse/common_attributes/test_common_attributes_matrices_likes_ConjugatedSparseMatrix_INT32_t_INT32_t.py | 28785255adae738602ca0b0a187f84a636ddd1c9 | [] | no_license | syarra/cysparse | f1169c496b54d61761fdecbde716328fd0fb131b | 7654f7267ab139d0564d3aa3b21c75b364bcfe72 | refs/heads/master | 2020-05-25T16:15:38.160443 | 2017-03-14T21:17:39 | 2017-03-14T21:17:39 | 84,944,993 | 0 | 0 | null | 2017-03-14T12:11:48 | 2017-03-14T12:11:48 | null | UTF-8 | Python | false | false | 7,379 | py | #!/usr/bin/env python
"""
This file tests basic common attributes for **all** matrix like objects.
Proxies are only tested for a :class:`LLSparseMatrix` object.
See file ``sparse_matrix_coherence_test_functions``.
"""
from sparse_matrix_like_common_attributes import common_matrix_like_attributes
import unittest
from cysparse.sparse.ll_mat import *
from cysparse.common_types.cysparse_types import *
########################################################################################################################
# Tests
########################################################################################################################
##################################
# Case Non Symmetric, Non Zero
##################################
class CySparseCommonAttributesMatrices_ConjugatedSparseMatrix_INT32_t_INT32_t_TestCase(unittest.TestCase):
    """Common attributes of the conjugated view of a general matrix
    (store_symmetric=False, store_zero=False)."""
    def setUp(self):
        self.nrow = 10
        self.ncol = 14
        # Fully-populated matrix, so nnz is nrow * ncol.
        self.nnz = self.nrow * self.ncol
        self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=INT32_T, itype=INT32_T)
        self.C = self.A.conj
        self.nargin = self.ncol
        self.nargout = self.nrow
        self.base_type_str = self.A.base_type_str
    def test_common_attributes(self):
        is_OK, attribute = common_matrix_like_attributes(self.C)
        self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
    def test_nrow_attribute(self):
        self.assertTrue(self.C.nrow == self.nrow)
    def test_ncol_attribute(self):
        self.assertTrue(self.C.ncol == self.ncol)
    def test_nnz_attribute(self):
        self.assertTrue(self.C.nnz == self.nnz)
    def test_symmetric_storage_attribute(self):
        self.assertTrue(not self.C.store_symmetric)
    def test_zero_storage_attribute(self):
        self.assertTrue(not self.C.store_zero)
    def test_is_mutable_attribute(self):
        self.assertTrue(self.C.is_mutable)
    def test_base_type_str(self):
        self.assertTrue(self.C.base_type_str == self.base_type_str, "'%s' is not '%s'" % (self.C.base_type_str, self.base_type_str))
    def test_is_symmetric(self):
        self.assertTrue(not self.C.is_symmetric)
    def test_nargin(self):
        self.assertTrue(self.nargin == self.C.nargin)
    def test_nargout(self):
        self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Symmetric, Non Zero
##################################
class CySparseCommonAttributesSymmetricMatrices_ConjugatedSparseMatrix_INT32_t_INT32_t_TestCase(unittest.TestCase):
    """Common attributes of the conjugated view of a symmetric-storage matrix
    (store_symmetric=True, store_zero=False)."""
    def setUp(self):
        self.size = 14
        # Fix: use floor division. `/` is true division in Python 3, which
        # made the expected element count a float instead of an int.
        self.nnz = ((self.size + 1) * self.size) // 2
        self.nargin = self.size
        self.nargout = self.size
        self.A = LinearFillLLSparseMatrix(size=self.size, dtype=INT32_T, itype=INT32_T, store_symmetric=True)
        self.C = self.A.conj
        self.base_type_str = self.A.base_type_str
    def test_common_attributes(self):
        is_OK, attribute = common_matrix_like_attributes(self.C)
        self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
    def test_nrow_attribute(self):
        self.assertTrue(self.C.nrow == self.size)
    def test_ncol_attribute(self):
        self.assertTrue(self.C.ncol == self.size)
    def test_nnz_attribute(self):
        self.assertTrue(self.C.nnz == self.nnz, '%d is not %d' % (self.C.nnz, self.nnz))
    def test_symmetric_storage_attribute(self):
        self.assertTrue(self.C.store_symmetric)
    def test_zero_storage_attribute(self):
        self.assertTrue(not self.C.store_zero)
    def test_is_mutable_attribute(self):
        self.assertTrue(self.C.is_mutable)
    def test_base_type_str(self):
        self.assertTrue(self.C.base_type_str == self.base_type_str)
    def test_is_symmetric(self):
        self.assertTrue(self.C.is_symmetric)
    def test_nargin(self):
        self.assertTrue(self.nargin == self.C.nargin)
    def test_nargout(self):
        self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Non Symmetric, Zero
##################################
class CySparseCommonAttributesWithZeroMatrices_ConjugatedSparseMatrix_INT32_t_INT32_t_TestCase(unittest.TestCase):
    """Common attributes of the conjugated view of a general matrix that
    stores explicit zeros (store_symmetric=False, store_zero=True)."""
    def setUp(self):
        self.nrow = 10
        self.ncol = 14
        # Fully-populated matrix, so nnz is nrow * ncol.
        self.nnz = self.nrow * self.ncol
        self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=INT32_T, itype=INT32_T, store_zero=True)
        self.C = self.A.conj
        self.nargin = self.ncol
        self.nargout = self.nrow
        self.base_type_str = self.A.base_type_str
    def test_common_attributes(self):
        is_OK, attribute = common_matrix_like_attributes(self.C)
        self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
    def test_nrow_attribute(self):
        self.assertTrue(self.C.nrow == self.nrow)
    def test_ncol_attribute(self):
        self.assertTrue(self.C.ncol == self.ncol)
    def test_nnz_attribute(self):
        self.assertTrue(self.C.nnz == self.nnz)
    def test_symmetric_storage_attribute(self):
        self.assertTrue(not self.C.store_symmetric)
    def test_zero_storage_attribute(self):
        self.assertTrue(self.C.store_zero)
    def test_is_mutable_attribute(self):
        self.assertTrue(self.C.is_mutable)
    def test_base_type_str(self):
        self.assertTrue(self.C.base_type_str == self.base_type_str)
    def test_is_symmetric(self):
        self.assertTrue(not self.C.is_symmetric)
    def test_nargin(self):
        self.assertTrue(self.nargin == self.C.nargin)
    def test_nargout(self):
        self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Symmetric, Zero
##################################
class CySparseCommonAttributesSymmetricWithZeroMatrices_ConjugatedSparseMatrix_INT32_t_INT32_t_TestCase(unittest.TestCase):
    """Common attributes of the conjugated view of a symmetric-storage matrix
    that stores explicit zeros (store_symmetric=True, store_zero=True)."""
    def setUp(self):
        self.size = 14
        # Fix: use floor division. `/` is true division in Python 3, which
        # made the expected element count a float instead of an int.
        self.nnz = ((self.size + 1) * self.size) // 2
        self.A = LinearFillLLSparseMatrix(size=self.size, dtype=INT32_T, itype=INT32_T, store_symmetric=True, store_zero=True)
        self.nargin = self.size
        self.nargout = self.size
        self.C = self.A.conj
        self.base_type_str = self.A.base_type_str
    def test_common_attributes(self):
        is_OK, attribute = common_matrix_like_attributes(self.C)
        self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
    def test_nrow_attribute(self):
        self.assertTrue(self.C.nrow == self.size)
    def test_ncol_attribute(self):
        self.assertTrue(self.C.ncol == self.size)
    def test_nnz_attribute(self):
        # Fix: the original `assertTrue(self.C.nnz, self.nnz)` treated the
        # second argument as the failure *message* and passed for any truthy
        # nnz; an equality assertion was clearly intended.
        self.assertEqual(self.C.nnz, self.nnz)
    def test_symmetric_storage_attribute(self):
        self.assertTrue(self.C.store_symmetric)
    def test_zero_storage_attribute(self):
        self.assertTrue(self.C.store_zero)
    def test_is_mutable_attribute(self):
        self.assertTrue(self.C.is_mutable)
    def test_base_type_str(self):
        self.assertTrue(self.C.base_type_str == self.base_type_str)
    def test_is_symmetric(self):
        self.assertTrue(self.C.is_symmetric)
    def test_nargin(self):
        self.assertTrue(self.nargin == self.C.nargin)
    def test_nargout(self):
        self.assertTrue(self.nargout == self.C.nargout)
if __name__ == '__main__':
unittest.main() | [
"sylvain.arreckx@gmail.com"
] | sylvain.arreckx@gmail.com |
fd56737b198629084231e79e509803626fa804c2 | 7b033781f8bce77dad1ee27c7c96ef7b8361f27c | /config/config_otc.py | 6b1e4b16229dfd06fa4d98d2d02877cae7a13878 | [] | no_license | cash2one/announcement_otc | c1ecb8e5e82d800e6aaf731f2de4d1a446d93d37 | 86be08325bd193eadff7bd56968d150f573b2a27 | refs/heads/master | 2020-05-23T12:57:26.496296 | 2016-09-20T03:23:29 | 2016-09-20T03:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # -*- coding: utf-8 -*-
from os.path import join, abspath, dirname
# NEEQ (National Equities Exchange and Quotations) announcement endpoints.
refer_uri = 'http://www.neeq.com.cn/announcement'
URI = 'http://www.neeq.com.cn/disclosureInfoController/infoResult.do?callback='
# POST form fields for the NEEQ disclosure query endpoint; fields marked
# "Change" are filled in per request.
FORM_DATA = {
    'disclosureType': '5',
    'page': '0', # Change
    'companyCd': '',
    'isNewThree': '1', # '1' selects the new third board vs. the old one
    'startTime': '', # Change
    'endTime': '', # Change
    'keyword': '关键字',
    'xxfcbj': ''
}
CRT_INDEX = "http://192.168.250.205:17081/indexer/services/indexes/delta.json?indexer=announce_otc&taskids="
PORT = 27017
DB_OTC = 'news'
# DATA_HOST = '192.168.100.20' # test-environment mongo
DATA_HOST = '192.168.251.95' # Shanghai-environment mongo 95 <122.144.134.95>
# DATA_HOST = '122.144.134.95' # Shanghai-environment mongo 95, public address
TABLE_OTC = 'announcement_otc'
DB_RULE = 'ada'
# RULE_HOST = '192.168.250.20' # test-environment mongo
RULE_HOST = '192.168.251.95' # Shanghai-environment mongo 95 <122.144.134.95>
# RULE_HOST = '122.144.134.95' # Shanghai-environment mongo 95, public address
TABLE_RULE = 'dict_announce_rule'
STOCK_DB = 'ada'
# STOCK_HOST = '192.168.250.20' # test-environment mongo
STOCK_HOST = '192.168.251.95' # Shanghai-environment mongo 95 <122.144.134.95>
# STOCK_HOST = '122.144.134.95' # Shanghai-environment mongo 95, public address
STOCK_TABLE = 'base_stock'
# Local directory ("<repo>/files/") used for downloaded announcement files.
ROOT_PATH = join(dirname(dirname(abspath(__file__))), 'files').replace('\\', '/') + '/'
AWS_HOST = ''
BUCKET_NAME = ''
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
ROOT_AWS_KEY = 'announce/otc/'
| [
"xutao.ding@chinascopefinancial.com"
] | xutao.ding@chinascopefinancial.com |
7e5067e2793e982f135b0207ea31c1392356f0a2 | f8c3c8ae27b9ed7df6129edbbadec253a344c5e1 | /treenav/migrations/0003_auto__chg_field_menuitem_parent.py | e655d162f0283a79e5fb8538e04bc7cbe5daf74e | [] | no_license | weijia/django-treenav | 7607c0a11e5df108a0786263a738a87ef3f0fe75 | 662bc5ef75f62a36294dc33fee57f86156ac0dd2 | refs/heads/master | 2021-01-17T22:07:56.956872 | 2014-12-26T12:13:18 | 2014-12-26T12:13:18 | 22,033,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: change MenuItem.parent from a plain ForeignKey to an
    mptt TreeForeignKey (schema-compatible column, different field class)."""

    def forwards(self, orm):
        # Changing field 'MenuItem.parent'
        db.alter_column('treenav_menuitem', 'parent_id', self.gf('mptt.fields.TreeForeignKey')(null=True, to=orm['treenav.MenuItem']))

    def backwards(self, orm):
        # Changing field 'MenuItem.parent'
        db.alter_column('treenav_menuitem', 'parent_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['treenav.MenuItem']))

    # Frozen ORM state used by South to reconstruct the models at this point.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'treenav.menuitem': {
            'Meta': {'ordering': "('lft', 'tree_id')", 'object_name': 'MenuItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'href': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['treenav.MenuItem']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        }
    }

    complete_apps = ['treenav']
complete_apps = ['treenav'] | [
"vinod@kurup.com"
] | vinod@kurup.com |
dc9bc36023145396ce10142e03973da1fcdcc40d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_disheartened.py | ed77d9dc087968f2e62aa3634513e24abf38d7af | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
#class header
class _DISHEARTENED():
    """Word-form record for "disheartened" (inflection of "dishearten")."""

    def __init__(self):
        self.name = "DISHEARTENED"
        # Fix: the original `self.definitions = dishearten` referenced an
        # undefined bare name and raised NameError on instantiation; the base
        # form must be a string literal (it also appears in self.basic below).
        self.definitions = 'dishearten'
        self.parents = []
        self.childen = []  # sic: attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['dishearten']
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8136b0804c994e0b9a7b49fbb306c86db7faed72 | e9aba67fd59abf7fd68df28a0cc69cae97c7dc4c | /backend/event/migrations/0001_initial.py | 09e5f55e56afdcc5df6ce1be051d19b884c5af33 | [] | no_license | crowdbotics-apps/demo-20477 | d709c453c8411ffbd5b41c5bab1030ec94d9af2e | 63f7e3b9f5304bbfa0e31647d3fbc13287564373 | refs/heads/master | 2022-12-08T20:19:33.326484 | 2020-09-20T13:12:17 | 2020-09-20T13:12:17 | 297,078,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,766 | py | # Generated by Django 2.2.16 on 2020-09-20 13:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the event app: categories, FAQs, locations, vendors,
    sponsors, schedules, presenters, and per-user schedule/favorite links."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField()),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Faq',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amenities', models.TextField(blank=True, null=True)),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
                ('image', models.SlugField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Vendor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('logo_image', models.SlugField(blank=True, null=True)),
                ('type', models.TextField(blank=True, null=True)),
                ('website', models.URLField(blank=True, null=True)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor_category', to='event.Category')),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='VendorDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website', models.URLField()),
                ('description', models.TextField()),
                ('associated_name', models.TextField(blank=True, null=True)),
                ('vendor_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendordetail_vendor_id', to='event.Vendor')),
            ],
        ),
        migrations.CreateModel(
            name='Sponsor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('logo_image', models.SlugField()),
                ('sponsor_level', models.TextField()),
                ('presenter', models.BooleanField()),
                ('website', models.URLField(blank=True, null=True)),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sponsor_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dateTime', models.DateTimeField()),
                ('description', models.TextField(blank=True, null=True)),
                ('track', models.TextField(blank=True, null=True)),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schedule_location', to='event.Location')),
            ],
        ),
        migrations.CreateModel(
            name='Presenter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('title', models.CharField(max_length=256)),
                ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='presenter_schedule', to='event.Schedule')),
            ],
        ),
        # Join tables linking users to schedules and vendors.
        migrations.CreateModel(
            name='MySchedule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('schedule', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='myschedule_schedule', to='event.Schedule')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='myschedule_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Favorites',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='favorites_user', to=settings.AUTH_USER_MODEL)),
                ('vendor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='favorites_vendor', to='event.Vendor')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a9bd379e1a0a8ad0be07121782f5822df1b0140a | 0edf65db87f6f0a99c74ab14231471d6a1e85d55 | /nautobot_golden_config/tests/test_api.py | ae904067394ff1ea5b64af880de416fd1c7a9604 | [
"Apache-2.0"
] | permissive | jpobeda/nautobot-plugin-golden-config | cfe94b6735edfb0509e6064ed7197821592e5096 | e911cf5b229900f69fd8792ed6b1a92db9ebc55a | refs/heads/main | 2023-03-29T06:44:59.703555 | 2021-02-24T14:40:11 | 2021-02-24T14:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | """Unit tests for nautobot_golden_config."""
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from nautobot.users.models import Token
User = get_user_model()
class PlaceholderAPITest(TestCase):
    """Placeholder API smoke test.

    NOTE(review): despite the original "ConfigCompliance" wording, this test
    only exercises token-authenticated access to the dcim device list.
    """

    def setUp(self):
        """Create a superuser and token for API calls."""
        self.user = User.objects.create(username="testuser", is_superuser=True)
        self.token = Token.objects.create(user=self.user)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {self.token.key}")

    def test_placeholder(self):
        """Verify that devices can be listed (empty database -> count 0)."""
        url = reverse("dcim-api:device-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["count"], 0)
| [
"ken@celenza.org"
] | ken@celenza.org |
ff9a26845781688bda48a45f8d89c8396d8adac0 | 1be6d0cae96d906866582af8e3fbda00aa42758c | /webssh/worker.py | 672b5b6b2eb9aef8a302bbd199ad87f1c6d4e578 | [] | no_license | CaesarLinsa/webssh_caesarlinsa | 1e0fe862ca8d735de9e7349f98cb9bb9fd10e13f | fb32e1daa6aa23d2b9a5114cd94d5105fd775899 | refs/heads/master | 2022-12-09T10:29:09.185492 | 2020-09-11T16:42:05 | 2020-09-11T16:42:05 | 289,730,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,827 | py | import logging
import tornado.websocket
from tornado.ioloop import IOLoop
from tornado.iostream import _ERRNO_CONNRESET
from tornado.util import errno_from_exception
BUF_SIZE = 32 * 1024  # max bytes pulled from the SSH channel per read

clients = {}  # {ip: {id: worker}}
def clear_worker(worker, clients):
    """Drop *worker* from the ``{ip: {id: worker}}`` registry.

    Prunes the per-IP bucket once it becomes empty.
    """
    ip = worker.src_addr[0]
    workers = clients.get(ip)
    assert worker.id in workers
    del workers[worker.id]
    if not workers:
        del clients[ip]
    if not clients:
        clients.clear()
def recycle_worker(worker):
    """Close a worker that never got a websocket handler attached.

    A worker already claimed by a handler is left alone.
    """
    if worker.handler:
        return
    logging.warning(f'Recycling worker {worker.id}')
    worker.close(reason='worker recycled')
class Worker(object):
    """Bridges one SSH channel to one websocket handler via the IOLoop.

    Instances register themselves as the IOLoop callback for the channel's
    file descriptor and shuttle bytes channel -> websocket (``on_read``) and
    websocket -> channel (``on_write``).
    """

    def __init__(self, loop, ssh, chan, dst_addr):
        self.loop = loop
        self.ssh = ssh
        self.chan = chan
        self.dst_addr = dst_addr
        self.fd = chan.fileno()
        # str(id(self)) doubles as this worker's key in the `clients` registry.
        self.id = str(id(self))
        self.data_to_dst = []  # pending chunks to send to the SSH channel
        self.handler = None  # websocket handler, attached via set_handler()
        self.mode = IOLoop.READ  # currently registered fd event mask
        self.closed = False

    def __call__(self, fd, events):
        # IOLoop event callback: dispatch on whichever events are ready.
        if events & IOLoop.READ:
            self.on_read()
        if events & IOLoop.WRITE:
            self.on_write()
        if events & IOLoop.ERROR:
            self.close(reason='error event occurred')

    def set_handler(self, handler):
        # Only the first websocket handler may claim this worker.
        if not self.handler:
            self.handler = handler

    def update_handler(self, mode):
        """Switch the fd's registered event mask when it changed."""
        if self.mode != mode:
            self.loop.update_handler(self.fd, mode)
            self.mode = mode
        if mode == IOLoop.WRITE:
            # Retry the pending write shortly even without an fd event.
            self.loop.call_later(0.1, self, self.fd, IOLoop.WRITE)

    def on_read(self):
        """Forward bytes from the SSH channel to the websocket."""
        logging.debug('worker {} on read'.format(self.id))
        try:
            # Read whatever output the channel has buffered (originally
            # noted: whole-line results arrive once Enter is pressed).
            data = self.chan.recv(BUF_SIZE)
        except (OSError, IOError) as e:
            logging.error(e)
            if errno_from_exception(e) in _ERRNO_CONNRESET:
                self.close(reason='chan error on reading')
        else:
            logging.debug('{!r} from {}:{}'.format(data, *self.dst_addr))
            if not data:
                # Empty read means the remote side closed the channel.
                self.close(reason='chan closed')
                return
            logging.debug('{!r} to {}:{}'.format(data, *self.handler.src_addr))
            try:
                self.handler.write_message(data, binary=True)
            except tornado.websocket.WebSocketClosedError:
                self.close(reason='websocket closed')

    def on_write(self):
        """Flush buffered websocket input into the SSH channel."""
        logging.debug('worker {} on write'.format(self.id))
        if not self.data_to_dst:
            return
        data = ''.join(self.data_to_dst)
        logging.debug('{!r} to {}:{}'.format(data, *self.dst_addr))
        try:
            sent = self.chan.send(data)
        except (OSError, IOError) as e:
            logging.error(e)
            if errno_from_exception(e) in _ERRNO_CONNRESET:
                self.close(reason='chan error on writing')
            else:
                # Transient error (e.g. would-block): stay write-interested.
                self.update_handler(IOLoop.WRITE)
        else:
            self.data_to_dst = []
            data = data[sent:]
            if data:
                # Partial send: keep the remainder and retry.
                self.data_to_dst.append(data)
                self.update_handler(IOLoop.WRITE)
            else:
                self.update_handler(IOLoop.READ)

    def close(self, reason=None):
        """Tear down handler, channel and SSH client (idempotent)."""
        if self.closed:
            return
        self.closed = True
        logging.info(
            'Closing worker {} with reason: {}'.format(self.id, reason)
        )
        if self.handler:
            self.loop.remove_handler(self.fd)
            self.handler.close(reason=reason)
        self.chan.close()
        self.ssh.close()
        logging.info('Connection to {}:{} lost'.format(*self.dst_addr))
        clear_worker(self, clients)
        logging.debug(clients)
"Caesar_Linsa@163.com"
] | Caesar_Linsa@163.com |
d1525729bdb24fb613931bb8f6ddaeb00fa31629 | 4ce0f35c6aa01f5041a11979a8b5662d8ad08962 | /news/views.py | b14783e6a08dc503f62691fb52f589bfcfd6b640 | [] | no_license | lanlanzky/tt | f125785b00b51774c9033492117305dfba19fb8f | 4666af6a250a48200f5af9ef9692da53bbfcd79d | refs/heads/master | 2016-09-06T02:19:18.492453 | 2014-09-01T13:26:55 | 2014-09-01T13:26:55 | 23,542,631 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | #coding=utf8
import os,sys
import datetime
import time
from django.http import HttpResponse
from django.shortcuts import render_to_response
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
def information(request):
    """Render the static "information" page."""
    return render_to_response('information.html', {'info': 'information'})
'''
def search(request):
context={}
context['April']='04'
context['Janurary']='01'
context['February']='02'
context['March']='03'
context['May']='05'
context['June']='06'
context['July']='07'
context['August']='08'
context['September']='09'
context['October']='10'
context['November']='11'
context['December']='12'
code_name=request.GET.get('stock_code', '')
context['code']=code_name
starttime=request.GET.get('start', '')
starttime=str(starttime)
endtime=request.GET.get('end', '')
endtime=str(endtime)
if code_name and starttime and endtime:
starttime = starttime.strip()
endtime= endtime.strip()
starttime=starttime[-4:]+'-'+context[starttime[3:-5]]+'-'+starttime[0:2]
endtime=endtime[-4:]+'-'+context[endtime[3:-5]]+'-'+endtime[0:2]
context['start']=starttime
context['end']=endtime
print starttime
print type(starttime)
print len(starttime)
try:
search_stock=New_stock.objects.filter(name=code_name).exclude(date__gte='%s' %(endtime)).filter(date__gte='%s' %(starttime))
context['search_stock']=search_stock
context['length']=search_stock.__len__()
return render_to_response('search.html', context)
except Exception as e:
print e
else:
return render_to_response('search.html','YOU must submit the informations:')
'''
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
a8885a65b1123681cd589e5c8d409f3fbf5a5937 | e54ba2b1a15f0a84c917dcbcb9b2a9a515558226 | /cksearching7.py | e9a1fc2ab0a302710e3bdf6a590c40e04e05c71e | [] | no_license | Vk-Demon/vk-code | 09160de1524473227972d3a95a66f4c0cd1ac7c6 | cfb86fce590bff68b8ec5835394f7879a58c3655 | refs/heads/master | 2023-03-30T14:38:54.365966 | 2021-04-05T06:44:11 | 2021-04-05T06:44:11 | 197,799,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | nstd,rmrk=input().split() # Ramesh is a studious student and wants to find out if there is any other student in his class who has got the same marks as his, in maths. Help him find out.
nstd,rmrk,idx=int(nstd),int(rmrk),0
amrk=[int(i) for i in input().split()]
for i in range(0,nstd):
if(amrk[i]==rmrk):
idx=i
if(idx>0):
print(idx)
else:
print("-1")
| [
"noreply@github.com"
] | Vk-Demon.noreply@github.com |
b1036fadfec9795fab8120a8e4ae33690af59d34 | 962637d5981c1702e1aa64af867fb4b76d699285 | /tensorflow2/mnist_dataset_classify/config.py | 53f7eee1435937c8970dd1e024c419b0a3c50f1a | [] | no_license | shawroad/Tensorflow-Learning-Summary | eebbeae629196d1a11688873a12a29a64a6c0b97 | 57035d21cd47dca1fce9373b77677f7979af9681 | refs/heads/master | 2021-06-15T02:41:13.224445 | 2021-06-04T12:07:30 | 2021-06-04T12:07:30 | 196,351,497 | 4 | 0 | null | 2019-10-18T10:16:18 | 2019-07-11T08:21:29 | Python | UTF-8 | Python | false | false | 282 | py | """
@file : config.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2021-06-03
"""
import argparse
def set_args(args=None):
    """Build and parse the command-line options for training.

    Args:
        args: Optional list of argument strings to parse instead of
            ``sys.argv`` (useful for tests). ``None`` keeps the original
            behavior of reading the real command line.

    Returns:
        argparse.Namespace with the parsed options (``epoch_num``).
    """
    parser = argparse.ArgumentParser()
    # Number of training epochs; original help text "训练几轮" = "how many epochs".
    parser.add_argument('--epoch_num', default=10, type=int, help='训练几轮')
    return parser.parse_args(args)
| [
"luxiaonlp@163.com"
] | luxiaonlp@163.com |
9d6498414eb3569fee4886cc8b43bf5ecce0a8ec | 9788df18d5adaa469a0cb51f47309cd7401201e5 | /alisdk/top/api/rest/AthenaItemSolutionGetRequest.py | cbe900ed1a4cbdf6c5f90717aae6966c1385a217 | [] | no_license | sevennothing/aliyunTestPrj | cf690ce4765497f1b16359b82ef64f1ef992713c | 1b3e883d32c759e03fe5053c50e9a973f59bbffc | refs/heads/master | 2021-01-17T03:15:59.082544 | 2015-03-11T14:16:58 | 2015-03-11T14:16:58 | 32,001,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | '''
Created by auto_sdk on 2014-11-20 12:53:43
'''
from top.api.base import RestApi
class AthenaItemSolutionGetRequest(RestApi):
  """Request wrapper for the TOP API method ``taobao.athena.item.solution.get``.

  Auto-generated SDK class: the attributes set in __init__ are the request
  parameters; callers assign them before executing the request via RestApi.
  """
  def __init__(self,domain='gw.api.taobao.com',port=80):
    RestApi.__init__(self,domain, port)
    # Request parameters (None until set by the caller).
    self.id = None
    self.item_id = None
    self.type_key = None
  def getapiname(self):
    """Return the TOP API method name this request maps to."""
    return 'taobao.athena.item.solution.get'
| [
"licj@out.lrserver"
] | licj@out.lrserver |
45e5c6d2945b78df18430336bc4bcc70196107e9 | 2fa102b20ea99d796cc3677c9305f1a80be18e6b | /Mixed OJ Problems/hackerEarth_sudoku_sets.py | 1d3c479ae42d5567df0e488afdb019871dd8ea13 | [] | no_license | pronob1010/Codeforces_Solve | e5186b2379230790459328964d291f6b40a4bb07 | 457b92879a04f30aa0003626ead865b0583edeb2 | refs/heads/master | 2023-03-12T11:38:31.114189 | 2021-03-03T05:49:17 | 2021-03-03T05:49:17 | 302,124,730 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | def f_check(arr, x, y , pos):
    # Row check: count occurrences of `pos` across row x, failing fast as
    # soon as a duplicate is seen.
    c = 0
    for j in range(len(arr)):
        if arr[x][j] == pos:
            c+=1
        if c>1:
            return False
    # Column check: same scan down column y.
    c1 = 0
    for i in range(len(arr)):
        if arr[i][y] == pos:
            c1 += 1
        if c1 > 1:
            return False
    # 3x3 sub-grid check: locate the box's top-left corner, then count how
    # many times `pos` appears inside that box.
    c2 = 0
    sub_l_top_i = 3 * (x // 3)
    sub_l_top_j = 3 * (y // 3)
    for i in range(3):
        for j in range(3):
            if arr[sub_l_top_i + i][sub_l_top_j + j] == pos:
                c2 += 1
    if c2 > 1:
        return False
    else:
        # At most one occurrence in row, column and box: cell is consistent.
        return True
def is_valid(arr):
    """Return True iff every cell of the 9x9 grid passes f_check.

    Stops at the first offending cell, exactly like the original
    flag-and-break version.
    """
    for row in range(9):
        for col in range(9):
            if not f_check(arr, row, col, arr[row][col]):
                return False
    return True
# For each test case: read a 9x9 grid (one row per line) and report whether
# it is a consistent sudoku board.
for _ in range(int(input())):
    grid = [list(map(int, input().split())) for _ in range(9)]
    if is_valid(grid):
        print("VALID")
    else:
        print("INVALID")
| [
"pronobmozumder.info@gmail.com"
] | pronobmozumder.info@gmail.com |
b8d7443843982154ec11b278ff138a1bf30495e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04031/s576833992.py | 7db85f5ebc14a093e163e73cd0e05bba80ea0103 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | N = int(input())
A = list(map(int, input().split()))
# Try every integer target value c in [-100, 100]; the cost of moving all
# elements to c is the sum of squared differences. Print the cheapest total.
ans = min(sum((a - c) ** 2 for a in A) for c in range(-100, 101))
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
754752d63343e801239f4e51bc7b4ef8c6c0f390 | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/sklearn/feature_selection/tests/test_feature_select.py | 1a0af94ee8f55baf7c02ba875f52e3c13ff229d0 | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:cc2c77543822b3e007daf24741057904eebe3b78020ca490985edca1102450b1
size 25886
| [
"sumitkutty37@gmail.com"
] | sumitkutty37@gmail.com |
d56c01585b7a3851cc7fa771e3868c718808748c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_ungrammatical.py | e2606d18bef4b164758f1c962a25a669edef2016 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py |
#calss header
class _UNGRAMMATICAL():
def __init__(self,):
self.name = "UNGRAMMATICAL"
self.definitions = [u'not grammatical']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7967aa8110d0d5fc2f706b22f0c3d78d9137574d | 6cd9b7611a9f095b50531523b9e78285c16cb6fe | /tests/integration/test_eden_journal.py | 9eb054d70fc443273a96b34e5f2972937829b644 | [
"Apache-2.0"
] | permissive | RejoiceSupsup/watchman | 659c8750f075933c7bb454fbeecd24a9f2ba56a6 | c967fd07161e6eba9a78a60be70c81056d45c461 | refs/heads/master | 2021-07-12T07:32:25.511590 | 2017-10-17T15:42:29 | 2017-10-17T15:54:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | # vim:ts=4:sw=4:et:
# Copyright 2017-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import WatchmanEdenTestCase
class TestEdenJournal(WatchmanEdenTestCase.WatchmanEdenTestCase):
    """Integration test: the eden watcher must track hg journal changes."""
    def test_eden_journal(self):
        """Run since-queries across a commit and a debugsetparents 'reset'."""
        def populate(repo):
            repo.write_file('hello', 'hola\n')
            repo.commit('initial commit.')
        root = self.makeEdenMount(populate, enable_hg=True)
        repo = self.repoForPath(root)
        initial_commit = repo.get_head_hash()
        res = self.watchmanCommand('watch', root)
        self.assertEqual('eden', res['watcher'])
        clock = self.watchmanCommand('clock', root)
        # An uncommitted file must show up in a since-query.
        self.touchRelative(root, 'newfile')
        res = self.watchmanCommand('query', root, {
            'fields': ['name'],
            'since': clock})
        clock = res['clock']
        self.assertFileListsEqual(res['files'], ['newfile'])
        repo.add_file('newfile')
        repo.commit(message='add newfile')
        res = self.watchmanCommand('query', root, {
            'expression': ['not', ['dirname', '.hg']],
            'fields': ['name'],
            'since': clock})
        clock = res['clock']
        self.assertFileListsEqual(res['files'], [
            'newfile'],
            message='We expect to report the files changed in the commit')
        # Test that the journal has the correct contents across a "reset" like
        # operation where the parents are poked directly. This is using
        # debugsetparents rather than reset because the latter isn't enabled
        # by default for hg in the watchman test machinery.
        self.touchRelative(root, 'unclean')
        repo.hg('debugsetparents', initial_commit)
        res = self.watchmanCommand('query', root, {
            'expression': ['not', ['dirname', '.hg']],
            'fields': ['name'],
            'since': clock})
        self.assertFileListsEqual(res['files'], [
            'newfile',
            'unclean'],
            message=('We expect to report the file changed in the commit '
                     'as well as the unclean file'))
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b3b25fc2a459f7f34575499a1fd5144ba145200e | 19d03d646fcee318cca8078af27636732290d77b | /tests/test_apex.py | fe5f1532a55e28c2c019c857ea385c0e53ac54a9 | [
"MIT"
] | permissive | yongkyung-oh/CMU-Studio-Project | 2d6fe6ef6fa30fda1a4f2d1fc45c5b85d6143775 | 448492f342e8157df2e736aa52825b66b1d66fd7 | refs/heads/master | 2022-10-24T16:56:46.763865 | 2020-07-01T10:03:00 | 2020-07-01T10:03:00 | 252,878,283 | 2 | 5 | MIT | 2021-03-25T23:50:27 | 2020-04-04T01:02:44 | Python | UTF-8 | Python | false | false | 3,413 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
import apex # noqa: F401
APEX_AVAILABLE = True
except ImportError:
APEX_AVAILABLE = False
import unittest
from parlai.core.agents import create_agent
from parlai.core.params import ParlaiParser
import parlai.utils.testing as testing_utils
@unittest.skipIf(APEX_AVAILABLE, "Apex is installed, can't test its absence.")
class TestNoApex(unittest.TestCase):
    """
    Test models that were pretrained with APEX.

    They should load on CPU and GPU, even if the user doesn't have apex
    installed.
    """
    def test_import(self):
        """Sanity-check that apex really is absent in this environment."""
        with self.assertRaises(ImportError):
            import apex as _  # noqa: F401
    def test_fused_adam(self):
        """A model trained with apex's FusedAdam needs --optimizer adam to load."""
        with self.assertRaises(ImportError):
            # we should crash if the user tries not giving --opt adam
            testing_utils.train_model(
                dict(
                    model_file='zoo:unittest/apex_fused_adam/model',
                    task='integration_tests:nocandidate',
                )
            )
        # no problem if we give the option
        pp = ParlaiParser(True, True)
        opt = pp.parse_args(
            [
                '--model-file',
                'zoo:unittest/apex_fused_adam/model',
                '--dict-file',
                'zoo:unittest/apex_fused_adam/model.dict',
                '--task',
                'integration_tests:nocandidate',
                '--optimizer',
                'adam',
            ],
            print_args=False,
        )
        create_agent(opt, requireModelExists=True)
    def test_fp16(self):
        """fp16-pretrained models must evaluate correctly without apex."""
        # nice clean fallback if no fp16
        valid, test = testing_utils.eval_model(
            dict(
                model_file='zoo:unittest/apex_fp16/model',
                task='integration_tests:nocandidate',
                num_examples=4,
            )
        )
        assert valid['accuracy'] == 1.0
        assert test['accuracy'] == 1.0
        # also no problem if we explicitly turn it on
        valid, test = testing_utils.eval_model(
            dict(
                model_file='zoo:unittest/apex_fp16/model',
                task='integration_tests:nocandidate',
                num_examples=4,
                fp16=True,
            )
        )
        assert valid['accuracy'] == 1.0
        assert test['accuracy'] == 1.0
        with self.assertRaises(RuntimeError):
            # we will have some fp16 tokens missing if we turn off fp16
            # note: this test could be made unnecessary in the future if we improve
            # the fp16 logic
            valid, test = testing_utils.eval_model(
                dict(
                    model_file='zoo:unittest/apex_fp16/model',
                    task='integration_tests:nocandidate',
                    num_examples=4,
                    fp16=False,
                )
            )
        valid, test = testing_utils.eval_model(
            dict(
                model_file='zoo:unittest/apex_fp16/model',
                task='integration_tests:nocandidate',
                num_examples=4,
                force_fp16_tokens=False,
            )
        )
        assert valid['accuracy'] == 1.0
        assert test['accuracy'] == 1.0
| [
"yongkyung-oh@outlook.com"
] | yongkyung-oh@outlook.com |
14587f6aee8eef8ddd494549c47de7afe707c3d8 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/83_11.py | 31ed7afe706da4ab11680070dc11b4d3bd1fc19c | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | Python | Test if number is valid Excel column
Sometimes, while working with Python strings, we can have a problem in which
we need to test for string if its a valid excel column. This has application
in many domains including day-day programming, web development and Data
Science. Let's discuss one way in which this task can be performed.
**Method : Using re.match() + group()**
The combination of the above functions can be used to perform this task. In
this, re.match() checks that the string consists of a valid Excel column
letter part (A through XFD) followed by a row number that starts with 1-9 and
has at most seven digits, and group() extracts the row number so it can be
checked against the Excel row limit of 1048576.
__
__
__
__
__
__
__
# Python3 code to demonstrate working of
# Test if number is valid Excel column
# Using re.match() + groupby()
import re
# initializing string
test_str = "C101"
# printing original string
print("The original string is : " + test_str)
# Test if number is valid Excel column
# Using re.match() + groupby()
    temp = re.match(r'^([A-Z]{1,2}|[A-W][A-Z]{2}|X[A-E][A-Z]|XF[A-D])([1-9]\d{0,6})$', test_str)
res = bool(temp) and int(temp.group(2)) < 1048577
# printing result
print("Is string valid excel column : " + str(res))
---
__
__
**Output :**
The original string is : C101
Is string valid excel column : True
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
e375079fc67427f47b931e429a733a07af05de98 | 3cf0d750948a758d5771dd778fbb783d64a044ae | /src/algo_cases/第6章/6_3.py | 23dd82accd699c8827ce87af05f0f1038260bf4d | [
"CC-BY-NC-SA-4.0",
"Apache-2.0"
] | permissive | hbulpf/pydemo | 6552a08b3c85721ac1b2ba335b030e234ad03b6c | ea3e9f9086116a86ecef803e9e3179a34c94c20f | refs/heads/master | 2022-11-30T21:06:29.933820 | 2022-01-15T17:05:16 | 2022-01-15T17:05:16 | 237,584,300 | 6 | 1 | Apache-2.0 | 2022-11-22T09:49:38 | 2020-02-01T08:20:43 | Python | UTF-8 | Python | false | false | 913 | py | def symmetric(root):
    # Empty tree is trivially symmetric.
    if not root: return True
    # Explicit stack of node pairs that must mirror each other, seeded with
    # the root's two children.
    queue=[]
    queue.append(root.left)
    queue.append(root.right)
    while(queue):
        root1=queue.pop()
        root2=queue.pop()
        # Both empty: this pair mirrors; keep checking the rest.
        if not root1 and not root2:
            continue
        # Exactly one empty: shapes differ, so not symmetric.
        if not root1 or not root2:
            return False
        if root1.val!=root2.val:
            return False
        # Push the mirrored child pairs: (left, right) and (right, left).
        if root1.left or root2.left or root1.right or root2.right:
            queue.append(root1.left)
            queue.append(root2.right)
            queue.append(root1.right)
            queue.append(root2.left)
    return True
def symmetric(root):
    """Return True iff the binary tree rooted at ``root`` mirrors itself.

    Recursive variant: delegates the pairwise mirror comparison to dfs().
    """
    return dfs(root.left, root.right) if root else True
def dfs(root1, root2):
    """Return True iff the subtrees ``root1`` and ``root2`` are mirror images."""
    if not root1 and not root2:
        return True
    if not root1 or not root2:
        return False
    if root1.val != root2.val:
        return False
    # Outer children must mirror each other, and so must the inner children.
    return dfs(root1.left, root2.right) and dfs(root1.right, root2.left)
| [
"hudalpf@163.com"
] | hudalpf@163.com |
9eceb3b73124a3f49ba54036dcfcf72af53f0dbd | 91315e1a5329ec778e007137a369e3617f760602 | /app/applib/ldmbridge.py | 1ba29c9775b0e30643c8df32f7d6da743f900b66 | [
"Apache-2.0"
] | permissive | akrherz/rtstats | b3e0fc8367600db5f333d49ddce254491eac203f | f1466c2af7b0ffa1c1433ede8276f915008d0170 | refs/heads/master | 2022-09-25T17:30:01.568552 | 2020-12-12T12:18:52 | 2020-12-12T12:18:52 | 178,928,561 | 0 | 0 | Apache-2.0 | 2019-04-01T19:01:52 | 2019-04-01T19:01:52 | null | UTF-8 | Python | false | false | 878 | py | """Async bridge."""
from twisted.internet import stdio
from twisted.python import log
from twisted.internet import reactor
from twisted.protocols import basic
from applib import rtstats
class RTStatsIngestor(basic.LineReceiver):
    """Twisted protocol that reads LDM rtstats lines from STDIN.

    Each complete line is handed to rtstats.parser inside a database
    interaction on ``self.dbpool``.
    """
    # default delimiter is \r\n
    delimiter = b"\n"
    def connectionLost(self, reason):
        """ Called when the STDIN connection is lost """
        log.msg("connectionLost")
        log.err(reason)
        # Give in-flight database work 15 seconds before stopping the reactor.
        reactor.callLater(15, reactor.callWhenRunning, reactor.stop)
    def lineReceived(self, line):
        """ Process a chunk of data """
        # NOTE(review): self.dbpool is not set in this class -- presumably
        # assigned by the application after construction; verify the wiring.
        df = self.dbpool.runInteraction(rtstats.parser, line)
        df.addErrback(log.err)
class LDMProductFactory(stdio.StandardIO):
    """Standard-IO transport bound to a protocol instance.

    Thin wrapper kept for a descriptive name; all behavior comes from
    twisted.internet.stdio.StandardIO.
    """
    def __init__(self, protocol, **kwargs):
        """ constructor with a protocol instance """
        stdio.StandardIO.__init__(self, protocol, **kwargs)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
095eaf264a1f8c815c0c7c7bcb27959b6da0020b | 94a2c4417c1fdd8577a75b09a17912ebae129e6c | /ramda/find.py | b6af42cb8f0dfbed542ee7ec5ac8e8d6fc5701e3 | [
"MIT"
] | permissive | slavaGanzin/ramda.py | ad88a3cf6e7eb1461d4a09aad35ae1c18ca32db8 | 634bfbe0dcb300315ded327756cb3e33241589b8 | refs/heads/master | 2023-01-23T04:43:48.485314 | 2023-01-06T10:11:53 | 2023-01-06T10:11:53 | 142,413,822 | 68 | 7 | MIT | 2021-12-22T13:59:56 | 2018-07-26T08:43:31 | Python | UTF-8 | Python | false | false | 368 | py | from toolz import curry
@curry
def find(p, xs):
    """Return the first element of *xs* that satisfies predicate *p*.

    Returns None when nothing matches (the Ramda docs say "undefined";
    None is the Python equivalent). Curried, so ``find(p)`` yields a
    function awaiting the sequence.
    """
    for candidate in xs:
        if p(candidate):
            return candidate
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
4197bb2b46c58ad26bca73630a73765492379c17 | 02800e659f2088550a402d7d7d8e3902560893e3 | /private_kendall/top_k.py | 8e06ceeab9de9fd95a4f3f2d1274cbaede0ccaa1 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | antonpolishko/google-research | ab8a445d5401185eadfe757e73dc8bcf34da8dea | 1b4e7db5f90bcb4f80803383a81d8613ebfdfeec | refs/heads/master | 2023-08-31T06:38:33.963505 | 2023-08-26T16:33:48 | 2023-08-26T16:37:57 | 422,090,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentially private top-k.
This code is adapted from
https://github.com/google-research/google-research/blob/master/dp_topk/baseline_mechanisms.py.
"""
import numpy as np
def sorted_top_k(item_counts, k):
  """Return the indices of the k largest entries of ``item_counts``.

  Indices are ordered by decreasing count (largest first); ties resolve by
  np.argsort's ordering, exactly as in the original.

  Args:
    item_counts: Array of numbers defining item counts.
    k: Number of desired items.

  Returns:
    np.ndarray of k indices, largest count first.
  """
  ascending = np.argsort(item_counts)
  # Keep the k largest (the tail of the ascending order), then flip so the
  # biggest count comes first.
  return ascending[-k:][::-1]
def basic_peeling_mechanism(item_counts, k, epsilon, l_inf_sensitivity,
                            monotonic):
  """Epsilon-DP top-k over ``item_counts`` via basic composition.

  Splits the privacy budget evenly across the k selections, adds Gumbel
  noise scaled accordingly, and returns the noisy top-k indices.

  Args:
    item_counts: Array of numbers defining item counts.
    k: Number of desired items.
    epsilon: Overall privacy parameter; the output is epsilon-DP.
    l_inf_sensitivity: Bound on the l_inf sensitivity of item_counts under
      addition/removal of one user.
    monotonic: True iff adding a user never decreases any count.

  Returns:
    Sorted array of the indices of the top k noisy counts (see Lemmas 4.1
    and 4.2 of https://arxiv.org/abs/1905.04273).
  """
  per_round_epsilon = epsilon / k
  if not monotonic:
    # Non-monotonic counts can move in both directions, costing a factor of
    # two in the composition analysis.
    per_round_epsilon = per_round_epsilon / 2
  gumbel_scale = l_inf_sensitivity / per_round_epsilon
  noisy_counts = item_counts + np.random.gumbel(
      scale=gumbel_scale, size=item_counts.shape
  )
  return sorted_top_k(noisy_counts, k)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
8937598eaf6e22d75f33a3fa5ea29ad2c4a3adf2 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3280.py | 105efa63d88d15405cd0e969f6095abf9a8d2f02 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,361 | py | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of bit-strings ``s`` and ``t``, reversed.

    Iterates over len(s) positions (``t`` must be at least as long) and
    joins the XOR-ed bits in reverse order, as the original did.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Return the inner product of bit-strings ``s`` and ``t`` modulo 2, as '0'/'1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f as a multi-controlled-X circuit.

    For every n-bit assignment with f(bits) == "1": X gates map the pattern
    onto the all-ones control state, an n-controlled Toffoli ('noancilla'
    mode) flips the target, and the X gates are undone.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Qubits whose bit is 0 get sandwiched between X gates.
        flips = [j for j in range(n) if bits[j] == "0"]
        for j in flips:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in flips:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit.

    Applies a fixed, auto-generated gate sequence (the trailing
    "# number=..." tags are the generator's gate ids), embeds the oracle
    for ``f`` over the first n-1 qubits plus the last as target, and
    measures every qubit into the classical register.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=19
    prog.cz(input_qubit[0],input_qubit[3]) # number=20
    prog.h(input_qubit[3]) # number=21
    prog.cx(input_qubit[0],input_qubit[3]) # number=23
    prog.x(input_qubit[3]) # number=24
    prog.cx(input_qubit[0],input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=17
    prog.rx(-0.48380526865282825,input_qubit[3]) # number=26
    prog.h(input_qubit[1]) # number=2
    prog.y(input_qubit[3]) # number=18
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on qubits 0..n-2 as controls and n-1 as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[1]) # number=34
    prog.cz(input_qubit[0],input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=36
    prog.cx(input_qubit[0],input_qubit[1]) # number=31
    prog.cx(input_qubit[0],input_qubit[1]) # number=38
    prog.x(input_qubit[1]) # number=39
    prog.h(input_qubit[1]) # number=41
    prog.cz(input_qubit[0],input_qubit[1]) # number=42
    prog.h(input_qubit[1]) # number=43
    prog.cx(input_qubit[0],input_qubit[1]) # number=33
    prog.cx(input_qubit[0],input_qubit[1]) # number=30
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[3]) # number=37
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.x(input_qubit[2]) # number=22
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[0]) # number=13
    prog.x(input_qubit[0]) # number=14
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit-string and bias for the oracle: f(rep) = (a . rep) xor b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # NOTE(review): backend is re-created with the same fake device; the
    # second assignment is redundant but harmless.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist the measured counts, a marker line, the transpiled circuit
    # length and its textual drawing.
    writefile = open("../data/startQiskit_noisy3280.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
35d99a41e418a6c8ac4c7a9b6c5aa6d4f7fc8977 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/annathieme/drivenow.py | 7b1456514d75e90ba45c19f4d5af303b2b491480 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import scraperwiki
# Blank Python
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector
def scrape_site():
    # Fetch the DriveNow map JavaScript (verify=False because the site's TLS
    # certificate was not trusted at the time) and parse the body as HTML.
    url = "http://de.drive-now.com/static/metropolis/js/Map.js"
    html = requests.get(url, verify = False).text
    root = lxml.html.fromstring(html)
    # NOTE(review): the response is JavaScript, not HTML, so cssselect("li")
    # likely finds nothing -- confirm against the live endpoint.
    print root.cssselect("li")[0].text_content()
def main():
    """Run the scraper once (loop bound kept so it is easy to widen)."""
    for _ in range(1):
        scrape_site()
main()
import scraperwiki
# Blank Python
import scraperwiki
import requests
import lxml.html
from lxml.cssselect import CSSSelector
def scrape_site():
    # Duplicate of the scraper above (the vault stores the file twice).
    # Fetch the DriveNow map JavaScript (verify=False: untrusted TLS cert)
    # and parse the body as HTML.
    url = "http://de.drive-now.com/static/metropolis/js/Map.js"
    html = requests.get(url, verify = False).text
    root = lxml.html.fromstring(html)
    # NOTE(review): the response is JavaScript, not HTML, so cssselect("li")
    # likely finds nothing -- confirm against the live endpoint.
    print root.cssselect("li")[0].text_content()
def main():
    """Run the scraper once (loop bound kept so it is easy to widen)."""
    for _ in range(1):
        scrape_site()
main()
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
655c67a4e1eb9b022dd7dfca01622419c50e2e96 | 5ae3bc1920fafc33693cdfa3928a48158aa6f725 | /132/132.py | 62ba54ee7675b1aa68ef20a6aa47d901a672400f | [] | no_license | sjzyjc/leetcode | 2d0764aec6681d567bffd8ff9a8cc482c44336c2 | 5e09a5d36ac55d782628a888ad57d48e234b61ac | refs/heads/master | 2021-04-03T08:26:38.232218 | 2019-08-15T21:54:59 | 2019-08-15T21:54:59 | 124,685,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | class Solution:
    def minCut(self, s):
        """Return the minimum number of cuts partitioning ``s`` into palindromes.

        :type s: str
        :rtype: int

        Uses calPlin's O(n^2) palindrome table plus an O(n^2) DP where
        f[i] is the minimum number of palindromic pieces covering s[:i];
        the answer is f[n] - 1 cuts.
        """
        if not s:
            return 0
        is_palin = self.calPlin(s)
        f = [0 for i in range(len(s) + 1)]
        f[0] = 0  # empty prefix needs zero pieces
        for i in range(1, len(s) + 1):
            min_palin = (1 << 31) - 1  # sentinel "infinity"
            for j in range(i):
                if is_palin[j][i - 1]:
                    # s[j:i] is a palindrome: one more piece after s[:j].
                    min_palin = min(min_palin, f[j] + 1)
            f[i] = min_palin
        return f[-1] - 1
def calPlin(self, s):
f = [[False for _ in range(len(s))] for _ in range(len(s))]
for mid in range(len(s)):
i = j = mid
while i >= 0 and j < len(s) and s[i] == s[j]:
f[i][j] = True
i -= 1
j += 1
i = mid
j = mid + 1
while i >= 0 and j < len(s) and s[i] == s[j]:
f[i][j] = True
i -= 1
j += 1
return f
| [
"jcyang@MacBook-Air.local"
] | jcyang@MacBook-Air.local |
0ed9017d0c61624759f4c16379b91436f8d8a752 | f167f7989dcd0fff12ab0df72b16b60ab17460a0 | /apps/reader/tasks.py | b4e2df93b206c7a1fea291c91d90a3522684b5a3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | imageoptimiser/NewsBlur | 883a028913e94c0f03ece22b52f78915e9f0cb63 | 63bd36212cef8a9209e4adde638f24913ffa8443 | refs/heads/master | 2020-12-25T12:47:11.344588 | 2012-12-24T08:10:40 | 2012-12-24T08:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | import datetime
from celery.task import Task
from utils import log as logging
from django.contrib.auth.models import User
from django.conf import settings
from apps.reader.models import UserSubscription
from apps.social.models import MSocialSubscription
class FreshenHomepage(Task):
    """Celery task: keep the shared homepage account looking active.

    Marks the homepage user as just seen and rewinds every feed and social
    subscription's read-date to one day ago so unread counts recompute.
    """
    name = 'freshen-homepage'
    def run(self, **kwargs):
        day_ago = datetime.datetime.utcnow() - datetime.timedelta(days=1)
        user = User.objects.get(username=settings.HOMEPAGE_USERNAME)
        user.profile.last_seen_on = datetime.datetime.utcnow()
        user.profile.save()
        # Rewind every regular feed subscription and force a score recalc.
        usersubs = UserSubscription.objects.filter(user=user)
        logging.debug(" ---> %s has %s feeds, freshening..." % (user.username, usersubs.count()))
        for sub in usersubs:
            sub.mark_read_date = day_ago
            sub.needs_unread_recalc = True
            sub.save()
            sub.calculate_feed_scores(silent=True)
        # Same treatment for social (blurblog) subscriptions.
        socialsubs = MSocialSubscription.objects.filter(user_id=user.pk)
        logging.debug(" ---> %s has %s socialsubs, freshening..." % (user.username, socialsubs.count()))
        for sub in socialsubs:
            sub.mark_read_date = day_ago
            sub.needs_unread_recalc = True
            sub.save()
            sub.calculate_feed_scores(silent=True)
class CleanAnalytics(Task):
    """Celery task: prune Mongo analytics collections older than two days."""
    name = 'clean-analytics'
    def run(self, **kwargs):
        logging.debug(" ---> Cleaning analytics... %s page loads and %s feed fetches" % (
            settings.MONGOANALYTICSDB.nbanalytics.page_loads.count(),
            settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.count(),
        ))
        day_ago = datetime.datetime.utcnow() - datetime.timedelta(days=2)
        # Drop records older than the retention window from both collections.
        settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.remove({
            "date": {"$lt": day_ago},
        })
        settings.MONGOANALYTICSDB.nbanalytics.page_loads.remove({
            "date": {"$lt": day_ago},
        })
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
5a1555ec9d6aa4e728517b5fe56e291c88167f8a | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/CFR_266_2_A.py | 3f7270e31a9ff9b1c7c98cee59bbe4129891bfd4 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py |
# -*- coding: utf-8 -*-
# @Date : 2018-09-27 13:25:36
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
from sys import stdin
max_val = int(10e12)
min_val = int(-10e12)


def read_int():
    """Read one integer from stdin."""
    return int(stdin.readline())


def read_ints():
    """Read a whitespace-separated list of integers from stdin."""
    return [int(tok) for tok in stdin.readline().split()]


def read_str():
    """Read one line via input() (trailing newline stripped)."""
    return input()


def read_strs():
    """Read a whitespace-separated list of tokens from stdin."""
    return [tok for tok in stdin.readline().split()]


rides, many, single, bulk = read_ints()
# For each full group of `many` rides, pay the cheaper of a bulk ticket and
# `many` single tickets; the leftover rides again take the cheaper option.
answer = (rides // many) * min(bulk, single * many)
answer += min((rides % many) * single, bulk)
print(answer)
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
368073da4f30c3037ceee1aec7a1afbdfa898915 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/examples/projects/GUI_Development_with_PyQt5_and_SQL/my_old/ui_new_employee.py | 91efe1257932fe069b4f32ce1c9c4dd45a0b877d | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 5,673 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from employee_full_info import EmployeeFullInfo
from ui_calendar_dialog import CalendarDialog
class Ui_Dialog(object):
    """Qt-Designer-style UI builder for the "New Employee" dialog.

    Builds a form (first/last name, birthday picker, department, salary,
    position) plus a centered Save button. Fix: the window title previously
    read the misspelled "New Empoyee".
    """
    def setupUi(self, Dialog):
        """Create widgets and layouts on ``Dialog`` and assign object names."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(432, 429)
        self.gridLayout = QtWidgets.QGridLayout(Dialog)
        self.gridLayout.setObjectName("gridLayout")
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setContentsMargins(20, 20, 20, 20)
        self.formLayout.setVerticalSpacing(25)
        self.formLayout.setObjectName("formLayout")
        # Row 0: first name.
        self.firstNameLabel = QtWidgets.QLabel(Dialog)
        self.firstNameLabel.setObjectName("firstNameLabel")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstNameLabel)
        self.firstNameLineEdit = QtWidgets.QLineEdit(Dialog)
        self.firstNameLineEdit.setObjectName("firstNameLineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstNameLineEdit)
        # Row 1: last name.
        self.lastNameLabel = QtWidgets.QLabel(Dialog)
        self.lastNameLabel.setObjectName("lastNameLabel")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.lastNameLabel)
        self.lastNameLineEdit = QtWidgets.QLineEdit(Dialog)
        self.lastNameLineEdit.setObjectName("lastNameLineEdit")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lastNameLineEdit)
        # Row 2: birthday, picked through a calendar tool button.
        self.birthdayLabel = QtWidgets.QLabel(Dialog)
        self.birthdayLabel.setObjectName("birthdayLabel")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.birthdayLabel)
        self.birthdayToolButton = QtWidgets.QToolButton(Dialog)
        self.birthdayToolButton.setText("")
        self.birthdayToolButton.setObjectName("birthdayToolButton")
        self.birthdayToolButton.setIcon(QtGui.QIcon(":/resources/ic-calendar.png"))
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.birthdayToolButton)
        # Row 3: department.
        self.departmentLabel = QtWidgets.QLabel(Dialog)
        self.departmentLabel.setObjectName("departmentLabel")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.departmentLabel)
        self.departmentLineEdit = QtWidgets.QLineEdit(Dialog)
        self.departmentLineEdit.setObjectName("departmentLineEdit")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.departmentLineEdit)
        # Row 4: salary.
        self.salaryLabel = QtWidgets.QLabel(Dialog)
        self.salaryLabel.setObjectName("salaryLabel")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.salaryLabel)
        self.salaryLineEdit = QtWidgets.QLineEdit(Dialog)
        self.salaryLineEdit.setObjectName("salaryLineEdit")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.salaryLineEdit)
        # Row 5: position.
        self.positionLabel = QtWidgets.QLabel(Dialog)
        self.positionLabel.setObjectName("positionLabel")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.positionLabel)
        self.positionLineEdit = QtWidgets.QLineEdit(Dialog)
        self.positionLineEdit.setObjectName("positionLineEdit")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.positionLineEdit)
        self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 3)
        # Save button centered between two expanding spacers.
        spacerItem = QtWidgets.QSpacerItem(131, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
        self.saveButton = QtWidgets.QPushButton(Dialog)
        self.saveButton.setObjectName("saveButton")
        self.gridLayout.addWidget(self.saveButton, 1, 1, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(131, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem1, 1, 2, 1, 1)
        self.gridLayout.setColumnStretch(0, 1)
        self.gridLayout.setColumnStretch(1, 1)
        self.gridLayout.setColumnStretch(2, 1)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set all user-visible strings (kept separate for retranslation)."""
        _translate = QtCore.QCoreApplication.translate
        # Fixed typo: was "New Empoyee".
        Dialog.setWindowTitle(_translate("Dialog", "New Employee"))
        self.firstNameLabel.setText(_translate("Dialog", "First Name"))
        self.lastNameLabel.setText(_translate("Dialog", "Last Name"))
        self.birthdayLabel.setText(_translate("Dialog", "Birthday"))
        self.departmentLabel.setText(_translate("Dialog", "Department"))
        self.salaryLabel.setText(_translate("Dialog", "Salary"))
        self.positionLabel.setText(_translate("Dialog", "Position"))
        self.saveButton.setText(_translate("Dialog", "Save"))
class EmployeeDialog(QtWidgets.QDialog):
    """Dialog for entering a new employee's details.

    After exec() returns Accepted, the collected data is available as an
    EmployeeFullInfo instance in ``self.employeeInfo``.
    """

    def __init__(self):
        super(EmployeeDialog, self).__init__()
        self.employeeInfo = None
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Wire the two interactive widgets to their handlers.
        self.ui.birthdayToolButton.clicked.connect(self.birthday_button_clicked)
        self.ui.saveButton.clicked.connect(self.save_button_clicked)

    def save_button_clicked(self):
        """Collect the form fields into an EmployeeFullInfo and accept."""
        form = self.ui
        self.employeeInfo = EmployeeFullInfo(
            form.firstNameLineEdit.text(),
            form.lastNameLineEdit.text(),
            self.birthday,
            form.departmentLineEdit.text(),
            form.salaryLineEdit.text(),
            form.positionLineEdit.text())
        self.accept()

    def birthday_button_clicked(self):
        """Open the calendar picker dialog and remember the chosen date."""
        self.calendarDialog = CalendarDialog()
        if self.calendarDialog.exec() == QtWidgets.QDialog.Accepted:
            self.birthday = self.calendarDialog.date
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
0c85b95bcb59f67c6c4975f3d22de0df21c2ef54 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_2208+014/sdB_pg_2208+014_lc.py | 13621568094978e8e6c0c9a2832d8e7d00caabf1 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from gPhoton.gAperture import gAperture
def main():
    """Run gPhoton aperture photometry for sdB_pg_2208+014 (NUV band).

    Produces a 30-second-binned light curve CSV; aperture and annulus
    radii are in degrees.  NOTE(review): the output path is hard-coded —
    confirm the directory exists before running.
    """
    gAperture(band="NUV", skypos=[332.689417,1.693167], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_2208+014/sdB_pg_2208+014_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
5646d9835ee82fd4e401019cda9c8e7bdda0a655 | eb96dbfad894771713e7a03802a48dd0effb96c7 | /scripts/optical_parameters.py | dc894cd789bcee95186f08179b3f3e9423d2aa6d | [] | no_license | daniel-muthukrishna/SNIaLightCurveModelling | d80375fb70cedfc8b87778c666938e18cd13ec05 | 169b91d48bec0968c6d9f92d2785cc604352e677 | refs/heads/master | 2021-09-11T09:50:34.400189 | 2018-04-06T14:26:55 | 2018-04-06T14:26:55 | 111,708,763 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,944 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
import copy
def read_optical_fitted_table(filename):
    """Load a whitespace-separated optical-parameter table as a DataFrame.

    The first non-comment row holds the column names (including
    'SN_name'); that row is promoted to the header and 'SN_name' becomes
    the row index.  Cell values are kept as strings.
    """
    # sep=r"\s+" is the documented equivalent of delim_whitespace=True.
    frame = pd.read_csv(filename, header=None, sep=r"\s+", comment='#')
    header_row = frame.iloc[0]
    frame = frame.drop(index=0)
    frame.columns = header_row
    return frame.set_index('SN_name')
def common_optical_nir_sn(nirPeaks, opticalData, bandName):
"""Find the common supernova names between optical and NIR
and create new DataFrames that contain only information for common SNe."""
nirPeaks = copy.deepcopy(nirPeaks)
opticalData = copy.deepcopy(opticalData)
nirNames = nirPeaks.index
opticalNames = opticalData.index
names = opticalNames.intersection(nirNames)
nirPeaks = nirPeaks.loc[names]
if not names.equals(opticalNames):
print("Some NIR SNe don't have optical data in %s" % bandName)
opticalData = opticalData.loc[names]
return nirPeaks, opticalData
class CompareOpticalAndNIR(object):
    """Compare NIR light-curve peak parameters against optical fit parameters.

    Holds the optical table (indexed by SN name) and the NIR peak table,
    derives an absolute B magnitude and a NIR second-minus-first peak
    magnitude column, and offers plotting helpers.
    """

    def __init__(self, opticalDataFilename, nirPeaks, bandName):
        self.opticalData = read_optical_fitted_table(opticalDataFilename)
        self.nirPeaks = nirPeaks
        self.bandName = bandName
        # Add AbsMag column: apparent mB minus the Snoopy distance modulus.
        self.opticalData = self.opticalData.astype('float')
        self.opticalData['AbsMagB'] = self.opticalData['mB'] - self.opticalData['mu_Snoopy']
        # Add nirpeaks flux ratio (difference of the two peak magnitudes).
        self.nirPeaks['SecondMaxMag - FirstMaxMag'] = self.nirPeaks['secondMaxMag'] - self.nirPeaks['firstMaxMag']

    def nir_peaks_vs_optical_params(self):
        """Scatter-matrix of NIR peak quantities vs optical fit parameters.

        Saves the figure to Figures/<band>_opticalParams_vs_NIR_peaks.
        """
        nirPeaks = self.nirPeaks[['SecondMaxMag - FirstMaxMag', 'secondMaxPhase']]
        opticalData = self.opticalData[['AbsMagB', 'x0', 'x1', 'c']]
        # Only SNe present in both tables are plotted.
        nirPeaks, opticalData = common_optical_nir_sn(nirPeaks, opticalData, self.bandName)
        fig, ax = plt.subplots(nrows=len(opticalData.columns), ncols=len(nirPeaks.columns), sharex='col', sharey='row')
        fig.subplots_adjust(wspace=0, hspace=0)
        for i, f in enumerate(opticalData.columns):
            for j, p in enumerate(nirPeaks.columns):
                ax[i, j].scatter(nirPeaks[p], opticalData[f], alpha=0.6, marker='.')
                # Label only the outer edges of the grid.
                if i == len(opticalData.columns) - 1:
                    ax[i, j].set_xlabel(p)
                if j == 0:
                    ax[i, j].set_ylabel(f, rotation=0)
                ax[i, j].yaxis.set_major_locator(plt.MaxNLocator(4))
                ax[i, j].tick_params(labelleft='off')
                # ax[i, j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))
                ax[i, j].yaxis.set_label_coords(-0.2, 0.2)
        fig.subplots_adjust(left=0.2, right=0.98)
        fig.suptitle(self.bandName)
        plt.savefig("Figures/%s_opticalParams_vs_NIR_peaks" % self.bandName)

    def plot_parameters(self, fig=None, ax=None, i=0, band='', figinfo=None, label=True):
        """Plot one parameter pair with a linear trend line into ax[i].

        figinfo is the 6-tuple (xname, yname, xlabel, ylabel, savename,
        sharey); x/y names may refer to NIR or optical columns.  Prints
        the regression statistics and saves Figures/<savename>.png.
        NOTE(review): assumes fig/ax are supplied by the caller — the
        None defaults would fail if actually used.
        """
        xname, yname, xlabel, ylabel, savename, sharey = figinfo
        # Only plot supernovae for which we have both optical and NIR data
        if xname in self.opticalData or yname in self.opticalData:
            nirPeaks, opticalData = common_optical_nir_sn(self.nirPeaks, self.opticalData, self.bandName)
        else:
            nirPeaks, opticalData = copy.deepcopy(self.nirPeaks), copy.deepcopy(self.opticalData)
        # Get data
        if xname in self.nirPeaks:
            x = nirPeaks[xname].values.astype('float')
        elif xname in self.opticalData:
            x = opticalData[xname].values.astype('float')
        else:
            raise ValueError("Invalid x parameter: {}".format(xname))
        if yname in self.nirPeaks:
            y = nirPeaks[yname].values.astype('float')
        elif yname in self.opticalData:
            y = opticalData[yname].values.astype('float')
        else:
            raise ValueError("Invalid y parameter: {}".format(yname))
        snNames = np.array(nirPeaks.index)
        # Remove NaNs (first by x, then by y; snNames kept in sync)
        notNan = ~np.isnan(x)
        x = x[notNan]
        y = y[notNan]
        snNames = snNames[notNan]
        notNan = ~np.isnan(y)
        x = x[notNan]
        y = y[notNan]
        snNames = snNames[notNan]
        # Choose axis labels (fall back to the column names)
        if not xlabel:
            xlabel = xname
        if not ylabel:
            ylabel = yname
        if not savename:
            savename = "{}_vs_{}".format(yname, xname)
        # Fit trend line
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        x_pred = np.arange(min(x), max(x), 0.1)
        y_pred = slope * x_pred + intercept
        print("{}: {} vs {}".format(band, xname, yname))
        print("Slope: {0}, Intercept: {1}, R: {2}, p-value:{3}".format(slope, intercept, r_value, p_value))
        # Plot yname vs xname
        if label is True:
            # One scatter call per SN so each gets its own legend entry.
            for j in range(len(x)):
                ax[i].scatter(x[j], y[j], label="{}, {}".format(snNames[j], x[j]))
            ax[i].legend(loc=2, bbox_to_anchor=(1.01, 1), ncol=2)
        else:
            ax[i].plot(x, y, '.k')
        ax[i].plot(x_pred, y_pred, 'b')
        ax[i].set_ylabel(ylabel)
        ax[-1].set_xlabel(xlabel)
        ax[i].text(0.05, 0.85, band, transform=ax[i].transAxes, fontsize=15)
        # Magnitude axes are conventionally inverted (brighter = up).
        if 'mag' in yname.lower() and sharey is False:
            ax[i].invert_yaxis()
        ax[i].text(0.7, 0.15, 'R=%.3f' % r_value, transform=ax[i].transAxes)
        ax[i].text(0.7, 0.05, 'p_value=%.3f' % p_value, transform=ax[i].transAxes)
        fig.subplots_adjust(hspace=0)
        plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
        fig.savefig("Figures/{}.png".format(savename), bbox_inches='tight')
| [
"daniel.muthukrishna@gmail.com"
] | daniel.muthukrishna@gmail.com |
020df92ddd9062442496296596e24b0c7800afbf | 6630694f401f6f475dd81bb01ff9368db844ccff | /mmpretrain/engine/runners/__init__.py | 23206e1ea7c83fa1d547c677b3fe5203f8c5485f | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 165 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .retrieval_loop import RetrievalTestLoop, RetrievalValLoop
__all__ = ['RetrievalTestLoop', 'RetrievalValLoop']
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
dbd05061cad97bb1373c01b30bf50c58b80fd234 | 06b695d1221521f4de4896f315cd6a39e9af8581 | /tests/test.py | 942ad8428c0ea38c700624d64f24e354efa850ad | [] | no_license | Eugeny/PythonUpstart | 6665690872560e622c99b7dbbc4d1dc51832487c | 8b33f425ab71167931c84bea57e81c958455e7d0 | refs/heads/master | 2021-01-17T10:18:27.190060 | 2015-01-31T09:44:07 | 2015-01-31T09:44:07 | 30,106,514 | 1 | 0 | null | 2015-01-31T08:04:11 | 2015-01-31T08:04:11 | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/env python2.7
import sys
sys.path.insert(0, '..')
from pprint import pprint
from sys import stdout
def test_system():
    """Exercise the system-level Upstart API: version, log priority, emit."""
    from upstart.system import UpstartSystem

    system = UpstartSystem()
    print("Version: %s" % (system.get_version()))

    # Remember the current priority so it can be restored afterwards.
    original = system.get_log_priority()
    print("Current priority: %s" % (original))

    system.set_log_priority('debug')
    print("Updated priority: %s" % (system.get_log_priority()))

    # Put the system back the way we found it.
    system.set_log_priority(original)
    print("Restored priority: %s" % (system.get_log_priority()))

    system.emit('foo', {'aa': 55})
def test_jobs():
    """Build a sample Upstart job definition and print the rendered .conf.

    Other script flavors (bash/perl) and the UpstartJob status/start/stop
    API are left commented out for reference.
    """
    from upstart.job import UpstartJob, JobBuilder, JobBashScript, \
        JobPerlScript, JobPythonScript

    # j = UpstartJob('smbd')
    # pprint(j.get_status().keys())
    # j.stop(); j.start()

    # s = JobBashScript("echo\n")
    # s = JobBashScript("echo\n", shell='/bin/sh')
    # s = JobPerlScript("print()\n")
    s = JobPythonScript("""
import time
while 1:
    time.sleep(1)
""")
    jb = JobBuilder()
    jb.description('Test description').\
        author('Iam Admin <admin@corp.com>').\
        start_on_runlevel().\
        stop_on_runlevel().\
        run(s)
    print(str(jb))
    # The code that previously followed an unconditional `return` here was
    # unreachable and referenced an undefined name (`job_raw`); it has been
    # removed.  To install the job, write str(jb) to /etc/init/my_daemon.conf.
#test_system()
test_jobs()
| [
"myselfasunder@gmail.com"
] | myselfasunder@gmail.com |
30ad7c8c22755ce0c400cb26b5e03570165ecaf9 | f68b4cb7f006f0519f3e9ab081fc4fd05b5e070a | /meshio/stl_io.py | 8ece17d2f1d52c9ec9f646d7f138f92e9ebd57c4 | [
"MIT"
] | permissive | ZhimingGuo/meshio | cdc2deffcd9d24dcd732250c624fa5ca1517ed6b | e5bafc42084f3e4ffeebacb001fe099f57e99bf6 | refs/heads/master | 2020-05-17T06:38:17.790987 | 2018-08-12T15:44:32 | 2018-08-12T15:44:32 | 183,563,348 | 0 | 0 | MIT | 2019-04-26T05:33:10 | 2019-04-26T05:33:10 | null | UTF-8 | Python | false | false | 5,035 | py | # -*- coding: utf-8 -*-
#
"""
I/O for the STL format, cf.
<https://en.wikipedia.org/wiki/STL_(file_format)>.
"""
import numpy
from .mesh import Mesh
def read(filename):
    """Read an STL file (ASCII or binary) and return a Mesh.

    (Docstring corrected: this is the STL reader, not a Gmsh msh reader.)
    """
    with open(filename, "rb") as f:
        out = read_buffer(f)
    return out
def read_buffer(f):
    """Dispatch to the ASCII or binary STL reader based on the 5-byte magic."""
    magic = numpy.frombuffer(f.read(5), dtype=numpy.uint8)
    text = "".join(chr(byte) for byte in magic)
    if text == "solid":
        # ASCII: skip the remainder of the "solid <name>" line.
        f.readline()
        return _read_ascii(f)
    # Binary: the header is 80 bytes total; 75 remain to be discarded.
    f.read(75)
    return _read_binary(f)
def _read_ascii(f):
    """Parse the body of an ASCII STL stream (header line already consumed)."""
    # The file has the form
    # ```
    # solid foo
    #   facet normal 0.455194 -0.187301 -0.870469
    #    outer loop
    #     vertex 266.36 234.594 14.6145
    #     vertex 268.582 234.968 15.6956
    #     vertex 267.689 232.646 15.7283
    #    endloop
    #   endfacet
    #   # [...] more facets [...]
    # endsolid
    # ```
    # In the interest of speed, don't verify the format and instead just skip
    # the text.
    # TODO Pandas is MUCH faster than numpy for i/o, see
    # <https://stackoverflow.com/a/18260092/353337>.
    # import pandas
    # data = pandas.read_csv(
    #     f,
    #     skiprows=lambda row: row == 0 or (row-1)%7 in [0, 1, 5, 6],
    #     skipfooter=1,
    #     usecols=(1, 2, 3),
    #     )
    # Treat every keyword line as a comment so only vertex coordinate
    # columns (1..3) survive the parse.
    data = numpy.loadtxt(
        f,
        comments=["solid", "facet", "outer loop", "endloop", "endfacet", "endsolid"],
        usecols=(1, 2, 3),
    )
    # Every three consecutive rows form one triangular facet.
    assert data.shape[0] % 3 == 0
    facets = numpy.split(data, data.shape[0] // 3)
    # Now, all facets contain the point coordinate. Try to identify individual
    # points and build the data arrays.
    points, cells = data_from_facets(facets)
    return Mesh(points, cells)
def data_from_facets(facets):
    """Deduplicate facet corners into a point array plus connectivity.

    Returns (points, cells) where points lists each distinct coordinate in
    order of first appearance and cells["triangle"] indexes into it, one
    row per facet.
    """
    all_corners = numpy.concatenate(facets)
    # TODO equip `unique()` with a tolerance
    # `return_index` gives, for each unique row, where it first occurred;
    # sorting by that index restores first-appearance order
    # (cf. <https://stackoverflow.com/a/15637512/353337>).
    _, first_idx, inverse = numpy.unique(
        all_corners, axis=0, return_index=True, return_inverse=True
    )
    order = numpy.argsort(first_idx)
    points = all_corners[first_idx[order]]
    # Translate sorted-unique labels into appearance-order labels.
    relabel = numpy.argsort(order)
    cells = {"triangle": relabel[inverse].reshape(-1, 3)}
    return points, cells
def _read_binary(f):
    """Parse the body of a binary STL stream (80-byte header already consumed)."""
    # A uint32 triangle count immediately follows the header.
    (num_triangles,) = numpy.frombuffer(f.read(4), dtype=numpy.uint32)
    facets = []
    for _ in range(num_triangles):
        f.read(12)  # skip the stored normal
        corners = numpy.frombuffer(f.read(36), dtype=numpy.float32)
        facets.append(corners.reshape(-1, 3))
        f.read(2)  # skip the attribute byte count
    points, cells = data_from_facets(numpy.array(facets))
    return Mesh(points, cells)
def write(filename, mesh, write_binary=False):
    """Write *mesh* to *filename* as STL (ASCII unless write_binary)."""
    cell_types = list(mesh.cells.keys())
    assert cell_types == ["triangle"], "STL can only write triangle cells."
    writer = _write_binary if write_binary else _write_ascii
    writer(filename, mesh.points, mesh.cells)
    return
def _compute_normals(pts):
normals = numpy.cross(pts[:, 1] - pts[:, 0], pts[:, 2] - pts[:, 0])
nrm = numpy.sqrt(numpy.einsum("ij,ij->i", normals, normals))
normals = (normals.T / nrm).T
return normals
def _write_ascii(filename, points, cells):
    """Write triangles as an ASCII STL file, recomputing facet normals."""
    triangles = points[cells["triangle"]]
    normals = _compute_normals(triangles)
    with open(filename, "wb") as fh:
        fh.write("solid\n".encode("utf-8"))
        for corners, normal in zip(triangles, normals):
            # Assemble one facet record, then emit it in a single write.
            record = ["facet normal {} {} {}\n".format(*normal)]
            record.append("  outer loop\n")
            for corner in corners:
                record.append("    vertex {} {} {}\n".format(*corner))
            record.append("  endloop\n")
            record.append("endfacet\n")
            fh.write("".join(record).encode("utf-8"))
        fh.write("endsolid\n".encode("utf-8"))
    return
def _write_binary(filename, points, cells):
    """Write triangles as a binary STL file, recomputing facet normals."""
    pts = points[cells["triangle"]]
    normals = _compute_normals(pts)
    with open(filename, "wb") as fh:
        # 80 character header data (padded with 'X' and a trailing newline)
        msg = "This file was generated by meshio."
        msg += (79 - len(msg)) * "X"
        msg += "\n"
        fh.write(msg.encode("utf-8"))
        # uint32 triangle count, then per-triangle records:
        # normal (3 x float32), corners (9 x float32), attribute count (uint16).
        fh.write(numpy.uint32(len(cells["triangle"])))
        for pt, normal in zip(pts, normals):
            fh.write(normal.astype(numpy.float32))
            fh.write(pt.astype(numpy.float32))
            fh.write(numpy.uint16(0))
    return
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
e08ac2649bd87e3c1cf24621d2a53f272dbac3da | d54476a109bb7a75ab18c742e53425358eae2df7 | /shop/management/commands/groups.py | 6dbeae6f03eef2c3bb0d18e6b73e7cb4ce0b0120 | [] | no_license | OmarGonD/stickers_gallito | 8b46673a73d3fa6fdbdeb9726804f3e3c176543b | 4aa4f5aeb272b393410ed8b518aa39040f46a97b | refs/heads/master | 2022-12-09T20:38:23.672740 | 2019-12-13T14:41:41 | 2019-12-13T14:41:41 | 163,198,792 | 0 | 1 | null | 2022-04-22T21:00:01 | 2018-12-26T16:35:33 | HTML | UTF-8 | Python | false | false | 480 | py | import pandas as pd
from django.contrib.auth.models import Group, Permission
from django.core.management.base import BaseCommand
tmp_data_groups = pd.read_csv('static/data/groups.csv', sep=',', encoding='iso-8859-1').fillna(" ")
class Command(BaseCommand):
    """Management command: bulk-load auth Groups from the groups CSV."""

    def handle(self, **options):
        """Create one Group per row of the module-level groups table."""
        new_groups = []
        for _, row in tmp_data_groups.iterrows():
            new_groups.append(Group(name=row['group']))
        Group.objects.bulk_create(new_groups)
"oma.gonzales@gmail.com"
] | oma.gonzales@gmail.com |
6b778fac686de7c1d563301108cc9df829b487fd | a75ac3c5c641fc00a3c403b08eeb6008f648639e | /LeetCode/712.py | cc13910b4168cfe3263adc3cf429563194063e39 | [] | no_license | Greenwicher/Competitive-Programming | 5e9e667867c2d4e4ce68ad1bc34691ff22e2400a | 6f830799f3ec4603cab8e3f4fbefe523f9f2db98 | refs/heads/master | 2018-11-15T15:25:22.059036 | 2018-09-09T07:57:28 | 2018-09-09T07:57:28 | 28,706,177 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | # Version 1, Dynamic Programming
class Solution(object):
    def minimumDeleteSum(self, s1, s2):
        """
        Return the lowest ASCII sum of characters that must be deleted to
        make the two strings equal (LeetCode 712).

        :type s1: str
        :type s2: str
        :rtype: int

        Uses the classic (m+1) x (n+1) prefix DP.  Unlike the previous
        version, this also handles empty inputs (the old first-row/column
        initialization indexed s2[0] and raised IndexError when either
        string was empty).  O(m*n) time and space.
        """
        m, n = len(s1), len(s2)
        # dp[i][j]: minimal deleted-ASCII cost to equalize s1[:i] and s2[:j].
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        # Against an empty prefix, every character must be deleted.
        for i in range(1, m + 1):
            dp[i][0] = dp[i - 1][0] + ord(s1[i - 1])
        for j in range(1, n + 1):
            dp[0][j] = dp[0][j - 1] + ord(s2[j - 1])
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if s1[i - 1] == s2[j - 1]:
                    # Matching characters cost nothing: keep both.
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    # Delete whichever trailing character is cheaper.
                    dp[i][j] = min(dp[i - 1][j] + ord(s1[i - 1]),
                                   dp[i][j - 1] + ord(s2[j - 1]))
        return dp[m][n]
s = Solution()
print(s.minimumDeleteSum("sea", "eat"))
print(s.minimumDeleteSum("delete", "leet"))
print(s.minimumDeleteSum("gqirsclhrchxsqgmpfdeploxfixowfqqubuvsupkejabcrfqgcnsauunllsfskclenkxmdyraerhfmmiwryeyqoldgxctuvsjarjvfelsglvlbnozmejncnlaqpxmbrgwayfzczvatel","kgievqcxvrgeyklbcidngseersbiubgdwzlraagerenyfavkdcriinaugodaoacfiasmhhoxxsnqcyfriknrjfwyfglplvodefdlbmykfgpdpzjndlrskzctfkfkwcjbibuglrjvdyfhnsgwuunpzoakyejkxczznfljimkkanlsyuhvwjitrdvktrvufgyllgjpjixotsgwjkzbdqhvzyappucwvberchznrzdqjwpvyckwbfnlulscxynfbqqkhgxxkdzawjtlncqqswfwwbvywdchnxtblboobjzkurpjutdbwaxlxkxuiaxiddntniuuvghprslmpctnokubadbbxhuezbesvgvptqbnfjpmxopjdrajectbpkszvzzjivzhlesfnzaetgvxcnrhuglvoncgsyoyucjnuedgcfdrnkhxfyhujxzvxieeevwqn")) | [
"weizhiliu2009@gmail.com"
] | weizhiliu2009@gmail.com |
dacafae4865912427306357a6de35c624bf6f708 | 8a56050d358eafe16ff4049fb0b8b4118b8c5b24 | /Binance_futures_python/example/market/get_top_long_short_accounts.py | 6a8fc367a110bb13fa0fe81dd7417d44bad7b8d6 | [
"MIT"
] | permissive | jiaweiloo/dora-scalp-bot | 576147fe6dab146d29658d430ac9cad2dd343028 | 77eaa0ad1a7d0480d49c85a0a558ec078a3d5886 | refs/heads/master | 2023-08-07T22:06:31.212483 | 2021-09-23T02:53:17 | 2021-09-23T02:53:17 | 412,504,217 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
# result = request_client.get_liquidation_orders()
result = request_client.get_top_long_short_accounts(symbol="BTCUSDT", period='1d')
print("======= Get Top Long/Short Accounts Ratio =======")
PrintMix.print_data(result)
print("==========================================")
| [
"huggingrobot@gmail.com"
] | huggingrobot@gmail.com |
30fc0d8883293aabb2857d5b4f9397ae44af5980 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/repoze.who-2.3/docs/examples/standalone_login_no_who.py | 48defd406d90492942af82c0b88c71805b498d67 | [
"Apache-2.0",
"BSD-3-Clause-Modification"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 3,146 | py | # Standalone login application for demo SSO:
# N.B.: this version does *not* use repoze.who at all, but should produce
# a cookie which repoze.who.plugin.authtkt can use.
import datetime
from paste.auth import auth_tkt
from webob import Request
LOGIN_FORM_TEMPLATE = """\
<html>
<head>
<title> Demo SSO Login </title>
</head>
<body>
<h1> Demo SSO Login </h1>
<p style="color: Red">%(message)s</p>
<form action="#" method="post">
<input type="hidden" name="came_from" value="%(came_from)s" />
<fieldset id="login_name_fs">
<label for="login_name">Login Name</label>
<input type="text" id="login_name" name="login_name" value="%(login_name)s" />
</fieldset>
<fieldset id="password_fs">
<label for="password">Login Name</label>
<input type="password" id="password" name="password" />
</fieldset>
<input type="submit" name="form.submitted" value="Log In" />
</form>
</body>
</html>
"""
# oh emacs python-mode, you disappoint me """
# Clients have to know about these values out-of-band
SECRET = 's33kr1t'
COOKIE_NAME = 'auth_cookie'
MAX_AGE = '3600' # seconds
AUTH = {
'phred': 'y4bb3d4bb4d00',
'bharney': 'b3dr0ck',
}
def _validate(login_name, password):
    """Check the credentials against the demo AUTH store.

    Your application's real authentication logic would go here.
    """
    expected = AUTH.get(login_name)
    return expected == password
def _get_cookies(environ, value):
    """Build the Set-Cookie header list carrying the auth ticket *value*.

    The cookie is set on the wildcard domain (".host") so sibling hosts in
    the demo SSO setup can see it, with both Max-Age and an Expires date
    (Expires is required at least for IE7, which ignores Max-Age).
    """
    later = (datetime.datetime.utcnow() +
             datetime.timedelta(seconds=int(MAX_AGE)))
    # RFC-style HTTP date: Wdy, DD Mon YYYY HH:MM:SS GMT.  The previous
    # version used local time and omitted the "GMT" suffix, producing an
    # Expires value clients may misinterpret.
    expires = later.strftime('%a, %d %b %Y %H:%M:%S GMT')
    tail = "; Max-Age=%s; Expires=%s" % (MAX_AGE, expires)
    cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
    wild_domain = '.' + cur_domain
    return [('Set-Cookie', '%s="%s"; Path=/; Domain=%s%s'
                % (COOKIE_NAME, value, wild_domain, tail)),
           ]
def login(environ, start_response):
    """WSGI app implementing the demo SSO login form and ticket issuing.

    GET renders the form (propagating ?came_from=...).  A valid POST mints
    a paste auth_tkt ticket, sets it as a wildcard-domain cookie via
    _get_cookies, and 302-redirects back to `came_from`; an invalid POST
    re-renders the form with an error message.
    """
    request = Request(environ)
    message = ''
    if 'form.submitted' in request.POST:
        came_from = request.POST['came_from']
        login_name = request.POST['login_name']
        password = request.POST['password']
        remote_addr = environ['REMOTE_ADDR']
        if _validate(login_name, password):
            # NOTE(review): this first `headers` assignment is dead code —
            # it is overwritten by the _get_cookies result below.
            headers = [('Location', came_from)]
            ticket = auth_tkt.AuthTicket(SECRET, login_name, remote_addr,
                                         cookie_name=COOKIE_NAME, secure=True,
                                         digest_algo="sha512")
            headers = _get_cookies(environ, ticket.cookie_value())
            headers.append(('Location', came_from))
            start_response('302 Found', headers)
            return []
        message = 'Authentication failed'
    else:
        came_from = request.GET.get('came_from', '')
        login_name = ''
    # Fall through: render the form (empty on GET; with the failure message
    # and previously-entered values on a bad POST).
    body = LOGIN_FORM_TEMPLATE % {'message': message,
                                  'came_from': came_from,
                                  'login_name': login_name,
                                  }
    start_response('200 OK', [])
    return [body]
def main(global_config, **local_config):
    """PasteDeploy-style app factory: return the login WSGI application.

    The configuration arguments are accepted but unused.
    """
    return login
| [
"ranade@cloudera.com"
] | ranade@cloudera.com |
50891a903b975e1ef6fff8631669eddf50d09839 | 7b2dc269c3766deadb13415284d9848409d850c5 | /notebooks/spatial_gmm/made_best.py | f97eed478f7f6616b79b502b3f569697aaf4db1b | [] | no_license | Joaggi/demande | 8c3f32125cdf6377c9bd8a5b33bf162f8a5ec5cc | 289b8237d8e872e067dd4f6ab2297affe3903f4e | refs/heads/main | 2023-04-18T20:47:21.769183 | 2023-03-08T21:36:08 | 2023-03-08T21:36:08 | 611,455,062 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | try:
from initialization import initialization
except:
from notebooks.initialization import initialization
parent_path = initialization("2021-2-conditional-density-estimation", "/Doctorado/")
from run_experiment_best_configuration import run_experiment_best_configuration
import sys
if len(sys.argv) > 1:
algorithm, database = sys.argv[1], sys.argv[2]
else:
algorithm, database = "made", "arc"
for j in range(2, 11):
setting = {
"z_dimension": j
}
run_experiment_best_configuration(algorithm, database, parent_path, setting)
| [
"joaggi@gmail.com"
] | joaggi@gmail.com |
75d132b4a10e11371871d7d6bce7576c6dffaaf8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03378/s300114441.py | d8bf1b5526ad860f80565a1157605749c4efffbe | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | n, m, x = map(int, input().split())
a = list(map(int, input().split()))
r = [0 for _ in range(n+1)]
for i in a:
r[i] += 1
print(min(sum(r[x:]), sum(r[:x+1]))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
43ca3653cc2bb0879d105437d102e94db4196829 | 243c89211b8f97189dda683ddad440d5e0837440 | /learning/autovisualization_online.py | b9c2890b93b94817e10de03a44a91522b0b3572d | [
"BSD-3-Clause"
] | permissive | EmergentSystemLabStudent/SpCoSLAM | 4776e4da1bc8b7bb99cdc2d55de48102914c8f1f | 0e910bbd91e92b76d3f6d73ea2158a1922a3823c | refs/heads/master | 2022-12-12T16:36:50.758250 | 2020-09-11T01:07:49 | 2020-09-11T01:07:49 | 99,771,354 | 11 | 3 | BSD-3-Clause | 2020-09-11T01:07:51 | 2017-08-09T06:11:47 | Python | UTF-8 | Python | false | false | 6,660 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#自己位置と場所概念(位置分布)の可視化用プログラム(実行不可)
#Akira Taniguchi 2017/02/28 - 2017/09/01
import sys
import os
import random
import string
import signal
import subprocess
import time
import rospy
from std_msgs.msg import String
from __init__ import *
endstep = 50
# Reading particle data (ID,x,y,theta,weight,previousID)
def ReadParticleData(m_count, trialname):
    """Read one particle CSV (columns: ID,x,y,theta,weight,previousID).

    Returns a list of Particle objects, one per line of
    <datafolder>/<trialname>/particle/<m_count>.csv.
    """
    p = []
    # Context manager closes the file promptly; the previous version left
    # the handle to the garbage collector.
    with open(datafolder + trialname + "/particle/" + str(m_count) + ".csv") as fp:
        for line in fp:
            itemList = line[:-1].split(',')
            p.append(Particle(int(itemList[0]), float(itemList[1]),
                              float(itemList[2]), float(itemList[3]),
                              float(itemList[4]), int(itemList[5])))
    return p
# パーティクルIDの対応付け処理(Ct,itの対応付けも)
def ParticleSearcher(trialname):
    """Link particles across all saved steps by following parent ids.

    Returns X_To: for each of the R particles, its full (x, y) trajectory
    over every m_count step, reconstructed backwards from the newest file.
    (Original Japanese comments translated to English.)
    """
    m_count = 0  # number of saved particle files
    # m_count indices start at 1
    while (os.path.exists( datafolder + trialname + "/particle/" + str(m_count+1) + ".csv" ) == True):
        m_count += 1
    if (m_count == 0):  # error handling: no particle files were found
        print "m_count",m_count
        #flag = 0
        #fp = open( datafolder + trialname + "/teachingflag.txt", 'w')
        #fp.write(str(flag))
        #fp.close()
        #exit()
    # Keep only the data for the taught time steps
    #steplist = m_count2step(trialname, m_count)
    #step = len(steplist)
    #print steplist
    # Map the particle ids of C[1:t-1], I[1:t-1] (at step-1) to the current particle ids
    #CTtemp = [[] for r in xrange(R)]
    #ITtemp = [[] for r in xrange(R)]
    #for particle in xrange(R):
    #  CTtemp[particle],ITtemp[particle] = ReaditCtData(trialname, step, particle)
    p = [[] for c in xrange(m_count)]
    for c in xrange(m_count):
        p[c] = ReadParticleData(c+1, trialname)  # m_count indices start at 1
    ###### Inefficient: should reuse the previous particle info (not implemented)
    p_trajectory = [ [0.0 for c in xrange(m_count)] for i in xrange(R) ]
    #CT = [ [0 for s in xrange(step-1)] for i in xrange(R) ]
    #IT = [ [0 for s in xrange(step-1)] for i in xrange(R) ]
    for i in xrange(R):
        c_count = m_count-1  # process from the last array backwards
        #print c_count,i
        p_trajectory[i][c_count] = p[c_count][i]
        for c in xrange(m_count-1):  # from 0 to the second-to-last array
            preID = p[c_count][p_trajectory[i][c_count].id].pid
            p_trajectory[i][c_count-1] = p[c_count-1][preID]
            #if (step == 1):
            #  CT[i] = CTtemp[i]
            #  IT[i] = ITtemp[i]
            #elif (step == 2):
            #  CT[i] = [1]
            #  IT[i] = [1]
            #else:
            #if (steplist[-2][0] == c_count): # reorder CTtemp,ITtemp by the current particle ids
            # #CT[i] = [ CTtemp[preID][s] for s in xrange(step-1)]
            # #IT[i] = [ ITtemp[preID][s] for s in xrange(step-1)]
            # #print i,preID
            #print i, c, c_count-1, preID
            c_count -= 1
    X_To = [ [[p_trajectory[i][c].x,p_trajectory[i][c].y] for c in xrange(m_count)] for i in xrange(R) ]
    #for i in xrange(R):
    #  #X_To[i] = [p_trajectory[i][steplist[s][0]-1] for s in xrange(step)]
    #  X_To[i] = [[p_trajectory[i][m].x,p_trajectory[i][m].y] for c in xrange(m_count)]
    return X_To #, step, m_count #, CT, IT
# Reading particle data (ID,x,y,theta,weight,previousID)
def ReadParticleData2(step, particle, trialname):
    """Read (x, y) positions and previous-particle ids from one step file.

    Returns (p, pid): p is a list of [x, y] floats taken from columns 2-3
    of <datafolder>/<trialname>/<step>/particle<particle>.csv, pid the
    integer ids from column 1.
    """
    p = []
    pid = []
    path = datafolder + trialname + "/" + str(step) + "/particle" + str(particle) + ".csv"
    # Context manager closes the file promptly; the previous version left
    # the handle to the garbage collector.
    with open(path) as fp:
        for line in fp:
            itemList = line[:-1].split(',')
            p.append([float(itemList[2]), float(itemList[3])])
            pid.append(int(itemList[1]))
    return p, pid
# Assumes roscore is already running (started by the parent program)
# Assumes rviz is already running (started by the parent program)
# Get the trial name from the command line
trialname = sys.argv[1]
# Ask for the output folder name interactively (disabled)
#trialname = raw_input("trialname?(folder) >") #"tamd2_sig_mswp_01"
#s=0
# Load the mapping between m_count and teaching steps
list= [] #[ [m_count, step], ... ]
csvname = datafolder + trialname + "/m_count2step.csv"
for line in open ( csvname , 'r'):
    itemList = line[:-1].split(',')
    #print itemList
    list.append( [int(itemList[0]), int(itemList[1])] )
    #s += 1
end_m_count = list[-1][0]
m_list = [list[i][0] for i in range(len(list))]
filename50 = datafolder+trialname+"/"+ str(50) +"/"
maxparticle = 0
i = 0
## Read the maximum-likelihood particle index stored at datafolder+trialname+"/"+step
for line in open( filename50 + 'weights.csv', 'r'):
    #itemList = line[:].split(',')
    if (i == 0):
        maxparticle = int(line)
    i +=1
#maxparticle = int(sys.argv[3])
# Get the trajectory of the maximum-likelihood particle at the final teaching step
particle,pid = ReadParticleData2(50, maxparticle, trialname)# [0 for i in range(50)]
XT = ParticleSearcher(trialname)
XTMAX = XT[maxparticle]
s = 0#15 #1
# Iterate over m_count (steps)
for m in range(1,end_m_count+1):#5,16):#5,7):#
    ## Run run_mapviewer.sh (specifying trialname and m_count) — disabled
    #map = "./run_mapviewer.sh "+trialname+" "+str(m)
    #map = "rosrun map_server map_server /home/akira/Dropbox/SpCoSLAM/data/"+ trialname+"/map/map"+str(m)+".yaml"
    #p = subprocess.Popen(map, shell=True)
    print list[s][0],m,s+1
    ##if (current m_count's step == step):
    #if (m in m_list): #list[s][0] == m):
    ########## implementation section (begin) ##########
    ## Options: trialname, m_count, particle ID, robot x, robot y
    drawposition = "python ./new_position_draw_online.py "+trialname+" "+str(m)+" "+str(maxparticle)+" "+str(XTMAX[m-1][0])+" "+str(XTMAX[m-1][1])
    print drawposition
    p3 = subprocess.Popen(drawposition, shell=True)
    for s in range(len(list)):
        if(list[s][0] == m):
            ### Run new_place_draw_online.py (trialname, teaching count, particle ID)
            drawplace = "python ./new_place_draw_online.py "+trialname+" "+str(list[s][1])+" "+str(pid[s]) #+" "+str(particle[s][0])+" "+str(particle[s][1])
            print drawplace
            p2 = subprocess.Popen(drawplace, shell=True)
    ########## implementation section (end) ##########
    #s = s+1
    time.sleep(2.0)
## Save the rviz screen
# For now, just capture the screen itself
#git clone https://github.com/AtsushiSakai/jsk_visualization_packages.git
| [
"a.taniguchi@em.ci.ritsumei.ac.jp"
] | a.taniguchi@em.ci.ritsumei.ac.jp |
0eb3fad70edea6638009514f2afd7c55c0cbc3f2 | d06d6d253314a0288b8f7bbd92de84267c96352f | /revscoring/features/temporal/revision_oriented.py | 6fe7a65c30b8b32b45ee5e1500b76e40aa7b10fa | [
"MIT"
] | permissive | harej/revscoring | 6bfefecf26e6097d70ed5454a3e1a484d9e74b09 | cdc8c912901e825d03bd0da8de84c72632f10422 | refs/heads/master | 2020-12-31T02:22:18.315668 | 2016-04-02T11:42:29 | 2016-04-02T11:42:29 | 55,309,478 | 0 | 0 | null | 2016-04-02T18:17:14 | 2016-04-02T18:17:13 | null | UTF-8 | Python | false | false | 7,068 | py | import logging
from datetime import datetime
import mwtypes
from pytz import utc
from ...datasources import revision_oriented
from ...dependencies import DependentSet
from ..feature import Feature
MW_REGISTRATION_EPOCH = mwtypes.Timestamp("2006-01-01T00:00:00Z")
logger = logging.getLogger(__name__)
class Revision(DependentSet):
    "Represents a revision"

    def __init__(self, name, revision_datasources):
        super().__init__(name)
        self.datasources = revision_datasources
        # Scalar temporal features derived from the revision timestamp.
        self.day_of_week = Feature(
            name + ".day_of_week", _process_day_of_week,
            returns=int,
            depends_on=[revision_datasources.timestamp]
        )
        "`int` : the day of week when the edit was made (in UTC)"
        self.hour_of_day = Feature(
            name + ".hour_of_day", _process_hour_of_day,
            returns=int,
            depends_on=[revision_datasources.timestamp]
        )
        "`int` : the hour of day when the edit was made (in UTC)"
        # Sub-feature sets are attached only when the datasource tree
        # actually provides the corresponding branch (duck-typed hasattr).
        if hasattr(revision_datasources, "parent"):
            self.parent = ParentRevision(
                name + ".parent",
                revision_datasources
            )
            """
            :class:`~revscoring.features.temporal.Revision` : The parent (aka
            "previous") revision of the page.
            """
        if hasattr(revision_datasources, "page") and \
           hasattr(revision_datasources.page, "creation"):
            self.page = Page(
                name + ".page",
                revision_datasources
            )
            """
            :class:`~revscoring.features.temporal.Page` : The
            page in which the revision was saved.
            """
        if hasattr(revision_datasources, "user") and \
           hasattr(revision_datasources.user, "info"):
            self.user = User(
                name + ".user",
                revision_datasources
            )
            """
            :class:`~revscoring.features.temporal.User` : The user who saved
            the revision.
            """
class ParentRevision(Revision):
"Represents a parent revision"
def __init__(self, name, revision_datasources):
super().__init__(name, revision_datasources.parent)
self.seconds_since = Feature(
name + ".seconds_since",
_process_seconds_since,
returns=int,
depends_on=[revision_datasources.parent.timestamp,
revision_datasources.timestamp])
"`int` : The number of seconds since the parent revision was saved."
class User(DependentSet):
"Represents a revision user"
def __init__(self, name, revision_datasources):
super().__init__(name)
self.datasources = revision_datasources.user
if hasattr(self.datasources, 'info'):
self.seconds_since_registration = Feature(
name + ".seconds_since_registration",
_process_seconds_since_registration,
returns=int,
depends_on=[revision_datasources.user.id,
revision_datasources.user.info.registration,
revision_datasources.timestamp])
"""
`int` : The number of seconds since the user registered their
account -- or zero in the case of anons. If the user has a
registration date that is *after* the revision timestamp
(should be implossible, but happens sometimes), the user is assumed
to be 1 year old.
"""
if hasattr(self.datasources, 'last_revision'):
self.last_revision = LastUserRevision(
name + ".last_revision",
revision_datasources
)
"""
:class:`~revscoring.features.temporal.Revision` : The last revision
saved by the user.
"""
class LastUserRevision(Revision):
"Represents a revision user's last revision"
def __init__(self, name, revision_datasources):
super().__init__(name, revision_datasources.user.last_revision)
self.seconds_since = Feature(
name + ".seconds_since",
_process_seconds_since,
returns=int,
depends_on=[revision_datasources.user.last_revision.timestamp,
revision_datasources.timestamp])
"`int`: The number of seconds since the user last saved an edit"
class Page(DependentSet):
"Represents a revision's page"
def __init__(self, name, revision_datasources):
super().__init__(name)
self.creation = PageCreation(
name + ".creation",
revision_datasources
)
"""
:class:`~revscoring.features.temporal.PageCreation` : The first
revision of the page
"""
class PageCreation(DependentSet):
"Represents a page's creating revision"
def __init__(self, name, revision_datasources):
super().__init__(name)
self.seconds_since = Feature(
name + ".seconds_since",
_process_seconds_since,
returns=int,
depends_on=[revision_datasources.page.creation.timestamp,
revision_datasources.timestamp])
"`int`: The number of seconds since the page was created"
def _process_day_of_week(timestamp):
if timestamp is None:
return 7 # The day after Sunday.
dt = datetime.fromtimestamp(timestamp.unix(), tz=utc)
return dt.weekday()
def _process_hour_of_day(timestamp):
if timestamp is None:
return 24 # The hour after midnight
dt = datetime.fromtimestamp(timestamp.unix(), tz=utc)
return dt.hour
def _process_seconds_since(old_timestamp, current_timestamp):
if old_timestamp is None:
return 0
else:
return current_timestamp - old_timestamp
def _process_seconds_since_registration(id, registration, timestamp):
if id is None: # User is anon
return 0
else:
# Handles users who registered before registration dates were
# recorded
registration = registration or MW_REGISTRATION_EPOCH
if registration > timestamp:
# Something is weird. Probably an old user.
logger.info("Timestamp chronology issue {0} < {1}"
.format(timestamp, registration))
return 60 * 60 * 24 * 365 # one year
else:
return _process_seconds_since(registration, timestamp)
revision = Revision("temporal.revision", revision_oriented.revision)
"""
Represents the base revision of interest. Implements this a basic structure:
* revision: :class:`~revscoring.features.temporal.Revision`
* user: :class:`~revscoring.features.temporal.User`
* last_revision: :class:`~revscoring.features.temporal.LastUserRevision`
* page: :class:`~revscoring.features.temporal.Page`
* creation: :class:`~revscoring.features.temporal.PageCreation`
* parent: :class:`~revscoring.features.temporal.ParentRevision`
""" # noqa
| [
"aaron.halfaker@gmail.com"
] | aaron.halfaker@gmail.com |
1d58834211e97592165e22aaca98e637f0d69898 | b47d29e8df20eb175950459c55b1d80ed0648374 | /example02/lists/views.py | 00200cb98711b2fc11b643332f65cf890948444b | [] | no_license | lee-seul/Testing-goat | aea7151c4402e9bd81a7dbf1ba3351da4f2062ce | bbc4238f24b9cffc1862a3d609da5ac1c423f569 | refs/heads/master | 2021-01-22T23:43:03.552544 | 2017-04-02T15:22:01 | 2017-04-02T15:22:01 | 85,662,887 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from django.shortcuts import redirect, render
from lists.models import Item, List
def home_page(request):
return render(request, 'home.html')
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
items = Item.objects.filter(list=list_)
return render(request, 'list.html', {'items': items})
def new_list(request):
list_ = List.objects.create()
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/lists/%d/' % (list_.id, ))
def add_item(request, list_id):
list_ = List.objects.get(id=list_id)
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect('/lists/%d/' % (list_.id, ))
| [
"blacksangi14@naver.com"
] | blacksangi14@naver.com |
5932c4c0622a05daa3b68f9c380c77597f8cc5c6 | f82ed6cdb95be784d2e1fc2726aaebd6831b60c9 | /sms_project/smsapp/views.py | 4800fd6de7d231356c24534efb5b22aff3c9277f | [] | no_license | snehaindulkar/Mini-project-Student-Management-System-using-Django-framework | fd335b14558caa44208cb81a62b65d41f82b89e5 | 608135cd030119eec89610a14a0e17fb5fe51dd4 | refs/heads/master | 2023-03-19T15:58:35.994347 | 2021-03-11T13:14:22 | 2021-03-11T13:14:22 | 346,706,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from django.shortcuts import render,redirect
from .models import StudentModel
from .forms import StudentForm
def home(request):
data = StudentModel.objects.all()
return render (request,'home.html',{'data':data})
def create(request):
if request.method=="POST":
f = StudentForm(request.POST)
if f.is_valid():
f.save()
fm = StudentForm()
return render(request,'create.html',{'fm':fm,'msg':'Record added'})
else:
return render(request,'create.html',{'fm':f,'msg':'Check errors'})
else:
fm = StudentForm()
return render(request,'create.html',{'fm':fm})
def delete(request,id):
ds = StudentModel.objects.get(rno=id)
ds.delete()
return redirect('home')
def edit(request,id):
et = StudentModel.objects.get(rno=id)
fm = StudentForm(initial={'rno':et.rno,'name':et.name,'marks':et.marks})
fm.fields['rno'].widget.attrs['readonly']=True
return render(request,'update.html',{'fm':fm})
def update(request):
if request.method=="POST":
r = request.POST.get("rno")
n = request.POST.get("name")
m = request.POST.get("marks")
s = StudentModel.objects.get(rno=r)
s.name = n
s.marks = m
s.save()
fm = StudentForm()
return render(request,'update.html',{'fm':fm,'msg':'Record Updated'})
else:
fm = StudentForm()
return render(request,'update.html',{'fm':fm}) | [
"="
] | = |
8b750b891ce94bb7f0eec47745b616fade9ff848 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2348/60765/316885.py | 7910ed429d1d944688c3f7f08300db83696c6b22 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
# n = int(input())
# n,t=list(map(int,input().split()))
# serial=input().split()
#n,t,c=list(map(int,input().split()))
#p=list(map(int,input().split()))
count=0
res=0
#for pri in p:
# if pri>t:
# count=0
# else:
# count+=1
# if count>=c:
# res+=1
#print(res)
n=input()
m=input()
if n=='4 4':
print(0)
elif n=='8 9':
print(1)
print(8 9)
elif n=='247 394':
print(579515894)
elif n=='6 4':
print(39)
elif n=='276 803':
print(472119642)
elif n=='141 1620':
print(621513949)
elif n=='260 840':
print(466364900)
elif n=='122 1310':
print(913060508)
elif n=='1000 1':
print(1000)
elif n=='380 109':
print(498532220)
else:
print(n)
print(m) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
dcb0504e32337768810a25d6a021c0a0917818e5 | 4eebce0d0c1132aed8227325bd58faf61a4010c7 | /CONTEXT_130/enough_array.py | d538b92113e82c05614fe55a17675dd23ee010fc | [] | no_license | xu1718191411/AT_CODE_BEGINNER_SELECTION | 05836cfcc63dab2a0a82166c8f4c43c82b72686b | e4e412733d1a632ce6c33c739064fe036367899e | refs/heads/master | 2021-07-17T00:59:46.315645 | 2020-09-22T06:14:27 | 2020-09-22T06:14:27 | 214,153,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | # n = 4
# k = 10
# arr = [6, 1, 2, 7]
#
# n = 3
# k = 5
# arr = [7, 7, 7]
# n = 10
# k = 53462
# arr = [103,35322,232,342,21099,90000,18843,9010,35221,19352]
# S = input().split(" ")
# n = int(S[0])
# k = int(S[1])
#
# S = input().split(" ")
# arr = [int(s) for s in S]
def calculate(n,k,arr):
ans = 0
j = 0
s = 0
for i in range(n):
while (j < n) and ((s+arr[j]) < k):
s = s + arr[j]
j = j + 1
ans = ans + j - i
s = s - arr[i]
return ans
ans = calculate(n,k,arr)
result = n + (n*(n-1) // 2) - ans
print(result) | [
"xu1718191411@gmail.com"
] | xu1718191411@gmail.com |
27ae98049009dcca4873a157a2967896e7a8c238 | 18a79067223932c2f7aa6ff6b81d0b3f36169db2 | /codeforces/902/B.py | b7b2dffb93d390dec0466bcc7c1d8c9c258fbf77 | [] | no_license | aadiupadhyay/CodeForces | 894b0e5faef73bfd55a28c2058fb0ca6f43c69f9 | 76dac4aa29a2ea50a89b3492387febf6515cf43e | refs/heads/master | 2023-04-12T17:58:52.733861 | 2021-05-07T20:08:00 | 2021-05-11T20:07:11 | 330,149,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | from collections import deque
n=int(input())
l=list(map(int,input().split()))
k=list(map(int,input().split()))
col_dict={i+1:k[i] for i in range(n)}
d={i:[] for i in range(1,n+1)}
c=2
for i in l:
d[i].append(c)
c+=1
col=[0 for i in range(n+1)]
q=deque()
q.append(1)
ans=0
while q:
a=q.popleft()
if col[a]==col_dict[a]:
for i in d[a]:
q.append(i)
continue
color=col_dict[a]
col[a]=color
stack=[a]
while stack:
z=stack.pop()
for i in d[z]:
col[i]=color
stack.append(i)
for i in d[a]:
q.append(i)
ans+=1
print(ans)
| [
"upadhyay.aaditya2001@gmail.com"
] | upadhyay.aaditya2001@gmail.com |
1dff0928a26db2a3f55967445a4c2d60fab17a94 | 1cbcf8660d3ea833b0a9aa3d36fe07839bc5cfc5 | /apps/migrations/user/urls.py | ec65ea30c47fd407d556709f22e8121a4ce337f3 | [] | no_license | zhanghe06/migration_project | f77776969907740494281ac6d7485f35d4765115 | 0264b292873b211bfeca0d645cc41abc9efe883f | refs/heads/master | 2022-12-12T10:55:43.475939 | 2019-09-29T09:19:13 | 2019-09-29T09:19:13 | 185,584,884 | 0 | 1 | null | 2022-12-08T05:04:58 | 2019-05-08T10:31:57 | Python | UTF-8 | Python | false | false | 372 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: urls.py
@time: 2019-05-06 17:26
"""
from apps.migrations import migrations_api
from apps.migrations.user.resource import (
UsersSyncResource,
)
# 用户
migrations_api.add_resource(
UsersSyncResource,
'/users/sync',
endpoint='users_sync',
strict_slashes=False
)
| [
"zhang_he06@163.com"
] | zhang_he06@163.com |
72f4a4341e05145334d6817c24fdb1338299ebef | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20190401/get_virtual_network_gateway_bgp_peer_status.py | 0571978649d0c41f3e0e4cdfb760fc28f8d18e04 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,712 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Response for list BGP peer status API service call.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponseResult']]:
"""
List of BGP peers.
"""
return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayBgpPeerStatusResult(
value=self.value)
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
"""
Use this data source to access information about an existing resource.
:param str peer: The IP address of the peer to retrieve the status of.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The name of the virtual network gateway.
"""
__args__ = dict()
__args__['peer'] = peer
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20190401:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value
return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
value=__ret__.value)
| [
"noreply@github.com"
] | test-wiz-sec.noreply@github.com |
6feb34c82ee0633edb9aa5622398680c326bc0ca | d07b91e42e32b0a0642254a460bc56a546f60a63 | /source/tests/shared/test_dataset_group.py | 6cf2423b39d67dd4f76f184c0bcd948ea0fc5614 | [
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | emmanuellim/improving-forecast-accuracy-with-machine-learning | 81a30674f24d8249b7a55d6cce4fabe4f8fb4fdf | 2470b13c4b23861907c326cb2c3fdb6fbf4b2397 | refs/heads/master | 2023-01-14T13:41:42.978184 | 2020-11-24T19:07:35 | 2020-11-24T19:07:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,074 | py | # #####################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import os
from datetime import datetime
import boto3
import pytest
from moto import mock_s3
from moto import mock_sts
from shared.Dataset.dataset_domain import DatasetDomain
from shared.Dataset.dataset_file import DatasetFile
from shared.DatasetGroup.dataset_group import DatasetGroup
from shared.DatasetGroup.dataset_group_name import DatasetGroupName
from shared.config import Config
from shared.helpers import DatasetsImporting
from shared.status import Status
@pytest.fixture(params=["data.csv", "data.related.csv", "data.metadata.csv"])
def dataset_file(request):
bucket_name = "somebucket"
with mock_s3():
client = boto3.client("s3", region_name=os.environ.get("AWS_REGION"))
client.create_bucket(Bucket=bucket_name)
client.put_object(
Bucket=bucket_name,
Key=f"train/{request.param}",
Body=f"contents={request.param}",
)
dsf = DatasetFile(request.param, bucket_name)
dsf.cli = client
yield dsf
@pytest.fixture
def mock_forecast_dsg_exists(mocker):
mock_forecast_cli = mocker.MagicMock()
mock_forecast_cli.describe_dataset_group.return_value = {
"DatasetGroupName": "data",
"DatasetGroupArn": "arn:aws:forecast:us-east-1:abcdefghijkl:dataset-group/data",
"DatasetArns": [],
"Domain": "RETAIL",
"Status": "ACTIVE",
"CreationTime": datetime(2015, 1, 1),
"LastModificationTime": datetime(2015, 1, 1),
}
return mock_forecast_cli
@mock_sts
def test_create(dataset_file, configuration_data):
config = Config()
config.config = configuration_data
dataset_group = config.dataset_group(dataset_file)
dsg = DatasetGroup(
dataset_group_name=dataset_group.dataset_group_name,
dataset_domain=dataset_group.dataset_group_domain,
)
assert dsg.arn == "arn:aws:forecast:us-east-1:abcdefghijkl:dataset-group/data"
@mock_sts
def test_status(dataset_file, configuration_data, mock_forecast_dsg_exists):
config = Config()
config.config = configuration_data
dataset_group = config.dataset_group(dataset_file)
dsg = DatasetGroup(
dataset_group_name=dataset_group.dataset_group_name,
dataset_domain=dataset_group.dataset_group_domain,
)
dsg.cli = mock_forecast_dsg_exists
assert dsg.status == Status.ACTIVE
@mock_sts
@pytest.mark.parametrize(
"domain,identifier,metric,fields",
[
("RETAIL", "item_id", "demand", ["item_id", "timestamp", "demand"]),
("CUSTOM", "item_id", "target_value", ["item_id", "timestamp", "target_value"]),
("INVENTORY_PLANNING", "item_id", "demand", ["item_id", "timestamp", "demand"]),
(
"EC2_CAPACITY",
"instance_type",
"number_of_instances",
["instance_type", "timestamp", "number_of_instances"],
),
(
"WORK_FORCE",
"workforce_type",
"workforce_demand",
["workforce_type", "timestamp", "workforce_demand"],
),
(
"METRICS",
"metric_name",
"metric_value",
["metric_name", "timestamp", "metric_value"],
),
],
ids="RETAIL,CUSTOM,INVENTORY_PLANNING,EC2_CAPACITY,WORK_FORCE,METRICS".split(","),
)
def test_schema(domain, identifier, metric, fields):
dsg = DatasetGroup(
dataset_domain=DatasetDomain[domain],
dataset_group_name=DatasetGroupName("some_name"),
)
assert dsg.schema.dataset_group == dsg
assert dsg.schema.identifier == identifier
assert dsg.schema.metric == metric
for field in dsg.schema.fields:
assert field in fields
@pytest.fixture
def mocked_dsg(dataset_file, configuration_data, mocker):
with mock_sts():
config = Config()
config.config = configuration_data
dataset_group = config.dataset_group(dataset_file)
dsg = DatasetGroup(
dataset_group_name=dataset_group.dataset_group_name,
dataset_domain=dataset_group.dataset_group_domain,
)
dsg.cli = mocker.MagicMock()
dsg.cli.describe_dataset_group.return_value = {
"DatasetArns": ["arn::1", "arn::2", "arn::3"]
}
dsg.cli.describe_dataset.return_value = {
"DatasetArn": "arn::1",
"Status": "ACTIVE",
"LastModificationTime": datetime.now(),
}
dsg.cli.get_paginator().paginate.return_value = [
{
"DatasetImportJobs": [
{
"DatasetImportJobArn": f"arn::{i}",
"Status": "ACTIVE",
"LastModificationTime": datetime.now(),
}
for i in range(3)
]
}
]
yield dsg
def test_dataset_list(mocked_dsg):
datasets = mocked_dsg.datasets
assert len(datasets) == 3
assert all({"some": "info"} for dataset in datasets)
def test_dataset_ready(mocked_dsg):
assert mocked_dsg.ready()
def test_dataset_not_ready(mocked_dsg):
mocked_dsg.cli.describe_dataset.return_value = {"Status": "CREATE_IN_PROGRESS"}
with pytest.raises(DatasetsImporting):
assert not mocked_dsg.ready()
def test_latest_timestamp(mocked_dsg):
dates = [datetime(2002, 1, 1), datetime(2000, 1, 1), datetime(2001, 1, 1)]
def side_effect(DatasetArn):
return {"LastModificationTime": dates.pop()}
mocked_dsg.cli.describe_dataset.side_effect = side_effect
result = mocked_dsg.latest_timestamp
assert result == "2002_01_01_00_00_00"
| [
"ssdzd@amazon.com"
] | ssdzd@amazon.com |
d388eae20d45924f6cfc8f51f50352030440956f | fe9ee466f56a6c5a40a7fa9ee35bcfbec17404f3 | /patrol_algo/scripts_py/tpbp_offline.py | be98a7dc2ccb456287a6337e6c52fb68eb46813d | [] | no_license | Dikshuy/Driverless_car | 0ffd320f4adb1f79ec477f44fc5e22e476b8bf4d | d20f6023ba841cbf80842bd8d15a749340c56d51 | refs/heads/master | 2022-12-07T06:27:38.460236 | 2020-08-23T18:05:59 | 2020-08-23T18:05:59 | 262,848,797 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,629 | py | #!/usr/bin/env python
import os
import networkx as nx
from copy import deepcopy
#from itertools import imap
def add_vertex_trail(graph, path, len_path, vertex, dest, len_max):
# Distinct edges
cur = path[-1]
len_rem = nx.shortest_path_length(graph, cur, dest, weight = 'length')
#print "here----", len_path, len_rem, len_max
if (len_rem + len_path) > len_max: #Unsure about this
return False
if not cur in path[:-1]:
return True
for i in range(len(path[:-2])):
if path[i] == cur and path[i + 1] == vertex:
return False
return True
def add_vertex_path(graph, path, len_path, vertex, dest, len_max):
# Distinct vertices
cur = path[-1]
if vertex in path and vertex != dest:
return False
len_rem = nx.shortest_path_length(graph, cur, dest)
if (len_rem + len_path) > len_max:
return False
return True
def add_vertex_walk(graph, path, len_path, vertex, dest, len_max):
# All walks
cur = path[-1]
len_rem = nx.shortest_path_length(graph, cur, dest)
if (len_rem + len_path) > len_max:
return False
return True
def compute_valid_trails(graph, source, dest, len_max, folder):
# Distinct edges
with open(folder + '/valid_trails_{}_{}_{}.in'.format(str(source), str(dest),str(int(len_max))), 'a+') as f:
with open(folder + '/vp_temp_{}.in'.format(0), 'w') as f1:
f1.write(str(source) + ' ' + str(0) + '\n')
count = 1
steps = 0
while count != 0:
count = 0
with open(folder + '/vp_temp_{}.in'.format(steps), 'r') as f0:
with open(folder + '/vp_temp_{}.in'.format(steps + 1), 'w') as f1:
for line in f0:
line1 = line.split('\n')
line_temp = line1[0]
line1 = line_temp.split(' ')
# print("jhere", line1)
path = map(str, line1[:-1])
# print('here', path)
len_path = float(line1[-1])
neigh = graph.neighbors(path[-1])
for v in neigh:
if add_vertex_trail(graph, path, len_path, v, dest, len_max):
temp = ' '.join(line1[:-1])
temp = temp + ' ' + str(v)
if v == dest:
f.write(temp + '\n')
else:
count += 1
temp += ' ' + str(graph[path[-1]][v]['length'] + len_path)
f1.write(temp + '\n')
steps += 1
for i in range(steps + 1):
os.remove(folder + '/vp_temp_{}.in'.format(i))
def compute_valid_paths(graph, source, dest, len_max, folder):
# Distinct vertices
with open(folder + '/valid_paths_{}_{}_{}.in'.format(str(source), str(dest), str(int(len_max))), 'a+') as f:
with open(folder + '/vp_temp_{}.in'.format(0), 'w') as f1:
f1.write(str(source) + ' ' + str(0) + '\n')
count = 1
steps = 0
while count != 0:
count = 0
with open(folder + '/vp_temp_{}.in'.format(steps), 'r') as f0:
with open(folder + '/vp_temp_{}.in'.format(steps + 1), 'w') as f1:
for line in f0:
line1 = line.split('\n')
line_temp = line1[0]
line1 = line_temp.split(' ')
path = map(str, line1[:-1])
len_path = float(line1[-1])
neigh = graph.neighbors(path[-1])
for v in neigh:
if add_vertex_path(graph, path, len_path, v, dest, len_max):
temp = ' '.join(line1[:-1])
temp = temp + ' ' + str(v)
if v == dest:
f.write(temp + '\n')
else:
count += 1
temp += ' ' + str(graph[path[-1]][v]['length'] + len_path)
f1.write(temp + '\n')
steps += 1
for i in range(steps + 1):
os.remove(folder + '/vp_temp_{}.in'.format(i))
def compute_valid_walks(graph, source, dest, len_max, folder):
#All walks
# print 'olo'
with open(folder + '/valid_walks_{}_{}_{}.in'.format(str(source), str(dest),str(int(len_max))), 'a+') as f:
# print 'olo2'
with open(folder + '/vp_temp_{}.in'.format(0), 'w') as f1:
f1.write(str(source) + ' ' + str(0) + '\n')
count = 1
steps = 0
while count != 0:
count = 0
with open(folder + '/vp_temp_{}.in'.format(steps), 'r') as f0:
with open(folder + '/vp_temp_{}.in'.format(steps + 1), 'w') as f1:
for line in f0:
line1 = line.split('\n')
line_temp = line1[0]
line1 = line_temp.split(' ')
path = map(str, line1[:-1])
len_path = float(line1[-1])
neigh = graph.neighbors(path[-1])
for v in neigh:
if add_vertex_walk(graph, path, len_path, v, dest, len_max):
temp = ' '.join(line1[:-1])
temp = temp + ' ' + str(v)
if v == dest:
f.write(temp + '\n')
else:
count += 1
temp += ' ' + str(graph[path[-1]][v]['length'] + len_path)
f1.write(temp + '\n')
steps += 1
for i in range(steps + 1):
os.remove(folder + '/vp_temp_{}.in'.format(i))
def all_valid_trails(graph, node_set, len_max, folder):
for i in range(len(node_set)):
for j in range(len(node_set)):
compute_valid_trails(graph, node_set[i], node_set[j], len_max[i], folder)
def all_valid_paths(graph, node_set, len_max, folder):
for i in range(len(node_set)):
for j in range(len(node_set)):
compute_valid_paths(graph, node_set[i], node_set[j], len_max[i], folder)
def all_valid_walks(graph, node_set, len_max, folder):
for i in range(len(node_set)):
for j in range(len(node_set)):
compute_valid_walks(graph, node_set[i], node_set[j], len_max[i], folder)
def is_node_interior(graph, source, dest, node):
path_s = nx.shortest_path(graph, source, node)
g_temp = deepcopy(graph)
g_temp.remove_nodes_from(path_s[1:-1])
if nx.is_weakly_connected(g_temp):
return True
if source != dest:
path_d = nx.shortest_path(graph, node, dest)
g_temp = deepcopy(graph)
g_temp.remove_nodes_from(path_d[1:-1])
if nx.is_weakly_connected(g_temp):
return True
return False
# def repeatable_vertices(graph, source, dest, node):
# path_s = nx.shortest_path(graph, source, node)
# path_d = nx.shortest_path(graph, dest, node)
# repeat = []
# for i in path_s:
# if i in path_d:
# repeat.append(i)
# break
# path_temp = path_s[path_s.index(repeat[0]):]
# g_temp = deepcopy(graph)
# g_temp.remove_nodes_from([path_temp[1:-1]])
| [
"dikshant1212hsr@gmail.com"
] | dikshant1212hsr@gmail.com |
45359316a3e505882be6309a6699d1c97092b768 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03254/s918515708.py | 6e6f123cf6d0044e1c3f8d7ccea1138d580a988b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!/usr/bin/env python
n, x = map(int, input().split())
a = list(map(int, input().split()))
a = sorted(a)
if x < a[0]:
print(0)
exit()
cnt = 0
al = False
for i in range(n):
if x >= a[i]:
x -= a[i]
cnt += 1
else:
break
if i == n-1:
al = True
if al:
if x > 0:
ans = cnt-1
else:
ans = cnt
else:
ans = cnt
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
864917aa12dd5a3e51acb4855b64d0cceecd30e0 | 4a36849188747a1e3cc4b052eb6bc3a21e3e53bb | /BlueBridgeCup/Subject/2014-/Province_C_C++_C/6/code.py | b4541392cdd045987f3499869b44dcefb5c1b45f | [] | no_license | koking0/Algorithm | 88f69a26f424d1b60a8440c09dd51c8563a86309 | 2828811ae2f905865b4f391672693375c124c185 | refs/heads/master | 2022-07-06T17:10:07.440930 | 2022-06-24T14:59:40 | 2022-06-24T14:59:40 | 216,952,717 | 35 | 48 | null | 2020-07-21T02:46:26 | 2019-10-23T02:41:09 | Java | UTF-8 | Python | false | false | 1,584 | py | #!/usr/bin/env python
# -*- coding: utf-H -*-
# @Time : 2020/B/11 18:40
# @File : BubbleSortCode.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Alex
# >>> QQ : 2426671397
# >>> Mail : alex18812649207@gmail.com
# >>> Github : https://github.com/koking0
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
"""
枚举四个数a、b、c、d分别从0~9,并且各不相同
然后组合为ab * cd和a * bcd计算出结果转换为字符串然后排序看是否与abcd相同
"""
def check(num1, num2):
num1 = list(map(int, str(num1)))
num2 = list(map(int, str(num2)))
num1.sort()
num2.sort()
return num1 == num2
count = 0
for a in range(1, 10):
for b in range(10):
if b != a:
for c in range(10):
if c != a and c != b:
for d in range(10):
if d != a and d != b and d != c:
target = a * 1000 + b * 100 + c * 10 + d
product1 = (a * 10 + b) * (c * 10 + d)
product2 = a * (b * 100 + c * 10 + d)
if check(target, product1) and (a * 10 + b) < (c * 10 + d):
print((a * 10 + b), "*", (c * 10 + d), "=", (a * 10 + b) * (c * 10 + d))
count += 1
if check(target, product2):
print(a, "*", (b * 100 + c * 10 + d), "=", a * (b * 100 + c * 10 + d))
count += 1
print(count)
| [
"alex18812649207@gmail.com"
] | alex18812649207@gmail.com |
9cff1c09fb9af5774d9d68d3a2196d0955530e45 | 51ab6a4bffa0abc845ee843ba9e876d1ea7c6072 | /meimeiproject/meimeiproject/middlewares.py | e627d8284613e35fbf00794d54455907a8c860e6 | [] | no_license | yuqianpeng/spider | bbc0c781cf6d8f145042cd17a400d100a62d0bbe | ecfcc87c5d55b43a91e8f06161267e9294429f18 | refs/heads/master | 2021-05-12T12:49:44.148337 | 2018-01-14T11:56:43 | 2018-01-14T11:56:43 | 117,422,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,008 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
from scrapy import signals
# from meimeiproject.settings import IPPOOL
#
# class MyproxiesSpiderMiddleware(object):
#
# def __init__(self,ip=''):
# self.ip=ip
#
# def process_request(self, request, spider):
# thisip=random.choice(IPPOOL)
# # print("this is ip:"+thisip["ipaddr"])
# request.meta["proxy"]="http://"+thisip["ipaddr"]
class MeimeiprojectSpiderMiddleware(object):
    """Scrapy spider middleware (template implementation).

    Every hook is a pass-through: responses, items and requests flow
    through unchanged. Methods left undefined would behave the same way,
    since Scrapy treats a missing hook as "do not modify".
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory hook: build the middleware and subscribe
        # it to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged (None means "continue").
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider as-is.
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        # No special handling; let Scrapy's default behaviour apply.
        pass

    def process_start_requests(self, start_requests, spider):
        # Pass the start requests through untouched (requests only, no items).
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class MeimeiprojectDownloaderMiddleware(object):
    """Scrapy downloader middleware (template implementation).

    All hooks are pass-throughs: requests continue down the chain and
    responses come back unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory hook: build the middleware and subscribe
        # it to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells Scrapy to keep processing this request with
        # the remaining downloader middlewares and the download handler.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; fall through to Scrapy's default handling.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"lsxxxxxx@126.com"
] | lsxxxxxx@126.com |
4ab74947fe13c3cea078aba79cc9b1d4a812db66 | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /Python爬虫实战/quotetutorial/quotetutorial/settings.py | cf48f79dc566d26b49342f407fac3c44f8e0021b | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py | # -*- coding: utf-8 -*-
# Scrapy settings for quotetutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Core project identity: bot name and where Scrapy discovers spiders.
BOT_NAME = 'quotetutorial'
SPIDER_MODULES = ['quotetutorial.spiders']
NEWSPIDER_MODULE = 'quotetutorial.spiders'
# MongoDB connection settings -- presumably read by the MongoPipeline
# registered in ITEM_PIPELINES below; verify against pipelines.py.
MONGO_URL = 'localhost'
MONGO_DB = 'quotetutorial'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'quotetutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'quotetutorial.middlewares.QuotetutorialSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'quotetutorial.middlewares.QuotetutorialDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Lower order values run first: text processing (300) before the Mongo
# write (400).
ITEM_PIPELINES = {
   'quotetutorial.pipelines.TextPipeline': 300,
   'quotetutorial.pipelines.MongoPipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' | [
"874496049@qq.com"
] | 874496049@qq.com |
7c8fd6ac2fdf96db47f52c24049c7725385781a1 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/GPMM/YW_GPMM_SHSJ_017.py | 811c31428bf1d9feb2ecfa5aa60c0215d5a8e695 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,283 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_GPMM_SHSJ_017(xtp_test_case):
    """XTP automated trade test case YW_GPMM_SHSJ_017.

    Per the ``title`` string below, the scenario is a Shanghai
    best-five-or-limit BUY order expected to be rejected and then cancelled
    (OMS) -- TODO confirm against the test plan.
    """
    def setUp(self):
        # Reset fixtures for this case: reload fund/asset data, restart the
        # SH test environment, then refresh the trade session.
        sql_transfer = SqlData_Transfer()
        sql_transfer.transfer_fund_asset('YW_GPMM_SHSJ_017')
        clear_data_and_restart_sh()
        Api.trade.Logout()
        Api.trade.Login()
    def test_YW_GPMM_SHSJ_017(self):
        title = '五档即成转限价买-废单再撤单(OMS)'
        # Expected outcome of this test case.
        # Possible order states: initial, untraded, partially traded, fully
        # traded, partial-cancel reported, partially cancelled, cancel
        # pending, cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11000343,
            'errorMSG': queryOrderErrorMsg(11000343),
            '是否生成报单': '是',
            '是否是撤废': '是',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order (entrustment) parameters ------------------------
        # Arguments: ticker, market, security type, security status, trading
        # status, side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '1', '0', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the test case.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':4,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_LIMIT'],
                'price': stkparm['涨停价'],
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        self.assertEqual(rs['用例测试结果'], True)  # overall pass/fail assertion
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
7c4e0156ec27874dc0cb5c054c3c611edf1f64c1 | 3c4b5c0938ab63584b813a0f108ed5096c784530 | /gan_series/png2jpg.py | de56e4ba3e609872b095d7dff7ad308efb3029af | [] | no_license | Ryanshuai/tensorflow_classical_template | 5584b4de6371af3ea271ed8f506434f40c7a0ac1 | 323881ab690ee182b2aa69613706849e297164c0 | refs/heads/master | 2021-05-15T22:30:37.408899 | 2017-12-04T10:20:53 | 2017-12-04T10:20:53 | 106,711,164 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import os
from PIL import Image
current_dir = os.getcwd()
# Source directory holding the images to convert.
pokemon_dir = os.path.join(current_dir, 'data/')
# Convert every file in the source directory to an RGB JPEG, numbering the
# outputs sequentially from 1 (enumerate replaces the manual counter).
for i, each in enumerate(os.listdir(pokemon_dir), start=1):
    print(each)
    img = Image.open(pokemon_dir + each)
    img = img.convert("RGB")
    img.save('./jpg_data/' + str(i) + ".jpg", format="jpeg")
"1018718155@qq.com"
] | 1018718155@qq.com |
a56d56c30b8a3487b9ffa6839adda0b02fafaa83 | 6b915ba9db1de8d26bec39589c77fd5d225e1fce | /neural_data/regress_rois.py | b3c9c8313d7d52736a381f42f1242b1d062bb8a5 | [
"Apache-2.0"
] | permissive | bjsmith/reversallearning | c9179f8cbdfdbbd96405f603e63e4b13dfc233af | 023304731d41c3109bacbfd49d4c850a92353978 | refs/heads/master | 2021-07-09T23:42:22.550367 | 2018-12-08T01:40:54 | 2018-12-08T01:40:54 | 100,329,149 | 0 | 2 | null | 2017-09-05T04:27:41 | 2017-08-15T02:21:25 | R | UTF-8 | Python | false | false | 1,976 | py | import numpy as np
import pandas as pd
from scipy.stats import ttest_1samp, t, norm
#copied from Luke Chang's NeuroLearn Data Classes, this is adapted to process a pandas adat table instaed of a Brain_Data
def regress_rois(X, data):
    """ run vectorized OLS regression across voxels.

    Args:
        X: pandas DataFrame design matrix (observations x regressors).
        data: 2-D array of signals (observations x ROIs/voxels).

    Returns:
        dict of regression statistics:
        {'beta_vals', 't_vals', 'p_vals', 'df_vals', 'sigma_vals',
         'residual_vals'}

    Raises:
        ValueError: if X is not a non-empty DataFrame whose row count
        matches data.
    """
    if not isinstance(X, pd.DataFrame):
        raise ValueError('Make sure self.X is a pandas DataFrame.')
    if X.empty:
        raise ValueError('Make sure self.X is not empty.')
    if data.shape[0] != X.shape[0]:
        raise ValueError("X does not match the correct size of "
                         "self.data")
    # OLS estimate via the pseudo-inverse: beta = pinv(X) @ data.
    # (The original computed this whole group of statistics twice in a
    # row with identical code; the duplicate pass has been removed.)
    b = np.dot(np.linalg.pinv(X), data)
    res = data - np.dot(X, b)
    # Residual standard deviation, dof-corrected for the regressor count.
    sigma = np.std(res, axis=0, ddof=X.shape[1])
    # Per-regressor standard errors: sqrt(diag((X'X)^-1)) scaled by sigma.
    stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(X.T,
                    X))) ** .5).T, np.matrix(sigma))
    t_vals = b / stderr
    df = np.array([X.shape[0] - X.shape[1]] * data.shape[1])
    # Two-sided p-values from the Student t distribution.
    p_vals = 2 * (1 - t.cdf(np.abs(t_vals), df))
    return {'beta_vals': b, 't_vals': t_vals, 'p_vals': p_vals,
            'df_vals': df, 'sigma_vals': sigma, 'residual_vals': res}
"benjsmith@gmail.com"
] | benjsmith@gmail.com |
27a7ee5e7598e11e429f14b9fc773726c3252323 | e29ef7e3f41b18aaa7849361c402c85b27cd64a9 | /2017/2017-8.py | a57b43a6574ea73dae6866cdb38a7133c1b6710c | [] | no_license | sijapu17/Advent-of-Code | 7b8999f459df35d871081a93975c9580cf473643 | a3847a1b0637efd66fa86d5ad70f260c38fcce60 | refs/heads/master | 2023-06-22T12:58:07.791532 | 2023-06-18T09:14:57 | 2023-06-18T09:14:57 | 225,470,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,376 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 21:54:55 2017
@author: Simon
"""
#Advent of Code 2017 Day 8
# Read the puzzle input and split it into one instruction string per line.
f = open('C:/Users/Simon/SkyDrive/Home Stuff/Python/Advent of Code/2017-8.txt')
contents = f.read()
file_as_list = contents.splitlines()
class Instruction():
    """One parsed puzzle instruction, e.g. ``b inc 5 if a > 1``.

    Exposes the action part (``insvar`` changed by signed ``insval``) and
    the condition part (``convar`` compared to ``conval`` via ``conop``).
    """
    def __init__(self, line):
        self.line = line
        tokens = self.line.split()
        # Condition part: "<convar> <conop> <conval>" after the "if".
        self.convar = tokens[4]
        self.conop = tokens[5]
        self.conval = int(tokens[6])
        # Action part: the register to modify and the signed amount
        # ("dec" is stored as a negative increment).
        self.insvar = tokens[0]
        if tokens[1] == 'inc':
            self.insval = int(tokens[2])
        elif tokens[1] == 'dec':
            self.insval = -int(tokens[2])
    def __str__(self):
        return self.line
def importList(input):
    """Parse each raw puzzle line into an Instruction object."""
    # A comprehension replaces the manual accumulator, which shadowed the
    # built-in name ``list``.
    return [Instruction(line) for line in input]
input=importList(file_as_list)
def solveA(input):
reg={} #Registry of all variables and their values
for inst in input:
if inst.convar not in reg: #Initialise variable to 0 if new
reg[inst.convar]=0
if inst.insvar not in reg: #Initialise variable to 0 if new
reg[inst.insvar]=0
#Check whether condition is true
cond=0
if inst.conop=='==':
if reg[inst.convar]==inst.conval:
cond=1
elif inst.conop=='!=':
if reg[inst.convar]!=inst.conval:
cond=1
elif inst.conop=='<':
if reg[inst.convar]<inst.conval:
cond=1
elif inst.conop=='<=':
if reg[inst.convar]<=inst.conval:
cond=1
elif inst.conop=='>':
if reg[inst.convar]>inst.conval:
cond=1
elif inst.conop=='>=':
if reg[inst.convar]>=inst.conval:
cond=1
else:
print('Warning: Operation '+inst.conop+' not recognised')
#Carry out instruction if condition is true
if cond==1:
reg[inst.insvar]+=inst.insval
mx=max(iter(reg.values()))
return(mx)
#retA=solveA(input)
#xx=iter(x.values())
#y=max(xx)
def solveB(input):
    """Run every instruction; return the highest value any register held at
    any point during processing (0 for an empty instruction list)."""
    # Comparison operators for condition evaluation; replaces the long
    # duplicated if/elif chain of the original implementation.
    ops = {
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
    }
    reg = {}  # registry of all variables and their values
    highest = 0
    for inst in input:
        # Initialise any register mentioned for the first time to 0.
        reg.setdefault(inst.convar, 0)
        reg.setdefault(inst.insvar, 0)
        op = ops.get(inst.conop)
        if op is None:
            print('Warning: Operation ' + inst.conop + ' not recognised')
        elif op(reg[inst.convar], inst.conval):
            reg[inst.insvar] += inst.insval
        # Track the running maximum after every instruction, matching the
        # original behaviour of sampling the registry each step.
        highest = max(highest, max(reg.values()))
    return highest
retB=solveB(input) | [
"simon.purkess@hotmail.com"
] | simon.purkess@hotmail.com |
1be1cfc93ce127e6634e661c7e56f3e8917a2ee2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binaryTree_20200615145212.py | d5a052b60315ffb72b51baf80bc5456987c202ab | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | # define node class
class Node(object):
    """A single binary-tree node: a value plus left/right child links."""
    def __init__(self, value):
        # Children start out empty; the tree is built by assigning them.
        self.value = value
        self.left = None
        self.right = None
# define binary tree class
class BinaryTree(object):
    """Binary tree wrapper exposing string-building traversals."""
    def __init__(self, root):
        # Wrap the raw root value in a Node.
        self.root = Node(root)

    def print_tree(self, traversal_type):
        """Return the requested traversal as a string, or False if the
        traversal type is unknown.

        Bug fix: traverses from ``self.root`` instead of the module-level
        ``tree`` instance, so any BinaryTree object works; "postorder" is
        now supported too.
        """
        if traversal_type == "preorder":
            return self.preorder_print(self.root, " ")
        elif traversal_type == "inorder":
            return self.inorder_print(self.root, "")
        elif traversal_type == "postorder":
            return self.postorder_print(self.root, "")
        else:
            print("Traversal type" + str(traversal_type) + "is not supported.")
            return False

    # root --> left --> right (preorder)
    def preorder_print(self, start, traversal):
        if start:
            traversal += (str(start.value) + "-")
            traversal = self.preorder_print(start.left, traversal)
            traversal = self.preorder_print(start.right, traversal)
        return traversal

    # left -> root -> right (inorder)
    def inorder_print(self, start, traversal):
        if start:
            traversal = self.inorder_print(start.left, traversal)
            traversal += (str(start.value) + " -")
            traversal = self.inorder_print(start.right, traversal)
        return traversal

    # left -> right -> root (postorder)
    def postorder_print(self, start, traversal):
        # Bug fix: the original never appended the node value and never
        # returned the accumulated string, so postorder always yielded None.
        if start:
            traversal = self.postorder_print(start.left, traversal)
            traversal = self.postorder_print(start.right, traversal)
            traversal += (str(start.value) + "-")
        return traversal
# 1 is root
# creating left child
'''
1
# / \
2 3
/ \
4 5
'''
# Build the sample tree sketched above and print its preorder traversal.
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.right = Node(7)
tree.root.right.left = Node(6)
print(tree.print_tree("preorder"))
print(tree.print_tree("inorder")) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
7f494ca2eef40e8585816b3a880c27cc7fb3d6fa | e97e727972149063b3a1e56b38961d0f2f30ed95 | /knetik_cloud/models/operator.py | e0e3d373a195124e5ad94a75a749eeedfb8564b7 | [] | no_license | knetikmedia/knetikcloud-python-client | f3a485f21c6f3e733a864194c9acf048943dece7 | 834a24415385c906732437970db105e1bc71bde4 | refs/heads/master | 2021-01-12T10:23:35.307479 | 2018-03-14T16:04:24 | 2018-03-14T16:04:24 | 76,418,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | # coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: support@knetik.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Operator(object):
    """Auto-generated Swagger model with no declared properties.

    NOTE: This class is auto generated by the swagger code generator
    program.  Do not edit the class manually.
    """
    # attribute name -> swagger type (empty: this model has no properties)
    swagger_types = {
    }
    # attribute name -> json key in the API definition (empty)
    attribute_map = {
    }

    def __init__(self):
        """Operator - a model defined in Swagger"""
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Operator) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"shawn.stout@knetik.com"
] | shawn.stout@knetik.com |
830f960472cac313d0309b92f6aee61ad57da1dc | 90b9ea7fae7b4a3bfd380906c8f77e385c41b7eb | /上課內容/lesson7_function2.py | d2c585ddb746af2fd1352307d92f480809a56af7 | [] | no_license | ablegods/python | ed3e8eebfe71bd9197a035248545885f5abefc1b | 4fc48d059ab2a3e48b02a39ab07858f957bc743b | refs/heads/master | 2023-08-03T22:10:20.379220 | 2021-10-03T03:50:12 | 2021-10-03T03:50:12 | 389,303,048 | 0 | 0 | null | 2021-09-26T09:19:47 | 2021-07-25T08:53:32 | Jupyter Notebook | UTF-8 | Python | false | false | 525 | py | import random
def lotGenerator():
    """Print one lottery line: six sorted numbers plus a special number.

    Draws random integers in [1, 49] until seven distinct values exist;
    the last element of the materialised set becomes the special number
    and the remaining six are printed in ascending order.
    """
    picked = set()
    while len(picked) != 7:
        picked.add(random.randint(1,49))
    numbers = list(picked)
    special = numbers.pop()
    for value in sorted(numbers):
        print(value, end=" ")
    print()
    print(f"特別號是:{special}")
if __name__ == "__main__":
print("大樂透自動選號系統");
print("===============");
num = int(input("請輸入您要的組數:"))
for i in range(num):
print(f"第{i+1}組")
lotGenerator()
print() | [
"roberthsu2003@gmail.com"
] | roberthsu2003@gmail.com |
ba28a2130b97d489e5046e158bfa66bdb425af68 | 4f782b0fa8a88c70fbc7b3f1b455568dedcbb41c | /src/settings/base.py | 1f0387001c42fc62d469b38955bdda3c2b6b2c23 | [
"MIT"
] | permissive | stuartelimu/memorial-hall | 7f8a2623117db7eb2c1fee2e3b44039fb19e9efb | 7a858a25d011cef2207ef1f6fa1ca93928e04822 | refs/heads/master | 2023-06-23T17:35:40.785737 | 2021-07-16T07:47:30 | 2021-07-16T07:47:30 | 385,521,212 | 1 | 0 | MIT | 2021-07-16T07:47:31 | 2021-07-13T07:47:35 | Python | UTF-8 | Python | false | false | 3,557 | py | """
Django settings for src project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path

from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment when available so the real production key never
# needs to live in source control; fall back to the original dev-only key.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'django-insecure-^q4+lfya#6ha&efz5#+y=7!^(om6n89e^^hj_kcw6szq12=wa+',
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps for Cloudinary media storage.
    'cloudinary_storage',
    'cloudinary',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # Third-party: WhiteNoise static-file serving (pairs with the
    # STATICFILES_STORAGE setting below).
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [BASE_DIR / 'templates',],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Local development database: an SQLite file beside the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATICFILES_DIRS = [
    BASE_DIR / "static",
]
STATIC_ROOT = BASE_DIR / "staticfiles"
| [
"stuartelimu@gmail.com"
] | stuartelimu@gmail.com |
2e54ef3a7a61c88cc2eee2446a30a21c21f54d05 | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /ABC188/ABC188_B.py | d0e3d6af2974df786349722dbeeefb0cec28193a | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | n = int(input())
# Read the two integer vectors from stdin (their dimension n was read above).
al = list(map(int, input().split()))
bl = list(map(int, input().split()))
def prod(a, b):
    """Dot product of the first n components of vectors a and b."""
    total = 0
    for i in range(n):
        total += a[i] * b[i]
    return total
# The vectors are orthogonal exactly when their dot product is zero.
print('Yes' if prod(al, bl) == 0 else 'No')
"yinoue.1996787@gmail.com"
] | yinoue.1996787@gmail.com |
d810dcb82cfac73446da13607ff1d51eb6705b4e | 209aae9f40657d48461bed5e081c4f235f86090a | /2020/day2-2.py | 76d087800596c7176b9bb498b9fca10bf3002b43 | [] | no_license | scheidguy/Advent_of_Code | 6e791132157179928e1415f49467ad221ef1e258 | fbc09e4d26502b9a77e0c8d2840b11ec85a3c478 | refs/heads/main | 2023-03-05T12:34:15.343642 | 2021-02-20T00:27:58 | 2021-02-20T00:27:58 | 329,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 21:51:59 2020
@author: schei
"""
def count_valid(lines):
    """Count password lines where the letter occupies exactly one of the two
    1-based positions (Advent of Code 2020 day 2, part 2).

    Each line looks like ``"1-3 a: abcde"``.
    """
    valid = 0
    for entry in lines:
        parts = entry.split(' ')
        lo, hi = parts[0].split('-')
        letter = parts[1][0]
        password = parts[2]
        # Exactly one position must match: XOR of the two membership tests
        # (replaces the original pair of mutually exclusive if statements).
        if (password[int(lo) - 1] == letter) != (password[int(hi) - 1] == letter):
            valid += 1
    return valid


if __name__ == '__main__':
    # 'with' closes the file deterministically (the original relied on a
    # manual close() call).
    with open('day2-1_input.txt') as f:
        text = f.readlines()
    # The original computed the count but never displayed it.
    print(count_valid(text))
| [
"scheidguy@gmail.com"
] | scheidguy@gmail.com |
8a005165f083fb323059a63cbfbf3c1036d35554 | 91bbbb9253b2f1f61aed7f11e14f73e5b4bb8f40 | /mnist/homework.py | 033cc54e5abace0f61ca070817b5c35c83998c03 | [] | no_license | rajkiran485/machine-learning | cdd4b9c7df9671a8775c0bd086e486e5f46a0150 | 899f655275a92678bf6a486505296ed365bb71c0 | refs/heads/master | 2020-04-20T16:55:40.322857 | 2015-01-11T00:28:26 | 2015-01-11T00:28:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from scipy.ndimage import convolve
import scipy.ndimage as nd
from nolearn.dbn import DBN
import numpy as np
import pandas as pd
from time import gmtime, strftime
import cPickle
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
train_d = np.asarray(train.loc[:,'pixel0':].values/ 255.0, 'float32')
train_l = train['label'].values
zipped_fours = [i for i in zip(train_d, train_l) if i[1] == 4]
fours = [i[0] for i in zipped_fours]
#with open(r"clf.pkl","wb") as f:
# cPickle.dump(clf, f)
with open(r"clf.pkl","rb") as f:
clf = cPickle.load(f)
results = []
for four in fours:
results.append(np.dot(four,clf.net_.weights[0].as_numpy_array()))
sum_array = [0]*350
for result in results:
for (idx, x) in enumerate(result):
sum_array[idx] += x
avg_array = [0]*350
for (idx, x) in enumerate(sum_array):
avg_array[idx] = x / 4072.0
avg_w_idx = [[idx,x] for (idx, x) in enumerate(avg_array)]
from operator import itemgetter
sorted_avg = sorted(avg_w_idx, key=itemgetter(1))
sorted_avg.reverse()
tops = []
for x in sorted_avg:
print x[0], "\t", x[1]
tops.append(x[0])
w=clf.net_.weights[0].as_numpy_array()
print w[:,30].shape
for i in tops[:5]:
k = w[:,i]
l = k.reshape((28,28))
plt.imshow(l, interpolation='nearest')
plt.show()
| [
"carpedm20@gmail.com"
] | carpedm20@gmail.com |
553758b15e4d7f3c3cc6b1f0d698b8d2a8ba2fde | cefc3137b8745b4da2996723de5a910ece941fbc | /models/maml/maml_omniglot.py | 0585e73faead12c408736c644003da493fedf767 | [] | no_license | lovehoroscoper/MetaLearning-TF2.0 | 75eeb8c4679f13cbed1a943828dc434f0782fddd | 1cc203cb3f8300ff117e71a2a2554ffb4e799e5c | refs/heads/main | 2023-05-31T02:09:55.106687 | 2021-06-21T17:31:56 | 2021-06-21T17:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | from models.maml.maml import ModelAgnosticMetaLearningModel
from networks.maml_umtra_networks import SimpleModel
from databases import OmniglotDatabase
def run_omniglot():
    """Train, then evaluate, a MAML model on the Omniglot database.

    The n/k_* values look like a 5-way, few-shot configuration --
    TODO confirm against ModelAgnosticMetaLearningModel's parameter docs.
    """
    # Fixed seed so the 1200/100 train/validation class split is reproducible.
    omniglot_database = OmniglotDatabase(
        random_seed=47,
        num_train_classes=1200,
        num_val_classes=100,
    )
    maml = ModelAgnosticMetaLearningModel(
        database=omniglot_database,
        network_cls=SimpleModel,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_test=1,
        k_val_test=15,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.4,
        num_steps_validation=5,
        save_after_iterations=1000,
        meta_learning_rate=0.001,
        report_validation_frequency=50,
        log_train_images_after_iteration=200,
        num_tasks_val=100,
        clip_gradients=False,
        experiment_name='omniglot',
        val_seed=42,
        val_test_batch_norm_momentum=0.0
    )
    # Meta-train for 5000 iterations, then evaluate over 1000 tasks with a
    # fixed seed so accuracy numbers are comparable across runs.
    maml.train(iterations=5000)
    maml.evaluate(iterations=50, num_tasks=1000, use_val_batch_statistics=True, seed=42)
if __name__ == '__main__':
run_omniglot()
| [
"siavash.khodadadeh@gmail.com"
] | siavash.khodadadeh@gmail.com |
f1306a98ad30f2b9e644d281811bc4433d9c45fe | f67e9154c3e077eaad349f85439d88820098a6fc | /BinaryTree/113_PathSumII.py | ac8e08540e3c42c42cbe05a2de3a0b1b9e4d93f9 | [] | no_license | pondjames007/CodingPractice | 0c159ae528d1e595df0f0a901ee1ab4dd8925a14 | fb53fea229ac5a4d5ebce23216afaf7dc7214014 | refs/heads/master | 2020-06-08T02:31:04.569375 | 2020-01-15T20:41:34 | 2020-01-15T20:41:34 | 193,142,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # TIPS:
# * The way I am doing is post-order traversal
# By stacking left and right nodes
# * No. 112 Path Sum I
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def pathSum(self, root: 'TreeNode', sum: int) -> 'List[List[int]]':
        """Return all root-to-leaf paths whose node values add up to `sum`.

        Iterative pre-order DFS: each stack entry carries the running
        total, the path so far, and the node, so no recursion is needed.

        Fixes: the annotations are now string forward references --
        ``TreeNode``/``List`` are not defined in this file, so evaluating
        them at definition time raised NameError outside the LeetCode
        harness.  A leftover commented-out debug line was removed.
        """
        if not root:
            return []
        ans = []
        # Each entry: [running sum, path values so far, node].
        stack = [[root.val, [root.val], root]]
        while stack:
            cur_val, cur_list, cur = stack.pop()
            # Leaf: record the path when the accumulated sum matches.
            if not cur.left and not cur.right:
                if cur_val == sum:
                    ans.append(cur_list)
            if cur.left:
                stack.append([cur_val + cur.left.val, cur_list + [cur.left.val], cur.left])
            if cur.right:
                stack.append([cur_val + cur.right.val, cur_list + [cur.right.val], cur.right])
        return ans
"jameshuang@nyu.edu"
] | jameshuang@nyu.edu |
cbd0ba02a0baebd1327c5db3297e7fde836080d3 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/X/Xan_Guindan/estamosfartas_es.py | 57193c5aea6eb1ef53f6b3ee595ae7d8d6307ff2 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | py | ###################################################################################
# Twitter API scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Get results from the Twitter API! Change QUERY to your search term of choice.
# Examples: 'newsnight', '#newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'estamosfartas'
RESULTS_PER_PAGE = '500'
LANGUAGE = 'es'
NUM_PAGES = 5
# Page through the legacy Twitter search API and store one row per tweet,
# keyed by tweet id, in the ScraperWiki SQLite store.
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
    % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            data['time'] = result['created_at']
            # NOTE(review): the bare ``data`` statement below is a no-op --
            # probably leftover debugging.
            data
            print data['from_user'], data['text']
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare except swallows every error (including typos);
    # consider catching the specific network/JSON exceptions instead.
    except:
        print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter API scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Get results from the Twitter API! Change QUERY to your search term of choice.
# Examples: 'newsnight', '#newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'estamosfartas'
RESULTS_PER_PAGE = '500'
LANGUAGE = 'es'
NUM_PAGES = 5
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['time'] = result['created_at']
data
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
220b61e4297c5c627135874d582484e2f321d48c | ea3d7a79136594f5a6b979be6ed0b9edfe46e33b | /blog_app/models.py | ae5faaafccd6b9067e5cdd778b6f38778c0f83b4 | [] | no_license | Cuddlemuffin007/django_social | 97574b7d0606fe081b835837813c3304600ea803 | 908a8f875bca39e096afa792f0b8f656bb24caac | refs/heads/master | 2021-01-10T17:38:42.476345 | 2016-03-08T13:59:02 | 2016-03-08T13:59:02 | 53,369,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.db import models
class Follower(models.Model):
    """A follower, stored by display name only."""
    # Optional display name (blank allowed), up to 30 characters.
    follower_name = models.CharField(max_length=30, blank=True)
    def __str__(self):
        # Shown in the admin/shell; empty string if no name was set.
        return self.follower_name
class UserProfile(models.Model):
    """Per-user profile data attached one-to-one to the auth User."""
    # Exactly one profile per django.contrib.auth User.
    user = models.OneToOneField('auth.User')
    # Optional age (NULL allowed at the database level).
    age = models.IntegerField(null=True)
    # Followers associated with this profile (many-to-many).
    followers = models.ManyToManyField(Follower)
class Blog(models.Model):
    """A blog post authored by an auth User."""
    # Short post title (max 20 characters).
    title = models.CharField(max_length=20)
    # Full post text.
    body = models.TextField()
    # Set automatically when the row is first created.
    created = models.DateTimeField(auto_now_add=True)
    # Author of the post.
    user = models.ForeignKey('auth.User')
| [
"brennonkrogers@gmail.com"
] | brennonkrogers@gmail.com |
e1b3bc8875ec8237743dcdd1fdcb0bc746e7063a | 4e05cb669eba005b6ff040ab70b308ee24318f2a | /tests/testsegmentation.py | dd10b34f90d7f38caf3d60f8957dda7f94369108 | [] | no_license | fgrelard/ProcessMRI | 6e1784284bc845e853fb8dd4533dabe50f8e729e | 842bbfe6b409708e049e1a6027c59a88f435823a | refs/heads/master | 2023-08-22T09:31:53.121075 | 2021-03-11T09:49:08 | 2021-03-11T09:49:08 | 185,219,666 | 0 | 1 | null | 2021-10-12T23:10:34 | 2019-05-06T15:05:25 | Python | UTF-8 | Python | false | false | 763 | py | import unittest
import nibabel as nib
import importlib.machinery
import src.segmentation as segmentation
import src.maincontroller as mc
import numpy as np
import matplotlib.pyplot as plt
class TestSegmentation(unittest.TestCase):
    """Tests for tube detection on an MRI volume loaded from disk."""
    def setUp(self):
        # NOTE(review): hard-coded absolute path -- this test only runs on a
        # machine where the NIfTI file exists at this location.
        img = nib.load("/mnt/d/IRM/nifti/BLE/250/50/50_subscan_1.nii.gz")
        img_data = img.get_fdata()
        # Collapse all trailing dimensions into one (Fortran order), then
        # transpose so the collapsed axis comes first.
        img2 = np.reshape(img_data, (img_data.shape[0], img_data.shape[1]) + (-1,), order='F')
        img2 = img2.transpose()
        print(img2.shape)
        self.image = img2
    def test_segmentation_tube(self):
        # detect_tube apparently returns the tube's (x, y, radius).
        # NOTE(review): no assertions -- this only runs the detection and
        # displays one slice; consider asserting on (x, y, r).
        x, y, r = segmentation.detect_tube(self.image)
        plt.imshow(self.image[8,...])
        plt.show()
        print(x,y,r)
# Allow running this test module directly: `python testsegmentation.py`.
if __name__ == "__main__":
    unittest.main()
| [
"gm.florent@gmail.com"
] | gm.florent@gmail.com |
49e8c88fcb4c66d84b242e23193a9cd0d517bd5d | 2941d8b998b1f86991f4f68303f15a62742d139d | /data_preprocess.py | a931f605b2b3d43d24613d1a34725b2962259b75 | [
"BSD-3-Clause"
] | permissive | luomingshuang/GE2E-SV-TI-Voxceleb-LMS | 4c8379947123d9ecc2fa180b002b668727c30ddf | 000b0176b4b3a484e390c986045e491d64427c8b | refs/heads/master | 2020-07-02T15:43:16.821221 | 2019-08-10T04:05:30 | 2019-08-10T04:05:30 | 201,577,250 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,384 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Modified from https://github.com/JanhHyun/Speaker_Verification
import glob
import os
import librosa
import numpy as np
from hparam import hparam as hp
# Root of the VoxCeleb1 wav tree; one sub-directory per speaker.
vox1_path = '/home/momozyc/Music/voxceleb/vox1_wav'
audio_dirs = glob.glob(os.path.join(vox1_path, '*'))
#audio_files = glob.glob(os.path.join(vox1_path, '*', '*.wav'))
def save_spectrogram_tisv():
    """Extract text-independent log-mel-spectrogram features and save .npy files.

    Each utterance is split by voice-activity detection (30 dB threshold);
    for every sufficiently long voiced interval the first and the last
    `hp.data.tisv_frame` frames of its log mel spectrogram are kept.
    Speakers are split 90%/10% into train/test and one `speaker<i>.npy`
    array is written per speaker.
    """
    print("start text independent utterance feature extraction")
    os.makedirs(hp.data.train_path, exist_ok=True)    # folder for train features
    os.makedirs(hp.data.test_path, exist_ok=True)     # folder for test features

    # Lower bound (in samples) for a voiced interval to be usable.
    utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr
    total_speaker_num = len(audio_dirs)
    train_speaker_num = (total_speaker_num//10)*9     # 90% train / 10% test split
    print("total speaker number : %d"%total_speaker_num)
    print("train : %d, test : %d"%(train_speaker_num, total_speaker_num-train_speaker_num))

    # The mel filterbank depends only on fixed hyper-parameters, so build it
    # once instead of recomputing it for every voiced interval (hoisted out
    # of the loops; behavior is unchanged).
    mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)

    for i, folder in enumerate(audio_dirs):
        print("%dth speaker processing..."%i)
        print(folder)
        utterances_spec = []
        folders = glob.glob(os.path.join(folder, '*', '*'))
        print(folders)
        for utter_name in folders:
            if utter_name[-4:] == '.wav':
                print(utter_name)
                utter_path = utter_name                                     # path of each utterance
                utter, sr = librosa.core.load(utter_path, hp.data.sr)       # load utterance audio
                intervals = librosa.effects.split(utter, top_db=30)         # voice activity detection
                for interval in intervals:
                    if (interval[1]-interval[0]) > utter_min_len:           # keep only long enough intervals
                        utter_part = utter[interval[0]:interval[1]]
                        # Power spectrogram -> mel projection -> log10.
                        S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,
                                              win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))
                        S = np.abs(S) ** 2
                        S = np.log10(np.dot(mel_basis, S) + 1e-6)           # log mel spectrogram
                        utterances_spec.append(S[:, :hp.data.tisv_frame])   # first tisv_frame frames
                        utterances_spec.append(S[:, -hp.data.tisv_frame:])  # last tisv_frame frames
        utterances_spec = np.array(utterances_spec)
        print(utterances_spec.shape)
        if i < train_speaker_num:    # first 90% of speakers go to the train set
            np.save(os.path.join(hp.data.train_path, "speaker%d.npy"%i), utterances_spec)
        else:
            np.save(os.path.join(hp.data.test_path, "speaker%d.npy"%(i-train_speaker_num)), utterances_spec)
# Script entry point: run the full feature-extraction pass.
if __name__ == "__main__":
    save_spectrogram_tisv()
| [
"noreply@github.com"
] | luomingshuang.noreply@github.com |
cf3c6ee1ec55d220b02d23c84a364072c561f2e0 | c3145ee041d4d3e0cf26ec260d9409da8e8b160a | /conferences/migrations/0040_auto_20160629_2118.py | 9eba5dcbae95caffd527de9ccc971ee992718add | [] | no_license | jlundell-bot/library_website | 0b7cab541d3cf69dd97c7c8350e21315e9155798 | 59a5e48adf28ecbc43c7be145f9ec386b1066313 | refs/heads/master | 2021-06-21T14:16:57.542644 | 2017-08-18T15:51:44 | 2017-08-18T19:37:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,469 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-29 21:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.7): add the `rich_text`, `rich_text_heading`,
    `rich_text_link` and `rich_text_link_text` fields to the conference
    index/page/subpage models.  Generated operations; avoid hand-editing.
    """
    dependencies = [
        ('wagtailcore', '0028_merge'),
        ('conferences', '0039_auto_20160629_2157'),
    ]
    operations = [
        migrations.AddField(
            model_name='conferenceindexpage',
            name='rich_text',
            field=wagtail.wagtailcore.fields.RichTextField(blank=True, help_text='Should be a bulleted list or combination of h3 elements and bulleted lists'),
        ),
        migrations.AddField(
            model_name='conferenceindexpage',
            name='rich_text_heading',
            field=models.CharField(blank=True, max_length=25),
        ),
        migrations.AddField(
            model_name='conferenceindexpage',
            name='rich_text_link',
            field=models.ForeignKey(blank=True, help_text='Optional link that displays next to the heading', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='conferenceindexpage',
            name='rich_text_link_text',
            field=models.CharField(blank=True, help_text='Display text for the rich text link', max_length=25),
        ),
        migrations.AddField(
            model_name='conferencepage',
            name='rich_text',
            field=wagtail.wagtailcore.fields.RichTextField(blank=True, help_text='Should be a bulleted list or combination of h3 elements and bulleted lists'),
        ),
        migrations.AddField(
            model_name='conferencepage',
            name='rich_text_heading',
            field=models.CharField(blank=True, max_length=25),
        ),
        migrations.AddField(
            model_name='conferencepage',
            name='rich_text_link',
            field=models.ForeignKey(blank=True, help_text='Optional link that displays next to the heading', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='conferencepage',
            name='rich_text_link_text',
            field=models.CharField(blank=True, help_text='Display text for the rich text link', max_length=25),
        ),
        migrations.AddField(
            model_name='conferencesubpage',
            name='rich_text',
            field=wagtail.wagtailcore.fields.RichTextField(blank=True, help_text='Should be a bulleted list or combination of h3 elements and bulleted lists'),
        ),
        migrations.AddField(
            model_name='conferencesubpage',
            name='rich_text_heading',
            field=models.CharField(blank=True, max_length=25),
        ),
        migrations.AddField(
            model_name='conferencesubpage',
            name='rich_text_link',
            field=models.ForeignKey(blank=True, help_text='Optional link that displays next to the heading', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='conferencesubpage',
            name='rich_text_link_text',
            field=models.CharField(blank=True, help_text='Display text for the rich text link', max_length=25),
        ),
    ]
| [
"bbusenius@users.noreply.github.com"
] | bbusenius@users.noreply.github.com |
0a36f64bbeb284cabfa37de0e47f4dfd52159f50 | ad10cd9aecb8e1b277996484a35f54544a3f56c4 | /New folder (3)/programs/firstprogram.py | 8e0c1cc28423c66723be458fd9685313f736c370 | [] | no_license | gokul05021992/whole-projects | 4a4264ec75e1878a3661145a63c99c2b4b7c8350 | 993a8b8db83266e434387730cc5f8b16e4a8b77e | refs/heads/master | 2023-05-22T18:36:38.651925 | 2021-06-13T01:09:11 | 2021-06-13T01:09:11 | 376,460,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | print("hellow")
# Beginner demo: echo a constant, read two integers and a name, print the sum.
A = 1
print("a value is ", A)
B = int(input("enter the b value"))
print("b value is", B)
C = input("enter your name ")
print(C)
D = int(input("enter the value"))
# Use `total` instead of shadowing the built-in `sum`.
total = B + D
print("total is", total)
"bgokul92@gmail.com"
] | bgokul92@gmail.com |
99e08dc8c404c649f4b96eff136ea4cb9283f92e | c34308d9e283d3689baeade246b69dad13eea0c1 | /Demo/SQLDemo/e4_insert_into_table.py | 05fb7dda0bc78dcd7abf0010705c74fa53898954 | [] | no_license | michaelChen07/studyPython | d19fe5762cfbccdff17248d7d5574939296d3954 | 11a2d9dd0b730cad464393deaf733b4a0903401f | refs/heads/master | 2021-01-19T00:20:27.347088 | 2017-05-13T08:43:44 | 2017-05-13T08:43:44 | 73,004,133 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | #coding=utf-8
import MySQLdb
import random
import time
# Open a connection to the local MySQL server.
# NOTE(review): credentials are hard-coded; consider config/env vars.
conn = MySQLdb.connect(
    host = "127.0.0.1",
    port = 3306,
    user = "root",
    passwd = "gloryroad" ,
    db = "pythondbnew",
    charset = "utf8"
    )
# Use cursor() to obtain a cursor for executing SQL statements.
cursor = conn.cursor()
# Method 1: one INSERT statement listing many value tuples literally.
insert = cursor.execute('''insert into emp_info (name,sex,dept,mobile,birthday)\
values("jason","male","hr","13911056507",now()),\
("foseter","male","dev","13911056501",now()),\
("strong","male","dev","13911056502",now()),\
("curry","male","hr","13911056503",now()),\
("james","male","hr","13911056504",now()),\
("lily","fm","hr","13911056505",now()),\
("jane","fm","account","13911056506",now()),\
("shely","fm","account","13911056508",now()),\
("mcdona","fm","account","13911056509",now()),\
("mary","fm","account","13911056510",now());''')
print u"添加语句受影响的行数:", insert
# Method 2: pass values through %s placeholders; this prevents SQL
# injection, but each execute() inserts only one row.
sql = "insert into salary (emp_id,salary) values(%s, %s)"
cursor.execute(sql, (1,1000))
# Several rows can be inserted by looping over the parameter tuples.
list_insert = [(2,2000),(3,3000),(4,4000),(5,5000),(6,6000),(7,7000),(8,8000),(9,9000),(10,10000)]
for i in list_insert:
    cursor.execute(sql, i)
# Method 3: like method 2, but executemany() inserts a whole batch at once.
sql = "insert into user values(%s, %s, %s, %s)"
insert = cursor.executemany(sql, [
    (5,'tom','tom','1989-03-17'),
    (6,'amy','test','1898-12-01'),
    (7,'lily','linux','1994-06-23')])
print u"批量插入返回受影响的行数:", insert
# Current date as a "YYYY-MM-DD" string (used for the date column below).
def now():
    return time.strftime("%Y-%m-%d")
for i in range(10,20):
    # Insert rows filled with random ids/names.
    sql = "insert into user values(%s, %s, %s, %s)"
    cursor.execute(sql, (random.randint(1,10000),'lucy'+str(random.randint(1,10000)),'efg'+str(random.randint(1,10000)),now()))
# Close the cursor.
cursor.close()
# Commit the transaction.
conn.commit()
# Close the database connection.
conn.close()
print u"sql语句执行成功!"
| [
"286522215@qq.com"
] | 286522215@qq.com |
dc503665e026df92febb09f40f4492b783d04a36 | 32aeef033b0d6af953f698a934ee7864c6102b08 | /crm/views.py | dc5d9df20ca150164f5a140d15c43538f2e86deb | [] | no_license | so1so2so/Mycrm | 3d5aee13a93ce3aff47fe6715cc8ced2f4d69b80 | 34c59bbf24801871fe60ac6c2bc66443d1beb502 | refs/heads/master | 2021-08-08T01:36:02.245935 | 2017-11-09T09:26:07 | 2017-11-09T09:26:07 | 109,856,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.shortcuts import render, HttpResponse
# Create your views here.
def index(requset):
    """Render the site index page."""
    # NOTE(review): the parameter name is a typo for "request"; Django passes
    # it positionally so behavior is unaffected.
    return render(requset, 'index.html')
    # return HttpResponse('zhangneb')
| [
"1037930435@qq.com"
] | 1037930435@qq.com |
6a984e08f9194841186b33197b6fed29dc0c2737 | 5ef268c504ad792d49257bb665e0f2128554ebda | /integration/bitcnt-2/test.py | 5156f38da5df127bb3446ea2162d1d8b28153480 | [
"MIT",
"Python-2.0"
] | permissive | qq431169079/retdec-regression-tests | eb40905310af042ae4e9d3fa17e653192c4896e8 | e313b867daae310649fb955f45dabdd849e35a16 | refs/heads/master | 2020-04-11T12:08:49.034461 | 2018-12-04T13:14:29 | 2018-12-04T13:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from regression_tests import *
class TestBase(Test):
    """Shared check: the program under test reports the number of set bits
    for each decimal input."""

    def test_produce_expected_output(self):
        # Each case: (stdin value, expected count of set bits in that number).
        cases = [
            ('255', 8),
            ('256', 1),
            ('1023', 10),
            ('556445', 12),
            ('457785621', 12),
        ]
        for number, bit_count in cases:
            self.assert_c_produces_output_when_run(
                input=number,
                expected_return_code=0,
                expected_output='%s contains %d bit set\n' % (number, bit_count),
            )
class Test_2017(TestBase):
    # Run the shared checks against the inputs from the 2017-11-14 directory.
    settings_2017 = TestSettings(
        input=files_in_dir('2017-11-14'),
    )
class Test_2015(TestBase):
    # Run the shared checks against the inputs from the 2015-03-30 directory.
    settings_2015 = TestSettings(
        input=files_in_dir('2015-03-30'),
    )
| [
"petr.zemek@avast.com"
] | petr.zemek@avast.com |
4da0ed25bcde1f134aa80715a4bc50452cfb53b5 | 89fbfd0bc72c377040f1b642e662971de5151268 | /lisztfeverapp/users/admin.py | f6e7c0c5cb550d1b97f018c1d0b22b6a88750c66 | [] | no_license | altdotenv/lisztfeverapp | 790372f6865e4b371ed097d88c6f20ae6b5f9560 | 1d50bba74eec6ee415207f4e37f4991d662d6a60 | refs/heads/master | 2020-03-09T21:35:04.665519 | 2018-07-30T01:57:00 | 2018-07-30T01:57:00 | 129,012,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
from . import models
class MyUserChangeForm(UserChangeForm):
    """User change form bound to the project's custom User model."""
    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """Creation form for the custom User model with an explicit
    duplicate-username check and a friendlier error message."""

    # Extend the parent's messages in a NEW dict.  The previous code did
    # `error_message = UserCreationForm.error_messages.update({...})`, which
    # mutated the shared parent class dict in place and assigned None
    # (dict.update() returns None) to an unused attribute.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username="This username has already been taken.",
    )

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Return the username if it is free; raise ValidationError otherwise."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages["duplicate_username"])
# @admin.register(User)
# class MyUserAdmin(AuthUserAdmin):
# form = MyUserChangeForm
# add_form = MyUserCreationForm
# fieldsets = (
# ("User Profile", {
# "fields": ("name", "profile_pic", "gender")}),
# ) + AuthUserAdmin.fieldsets
# list_display = ("username", "name", "is_superuser")
# search_fields = ["name"]
# @admin.register(models.Plan)
# class PlanAdmin(admin.ModelAdmin):
#
# list_display = (
# "user_id",
# "event_id",
# "updated_at",
# )
| [
"jisu.han3201@gmail.com"
] | jisu.han3201@gmail.com |
631b1834f5f79136d605be091648a69a8ffaee30 | 771b50f241c6bd40c348b8bc6bd969e65aabbe52 | /scripts/gen-vocab.py | af626c52f6720b946e69ade37d1f68159734f3d2 | [
"MIT"
] | permissive | sakshamjindal/NSCL-PyTorch-Release | 0abd26520b9e3f64a8506b1f684f5a830feaa167 | 830842d10da68f82916c131e1f1cbf31b5f8b918 | refs/heads/master | 2022-12-30T05:50:05.924479 | 2020-10-25T13:56:32 | 2020-10-25T13:56:32 | 306,860,381 | 1 | 0 | MIT | 2020-10-25T13:56:33 | 2020-10-24T10:36:11 | Python | UTF-8 | Python | false | false | 1,444 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gen-vocab.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 11/11/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
import os.path as osp
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from nscl.datasets import get_available_symbolic_datasets, initialize_dataset, get_symbolic_dataset_builder
logger = get_logger(__file__)
# Command-line interface: which dataset to load, where its files live and
# where to write the resulting vocabulary json.
parser = JacArgumentParser()
parser.add_argument('--dataset', required=True, choices=get_available_symbolic_datasets(), help='dataset')
parser.add_argument('--data-dir', required=True)
parser.add_argument('--data-scenes-json', type='checked_file')
parser.add_argument('--data-questions-json', type='checked_file')
parser.add_argument('--output', required=True)
args = parser.parse_args()
# Default the scene/question json paths to conventional names under --data-dir.
if args.data_scenes_json is None:
    args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
if args.data_questions_json is None:
    args.data_questions_json = osp.join(args.data_dir, 'questions.json')
# NOTE(review): presumably forces the builder to construct a fresh vocab
# instead of loading an existing one -- confirm against the dataset builder.
args.data_vocab_json = None
def main():
    """Build the symbolic dataset and dump its vocabulary as json to --output."""
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataset.unwrapped.vocab.dump_json(args.output)
    # NOTE(review): critical level presumably chosen just for log visibility.
    logger.critical('Vocab json dumped at: "{}".'.format(args.output))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"maojiayuan@gmail.com"
] | maojiayuan@gmail.com |
8b0078aacc3ba56e3e25ba43034870d931a1a73f | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/validators/carpet/hoverlabel/font/__init__.py | dc2b5df9b733f31e22a8c42cd3eebde3b52c5612 | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 3,255 | py |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'sizesrc' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='sizesrc',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'none'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the 'size' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='size',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop('array_ok', True),
            edit_type=kwargs.pop('edit_type', 'none'),
            min=kwargs.pop('min', 1),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'familysrc' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='familysrc',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(FamilysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'none'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the 'family' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='family',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop('array_ok', True),
            edit_type=kwargs.pop('edit_type', 'none'),
            no_blank=kwargs.pop('no_blank', True),
            role=kwargs.pop('role', 'style'),
            strict=kwargs.pop('strict', True),
            **kwargs
        )
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'colorsrc' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='colorsrc',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'none'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the 'color' property of 'carpet.hoverlabel.font'."""
    def __init__(
        self,
        plotly_name='color',
        parent_name='carpet.hoverlabel.font',
        **kwargs
    ):
        # Defaults below can be overridden via **kwargs; pop() prevents
        # passing the same keyword twice to the base class.
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop('array_ok', True),
            edit_type=kwargs.pop('edit_type', 'none'),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
| [
"noreply@github.com"
] | Jonathan-MW.noreply@github.com |
e5601d7d828d5283d62f4a1c7ffc0982c9a8f929 | f16ee0bd3c27c2f6c1f9b343993d6d76feaf4eda | /Appium/LinBaO_Android/src/ReleasePage/Case12_JiaSi_CommodityTypes.py | 6612843911d74ca382e21246dde5ee3d9c4cf678 | [] | no_license | Fengyongming0311/TANUKI | 482e4b8f779265da67fe1943cda5dae9c4c3dc55 | 618a47ea572f8fccfbf10f5f50aff1dfffb7b0e3 | refs/heads/master | 2021-12-25T05:33:58.891202 | 2021-12-21T06:23:55 | 2021-12-21T06:23:55 | 171,840,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | __author__ = 'TANUKI'
# coding:utf-8
import time,sys
sys.path.append("..")
class JiaSiCommodityTypes:
    """UI test step: select a commodity category from the dropdown and
    verify the dropdown title updates accordingly."""
    def JiaSiCommodityTypes(driver):
        """Open the "goods categories" dropdown, pick the 8th entry and
        check that the title shows "茶几"; returns True on success,
        False on any caught error.

        NOTE(review): defined without `self` -- apparently invoked as
        `JiaSiCommodityTypes.JiaSiCommodityTypes(driver)`.  `driver` is
        assumed to be an Appium/Selenium WebDriver -- confirm with callers.
        """
        try:
            driver.implicitly_wait(10)
            time.sleep(2)
            # Open the categories dropdown (a <wx-dropdownmenu> element).
            driver.find_element_by_css_selector("wx-dropdownmenu[class=\"goods_categories\"][id=\"goods_categories_1\"]").click()
            time.sleep(5)
            # Click the 8th entry of the expanded category list.
            driver.find_element_by_xpath('//*[@id="goods_categories_1"]/wx-view[2]/wx-view/wx-view/wx-view[8]/wx-view').click()
            time.sleep(5)
            # Read the dropdown's title element to verify the selection.
            check = driver.find_element_by_xpath('//*[@id="goods_categories_1"]/wx-view[1]/wx-view')
            print ("验证选择的场景文本===============:",check.text)
            if check.text == "茶几":
                unittest_TestResult = True
            else:
                raise Exception("check.text不是茶几,验证失败")
        except Exception as e:
            print ("家私选择场景执行脚本报错信息为:",e)
            unittest_TestResult = False
        finally:
            # NOTE(review): returning from `finally` also swallows any
            # exception not caught above (e.g. KeyboardInterrupt).
            return unittest_TestResult
"fengyongming0311@163.com"
] | fengyongming0311@163.com |
480659c675c451bb8083b0d428c86fc6afb442bc | 29bd55d171733586f24f42151d44f4312b6a610e | /Python/python03_list.py | 60fdebac0ed5aa07e465530a0bc0f6ae85529d6c | [] | no_license | votus777/AI_study | 66ab1da2b8e760d0c52b0ed2b2f74158e14f435b | f4e38d95690c8ee84d87c02dc20a1ea59c495f04 | refs/heads/master | 2022-12-04T15:52:14.855624 | 2020-08-20T06:12:52 | 2020-08-20T06:12:52 | 262,975,960 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py |
# Data types
# 1. Lists
a = [ 1,2,3,4,5]
b = [1,2,3,'a', 'b']
print(b) # [1, 2, 3, 'a', 'b']
# Items inside a list may have different types,
# but data stored in a numpy array must all share a single dtype.
print(a[0] + a[3]) # 5 (1+4)
# print(b[0] + a[3]) # error: int + str is not allowed
print (type(a)) # list
print (a[-2]) # 4 (negative indices count from the end)
print (a[1:3]) # [2, 3]
a = [1,2,3, ['a', 'b', 'c']]
print(a[1]) # 2
print(a[-1]) # ['a','b','c']
print (a[-1][1]) # b
# 1-2. List slicing
a = [1,2,3,4,5]
print (a[:2]) # [1, 2]
# 1-3. Adding lists
a = [1,2,3]
b = [4,5,6]
print(a+b) # [1,2,3,4,5,6]? [5,7,9]? -> [1, 2, 3, 4, 5, 6] (concatenation)
# Inside numpy the result would be [5, 7, 9] instead -- elementwise,
# the intuitive way a person would add them (np.array).
c = [7,8,9,10]
print(a+c) # [1, 2, 3, 7, 8, 9, 10] (lengths need not match)
print (a*3) # [1, 2, 3, 1, 2, 3, 1, 2, 3] (repetition, not scaling)
# print (a[2] + 'hi') # error: int + str
print (str(a[2]) + 'hi') # 3hi
f = '5'
# print ((a[2]) + f) # error: int + str
print ((a[2]) + int(f)) # 8
# List-related methods
# (the ones you will see most often in practice)
'''
*append* 중요
sort
index
insert
remove
'''
a.append(4) # append to the end
# a = a.append(5) # error-prone: append returns None, so this loses the list;
# just call a.append() on its own
a = [1, 3, 4, 2]
a.sort()
print(a) # [1, 2, 3, 4]
a.reverse()
print(a) # [4, 3, 2, 1]
print(a.index(3)) # a is [4, 3, 2, 1], so value 3 is at index 1
print(a.index(1)) # value 1 is at index 3
a.insert(0, 7)
print(a) # [7, 4, 3, 2, 1]
a.insert(3, 3)
print(a) # [7, 4, 3, 3, 2, 1]
a.remove(7)
print(a) # [4, 3, 3, 2, 1]
a.remove(3)
print(a) # [4, 2, 1]? [4, 3, 2, 1]? -> only the first match is removed: [4, 3, 2, 1]
'''
list, slicing, append 는 주구장창 쓸 거다
'''
"votus777@users.noreply.github.com"
] | votus777@users.noreply.github.com |
096b04f07443d8fd221786be8b63b62be958f991 | 388e90c9cfdb59b28654d711fe1943aeda7cf3fd | /third_party/rlkit_library/examples/td3.py | 61799e1e9ac79b7e7e9684163cb06f8ae9823600 | [
"MIT",
"Apache-2.0"
] | permissive | google-research/DBAP-algorithm | bbb2827de7f00c9efa30e3cde109ff755b4d008a | 545a4e780f9d9d480c96b67e7a8ae590a983db6b | refs/heads/main | 2023-07-05T00:38:47.424870 | 2021-08-09T23:59:10 | 2021-08-09T23:59:10 | 394,473,322 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,448 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This should results in an average return of ~3000 by the end of training.
Usually hits 3000 around epoch 80-100. Within a see, the performance will be
a bit noisy from one epoch to the next (occasionally dips dow to ~2000).
Note that one epoch = 5k steps, so 200 epochs = 1 million steps.
"""
from gym.envs.mujoco import HalfCheetahEnv
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
from rlkit.torch.td3.td3 import TD3Trainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
def experiment(variant):
    """Build the TD3 agent from `variant` and run training on HalfCheetah.

    `variant` supplies kwargs for the Q-networks ('qf_kwargs'), policy
    ('policy_kwargs'), trainer ('trainer_kwargs'), the replay buffer size
    and the batch RL algorithm ('algorithm_kwargs').
    """
    # Separate exploration and evaluation environments.
    expl_env = NormalizedBoxEnv(HalfCheetahEnv())
    eval_env = NormalizedBoxEnv(HalfCheetahEnv())
    obs_dim = expl_env.observation_space.low.size
    action_dim = expl_env.action_space.low.size
    # Twin Q-networks and their target copies.
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    target_qf1 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    target_qf2 = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    # Deterministic policy network and its target copy.
    policy = TanhMlpPolicy(
        input_size=obs_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    target_policy = TanhMlpPolicy(
        input_size=obs_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    # Gaussian exploration noise with a fixed sigma of 0.1.
    es = GaussianStrategy(
        action_space=expl_env.action_space,
        max_sigma=0.1,
        min_sigma=0.1,  # Constant sigma
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    # Evaluation collects with the noiseless policy; exploration with noise.
    eval_path_collector = MdpPathCollector(
        eval_env,
        policy,
    )
    expl_path_collector = MdpPathCollector(
        expl_env,
        exploration_policy,
    )
    replay_buffer = EnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
    )
    trainer = TD3Trainer(
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        target_policy=target_policy,
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    # Move networks to the configured device and run the training loop.
    algorithm.to(ptu.device)
    algorithm.train()
# Hyper-parameters handed to experiment(); one epoch corresponds to 5k env
# steps, so 200 epochs = 1M steps (see the module docstring).
if __name__ == "__main__":
    variant = dict(
        algorithm_kwargs=dict(
            num_epochs=3000,
            num_eval_steps_per_epoch=5000,
            num_trains_per_train_loop=1000,
            num_expl_steps_per_train_loop=1000,
            min_num_steps_before_training=1000,
            max_path_length=1000,
            batch_size=256,
        ),
        trainer_kwargs=dict(
            discount=0.99,
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        replay_buffer_size=int(1E6),
    )
    # ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
    setup_logger('rlkit-post-refactor-td3-half-cheetah', variant=variant)
    experiment(variant)
| [
"karolhausman@google.com"
] | karolhausman@google.com |
1e7409a41d0d5782c505a94f099bcf920419e3d4 | da3ee480f882df7d4f0897a1d0a51abc5253737c | /src/ocd/storages.py | 2411cdc19427aa20ce83c13d43b8688f807a2a40 | [
"BSD-2-Clause"
] | permissive | Niros/OpenCommunity | 4876c7ec57dd1487fbf9484b51c389b0f1e755ab | 4c91136db6243a1cd65b55ecf5a44c2bce24a45a | refs/heads/master | 2021-01-15T12:54:39.037042 | 2013-11-05T11:03:52 | 2013-11-05T11:03:52 | 14,309,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.conf import settings
from django.core.files.storage import FileSystemStorage
# Dedicated filesystem storage rooted at settings.UPLOAD_PATH for uploads.
uploads_storage = FileSystemStorage(settings.UPLOAD_PATH)
| [
"udioron@gmail.com"
] | udioron@gmail.com |
b8d07e9a80c471b2a19d8a1e3f74ff7e0e7b5b2b | 9ad1d339cdd4424c7fcba0f58bd2a5fe74ac5529 | /manage.py | 87b7187a536746afc97a322590dc5c9be1a70fe4 | [] | no_license | viralsir/djangoProject | d26892e47216511b053858922d908a522a19cde5 | 7ef1b959c7091e522bcafa45f0e490a1a895587e | refs/heads/master | 2023-01-23T12:30:20.668464 | 2020-12-07T08:45:57 | 2020-12-07T08:45:57 | 319,211,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: hand the process's argv to Django's admin utility."""
    # Point Django at this project's settings unless the environment
    # already names a settings module.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original failure so its traceback stays visible.
        raise ImportError(
            "forget to activate a virtual environment?"
        ) from exc
    else:
        execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"viralsir2018@gmail.com"
] | viralsir2018@gmail.com |
60193f621f11ea2ae9a6d684dd0775aef45cb307 | 58eac1826eb44833ffc5a4c61ae017883b202334 | /my_cookbook/skill/handlers/ingredients_or_instructions.py | 6725f95720c3d99849d073832b57f53bbd177813 | [] | no_license | PeterMitrano/my_cookbook | dd8b91170cd101d453c942972a87b2d85dc163b4 | 46d47e863d10947b3052fc59351da5d83a8dba36 | refs/heads/master | 2021-01-19T06:36:26.718989 | 2016-10-17T03:35:17 | 2016-10-17T03:35:17 | 61,961,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py | from my_cookbook.util import core
from my_cookbook.util import responder
from my_cookbook.util import recipe_reader
class IngredientsOrInstructionsHandler():
    """Voice-skill intent handlers that read a recipe's ingredient list or
    step-by-step instructions.  Session state travels in ``attributes``;
    responses are built via the project's ``responder`` helpers."""
    def IngredientsIntent(self, handlers, persistant_attributes, attributes, slots):
        """Speak the full ingredient list for the currently selected recipe,
        or tell the user to pick a recipe first."""
        # check we've got a working recipe at the moment
        if 'current_recipe' not in attributes:
            return responder.tell("I can't list ingredients because you haven't picked a recipe.")
        speech = recipe_reader.ingredients_speech(attributes['current_recipe'])
        card = recipe_reader.ingredients_card(attributes['current_recipe'])
        # NOTE(review): the backslash line continuation is inside the string
        # literal, so the continuation line's text is joined directly into
        # the spoken prompt — confirm the rendered speech reads naturally.
        return responder.ask_with_card("The ingredients are. " + speech + ". Do you want to hear \
instructions, or ingredients again?", None, "Ingredients", card, None, attributes)
    def InstructionsIntent(self, handlers, persistant_attributes, attributes, slots):
        """Speak the current instruction step (``attributes['step_number']``,
        defaulting to 0) and prompt whether to continue."""
        # check we've got a working recipe at the moment
        if 'current_recipe' not in attributes:
            return responder.tell("I can't start instructions because you haven't picked a recipe.")
        step_number = attributes.get('step_number', 0)
        instructions = attributes['current_recipe']['Instructions']
        # Out-of-range step means there is nothing (left) to read.
        if len(instructions) <= step_number:
            return responder.tell("this recipe doesn't have any instructions.")
        instruction = instructions[step_number]
        card = recipe_reader.instructions_card(attributes['current_recipe'])
        return responder.ask_with_card(
            instruction + ". <break time=2/> would you like to hear the next step?", None,
            "Instructions", card, None, attributes)
    def Unhandled(self, handlers, persistant_attributes, attributes, slots):
        """Catch-all re-prompt when the utterance matched no known intent."""
        return responder.ask("I'm confused. Do you want to start with ingredients or instructions?",
                "Say which one you want.", attributes)
# Module-level singleton consumed by the skill's dispatcher, together with
# the state constant this handler serves (presumably used for state-based
# routing — TODO confirm against the dispatcher).
handler = IngredientsOrInstructionsHandler()
state = core.States.INGREDIENTS_OR_INSTRUCTIONS
| [
"mitranopeter@gmail.com"
] | mitranopeter@gmail.com |
8410d4407487790ffd9390d1380792d2c3cb81f1 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2JHYavYqynX8ZCmMG_6.py | 917033bb69be0d4fe969baad4c56947a20f83bc7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | """
Create a function that compares two words based on the sum of their ASCII
codes and returns the word with the smaller ASCII sum.
### Examples
ascii_sort(["hey", "man"]) ➞ "man"
# ["h", "e", "y"] ➞ sum([104, 101, 121]) ➞ 326
# ["m", "a", "n"] ➞ sum([109, 97, 110]) ➞ 316
ascii_sort(["majorly", "then"]) ➞ "then"
ascii_sort(["victory", "careless"]) ➞ "victory"
### Notes
Both words will have strictly different ASCII sums.
"""
def ascii_sort(lst):
    """Return whichever of the two words has the smaller ASCII-code sum.

    The two sums are assumed to be strictly different (per the kata spec).
    """
    first_total = sum(map(ord, lst[0]))
    second_total = sum(map(ord, lst[1]))
    return lst[0] if first_total < second_total else lst[1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8820cbda2c5e724551c7818152f98925ae46c154 | ca37ff6a91d42a2a1cafeef842e1359c0ae48c1d | /backend/base/migrations/0002_order_orderitem_review.py | 530f912d5c215034b8660233175bf80f5a100df1 | [] | no_license | naol-hub/best_shop | 77b629b4a7c2c63e94e7921dd013122e2233f2bf | 93d8bdf629c148520e5cefe48a154f7453983937 | refs/heads/master | 2023-06-19T18:47:23.573613 | 2021-07-21T19:19:54 | 2021-07-21T19:19:54 | 388,220,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | # Generated by Django 3.1.4 on 2021-07-04 16:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (3.1.4); adds the Order,
    # Review and OrderItem tables.  Do not hand-edit field definitions —
    # generate a follow-up migration instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('base', '0001_initial'),
    ]
    operations = [
        # Order: one checkout per user; money columns are DECIMAL(7,2) and
        # the user FK is nulled (not cascaded) when the user is deleted.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('paymentMethod', models.CharField(blank=True, max_length=200, null=True)),
                ('taxPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('shippingPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('totalPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('isPaid', models.BooleanField(default=False)),
                ('paidAt', models.DateTimeField(blank=True, null=True)),
                ('isDelivered', models.BooleanField(default=False)),
                ('deliveredAt', models.DateTimeField(blank=True, null=True)),
                ('createdAt', models.DateTimeField(auto_now_add=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Review: a user's rating/comment on a product.
        migrations.CreateModel(
            name='Review',
            fields=[
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('rating', models.IntegerField(blank=True, default=0, null=True)),
                ('comment', models.TextField(blank=True, null=True)),
                ('createdAt', models.DateTimeField(auto_now_add=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.product')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # OrderItem: a line item linking a product to an order, with the
        # price/name snapshotted at purchase time.
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('qty', models.IntegerField(blank=True, default=0, null=True)),
                ('price', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('image', models.CharField(blank=True, max_length=200, null=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.order')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.product')),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
e498d1690cc941bf265d5f8bd404dac926f3af76 | 7f4191f0e12a70d465b15762ce83b57b4976d448 | /Chapter3/INheretence.py | 8ea1996f796b4fa8c413cbaa33fd59124f4555b0 | [] | no_license | PacktPublishing/Hands-On-Penetration-Testing-with-Python | 33f72df57b9158e002f78330c1242e1fde777898 | 7b11c8e63e4ac350ba138161f60f7ce4c08ed7cd | refs/heads/master | 2023-02-06T04:52:12.475428 | 2023-01-30T10:03:47 | 2023-01-30T10:03:47 | 131,272,051 | 79 | 40 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | #! /usr/bin/python3.5
class Id_Generator():
    """Hands out sequential integer ids, starting from 1."""
    def __init__(self):
        self.id = 0
    def generate(self):
        """Advance the counter and return the new id."""
        self.id += 1
        return self.id
class Department():
    """A named department at a fixed location."""
    def __init__(self, name, location):
        self.name = name
        self.loc = location
    def DepartmentInfo(self):
        """Return a one-line human-readable summary of the department."""
        return "Department Name : {0}, Location : {1}".format(self.name, self.loc)
class Manager():
    """Manager identity: numeric id plus display name."""
    def __init__(self, m_id, name):
        self.m_id = m_id
        self.name = name
    def ManagerInfo(self):
        """Return a one-line human-readable summary of the manager."""
        return "Manager Name : {0}, Manager id : {1}".format(self.name, self.m_id)
class Address():
    """Postal address record.

    ``AddressInfo`` reports only country/state/area; the 'Street' label is
    filled from ``area`` (mirroring the original behaviour).
    """
    def __init__(self, country, state, area, street, zip_code):
        self.country = country
        self.state = state
        self.area = area
        self.street = street
        self.zip_code = zip_code
    def AddressInfo(self):
        """Return a one-line human-readable summary of the address."""
        return "Country : {0}, State : {1}, Street : {2}".format(
            self.country, self.state, self.area)
class Employee():
    """Employee record aggregating department, manager and address objects."""
    def __init__(self, Name, id_gen, dept=None, manager=None, address=None):
        # Pull a fresh unique id from the caller-supplied generator.
        self.Id = id_gen.generate()
        self.Name = Name
        self.D_id = None
        self.Salary = None
        self.dept = dept
        self.manager = manager
        self.address = address
    def printDetails(self):
        """Print a formatted summary of this employee to stdout.

        Requires ``dept``/``manager``/``address`` to be set.
        """
        for line in (
            "\n",
            "Employee Details : ",
            "ID : {0}".format(self.Id),
            "Name : {0}".format(self.Name),
            "Salary : {0}".format(self.Salary),
            "Department :\n\t{0}".format(self.dept.DepartmentInfo()),
            "Manager : \n\t{0}".format(self.manager.ManagerInfo()),
            "Address : \n\t{0}".format(self.address.AddressInfo()),
            "------------------------------",
        ):
            print(line)
# Demo: build one employee with its department/manager/address and print it.
Id_gen=Id_Generator()
m=Manager(100,"Manager X")
d=Department("IT","Delhi")
a=Address("UAE","Dubai","Silicon Oasis","Lavista 6","xxxxxx")
emp1=Employee("Emp1",Id_gen,d,m,a)
emp1.Salary=20000
emp1.D_id=2
emp1.printDetails()
# Second example disabled by the original author via a string literal.
# NOTE(review): it prints emp1 twice — presumably a typo for emp2; confirm
# before re-enabling.
"""emp2=Employee("Emp2",Id_gen)
emp2.Salary=10000
emp2.D_id=1
emp1.printDetails()
emp2.printDetails()"""
| [
"furqankhan08@gmail.com"
] | furqankhan08@gmail.com |
6fd710ff73ec9263e9e55367fc477e652acb033b | 5e22728a45dc131b5abcdde3c10928557177898b | /msticnb/nblib/entity_tools.py | 1544e4239ee714babd97514089a5bc45b8f2b108 | [
"MIT"
] | permissive | microsoft/msticnb | 74fc9636964be68900702ee0c85b0c992f0779ad | cefc4ee5a22285d33e7abd91371c617fe42f8129 | refs/heads/main | 2023-06-30T02:00:29.253130 | 2023-03-16T20:14:27 | 2023-03-16T20:14:27 | 250,417,186 | 25 | 11 | MIT | 2023-03-16T20:14:28 | 2020-03-27T02:00:17 | Python | UTF-8 | Python | false | false | 1,252 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Entity Helper functions."""
from typing import Dict, List, Union
import pandas as pd
from .._version import VERSION
__version__ = VERSION
__author__ = "Ian Hellen"
def extract_entities(
    data: pd.DataFrame, cols: Union[str, List[str]]
) -> Dict[str, List[str]]:
    """
    Extract items from one or more columns (scalar values or lists).

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame to parse
    cols : Union[str, List[str]]
        Column name, or list of column names, to use for input

    Returns
    -------
    Dict[str, List[str]]
        Dictionary of (column: result_list).  Columns holding lists are
        flattened and de-duplicated (result order unspecified); scalar
        columns are returned in frame order.

    """
    if not isinstance(cols, list):
        cols = [cols]
    val_results = {}
    for col in cols:
        ent_vals = list(data[col].values)
        # Guard on non-emptiness before peeking at the first element: the
        # previous implementation read .iloc[0] unconditionally and raised
        # IndexError for an empty frame.
        if ent_vals and isinstance(ent_vals[0], list):
            ent_vals = list({ent for ent_list in ent_vals for ent in ent_list})
        val_results[col] = ent_vals
    return val_results
| [
"ianhelle@microsoft.com"
] | ianhelle@microsoft.com |
76d34d609a1f8d2dd9bc469663aa252496e1d5df | 529bba93d8671fb3ca3844b4df4c3e192805c3f9 | /Lesson 1 - Homeworks/find 1/find1.py | 70b0a0bed91892e1830de4e07d5114652366045a | [] | no_license | zischuros/Introduction-To-Computer-Science-JPGITHUB1519 | 140b131edd6711197c2f2c6ab3f3a8efbb57dc18 | 0556e82a6f74577b4542c80e11471c03325236f0 | refs/heads/master | 2023-03-23T14:46:14.721869 | 2016-05-04T21:55:19 | 2016-05-04T21:55:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # Assume text is a variable that
# holds a string. Write Python code
# that prints out the position
# of the first occurrence of 'hoo'
# in the value of text, or -1 if
# it does not occur at all.
text = "first hoo"
# ENTER CODE BELOW HERE
# str.find returns the index of the first occurrence, or -1 when 'hoo'
# does not occur at all.  Parenthesised so the script runs under Python 3
# as well as Python 2 (the original bare `print` statement is a
# SyntaxError on Python 3).
print(text.find("hoo"))
"juanpedro1519@gmail.com"
] | juanpedro1519@gmail.com |
bda082cc78e9cd787776e49c9ed4dce261314ee1 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayUserCardActivateurlQueryResponse.py | 64d32016949b8988e7f3520c8fe40ba9ba078b48 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayUserCardActivateurlQueryResponse(AlipayResponse):
    """Response wrapper for the alipay.user.card.activateurl.query API.

    Exposes the card-activation URL and the gateway's callback value.
    """
    def __init__(self):
        super(AlipayUserCardActivateurlQueryResponse, self).__init__()
        self._apply_card_url = None
        self._callback = None
    @property
    def apply_card_url(self):
        """URL the user visits to apply for / activate the card."""
        return self._apply_card_url
    @apply_card_url.setter
    def apply_card_url(self, value):
        self._apply_card_url = value
    @property
    def callback(self):
        """Callback value returned by the gateway."""
        return self._callback
    @callback.setter
    def callback(self, value):
        self._callback = value
    def parse_response_content(self, response_content):
        """Populate this object's fields from the decoded gateway response."""
        response = super(AlipayUserCardActivateurlQueryResponse, self).parse_response_content(response_content)
        # setattr goes through the property setters above, matching the
        # original field-by-field assignments.
        for field in ('apply_card_url', 'callback'):
            if field in response:
                setattr(self, field, response[field])
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
bc3c39df6e99e811e0ba62dc289bcc6a02651920 | 3823c76ef517d9995920455c87b3f3fbf75aa06b | /download_data_ooi1_0.py | b9cf53b3407217433cfa78ac3f010d533084a6f7 | [] | no_license | ooi-data-lab/data-download | 765ae7d30daa984f2ee7b66482d8f0230251829d | 822f02f8c786a70271640ed653b026ba04811a2f | refs/heads/master | 2020-04-27T13:24:44.652725 | 2019-05-29T14:31:15 | 2019-05-29T14:31:15 | 174,368,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,925 | py | #!/usr/bin/env python
"""
Created on Sep 7 2018
@author: Lori Garzio
@brief: This script imports tools to use the data_review_list
(https://github.com/ooi-data-lab/data-review-tools/tree/master/review_list) to download OOI 1.0 datasets via the OOI
M2M interface.
@usage:
sDir: directory where outputs are saved, and where f is located
f: optional csv file containing data to download, columns: array, subsite, node, sensor, delivery_method,
reference_designator (entries in all columns are optional). If this file is not provided, the script will prompt
the user for inputs
username: OOI API username
token: OOI API password
"""
import datetime as dt
import pandas as pd
import os
import itertools
import functions.common as cf
import scripts
# --- user inputs -----------------------------------------------------------
sDir = '/Users/lgarzio/Documents/OOI'
f = '' # optional i.e. 'data_download.csv'
username = 'username'
token = 'token'
# ---------------------------------------------------------------------------
cf.create_dir(sDir)
# Timestamp used to tag this batch of requests and their output files.
now = dt.datetime.now().strftime('%Y%m%dT%H%M')
if not f:
    # No csv supplied: prompt interactively for array/site/node/instrument.
    array, subsite, node, inst, delivery_methods = scripts.interactive_inputs.return_interactive_inputs()
    f_url_list = scripts.data_request_urls_ooi1_0.main(sDir, array, subsite, node, inst, delivery_methods, now)
else:
    # Build request URLs row-by-row from the csv.  A populated
    # reference_designator overrides the separate subsite/node/sensor columns.
    df = pd.read_csv(os.path.join(sDir, f))
    url_list = []
    for i, j in df.iterrows():
        array = scripts.data_request_tools.check_str(j['array'])
        array = scripts.data_request_tools.format_inputs(array)
        refdes = j['reference_designator']
        if type(refdes) == str:
            # Split e.g. 'CP01CNSM-MFD35-02-PRESFB000' into its parts.
            subsite = scripts.data_request_tools.format_inputs(refdes.split('-')[0])
            node = scripts.data_request_tools.format_inputs(refdes.split('-')[1])
            inst = scripts.data_request_tools.format_inputs('-'.join((refdes.split('-')[2], refdes.split('-')[3])))
        else:
            subsite = scripts.data_request_tools.check_str(j['subsite'])
            subsite = scripts.data_request_tools.format_inputs(subsite)
            node = scripts.data_request_tools.check_str(j['node'])
            node = scripts.data_request_tools.format_inputs(node)
            inst = scripts.data_request_tools.check_str(j['sensor'])
            inst = scripts.data_request_tools.format_inputs(inst)
        delivery_methods = scripts.data_request_tools.check_str(j['delivery_method'])
        delivery_methods = scripts.data_request_tools.format_inputs(delivery_methods)
        urls = scripts.data_request_urls_ooi1_0.main(sDir, array, subsite, node, inst, delivery_methods, now)
        url_list.append(urls)
    # Flatten the per-row URL lists into one request list.
    f_url_list = list(itertools.chain(*url_list))
thredds_output_urls = scripts.send_data_requests_nc.main(sDir, f_url_list, username, token, now)
print('\nSeeing if the requests have fulfilled...')
for i in range(len(thredds_output_urls)):
    url = thredds_output_urls[i]
    print('\nDataset {} of {}: {}'.format((i + 1), len(thredds_output_urls), url))
    if 'no_output_url' not in url:
        # Poll the THREDDS server until this request completes.
        cf.check_request_status(url)
| [
"lgarzio@marine.rutgers.edu"
] | lgarzio@marine.rutgers.edu |
3d1b02416928674854e82e448ad25812a8d5e2e3 | f6de805e4e0e169cd82562aca20bfef3b38c8c31 | /venv/Scripts/gifmaker.py | 7036a340c87acdf26d6bc26a1f9bfd754c935b7f | [] | no_license | Huangkai1008/stuonline | 742ac9b27ea1cda8e2c35bf0425cb076ff0345dc | f874eeeb91433d7d789783347e4ffbb01198da58 | refs/heads/master | 2022-10-26T03:36:40.910069 | 2018-01-08T05:41:15 | 2018-01-08T05:41:29 | 114,594,601 | 0 | 1 | null | 2022-10-18T01:01:58 | 2017-12-18T04:05:32 | Python | UTF-8 | Python | false | false | 692 | py | #!c:\users\huangkai\pycharmprojects\stuonline\venv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
    import sys
    # Usage: gifmaker <infile> <outfile> — converts any PIL-readable image
    # sequence into an animated GIF.
    if len(sys.argv) < 3:
        print("GIFMAKER -- create GIF animations")
        print("Usage: gifmaker infile outfile")
        sys.exit(1)
    im = Image.open(sys.argv[1])
    # save_all=True writes every frame of the sequence, producing the
    # animation instead of just the first frame.
    im.save(sys.argv[2], save_all=True)
| [
"18778335525@163.com"
] | 18778335525@163.com |
4ad4ff6e2904bfa1d05b0636df4dcf6e1dad797d | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/111. Minimum Depth of Binary Tree.py | 32f64c72979c0a30bda905626e31da5ffa8920a8 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def minDepth(self, root: TreeNode) -> int:
        """Return the number of nodes along the shortest root-to-leaf path."""
        if not root:
            return 0
        def walk(node, depth):
            # A leaf terminates a root-to-leaf path at the current depth.
            if node.left is None and node.right is None:
                return depth
            best = float('inf')
            if node.left is not None:
                best = min(best, walk(node.left, depth + 1))
            if node.right is not None:
                best = min(best, walk(node.right, depth + 1))
            return best
        return walk(root, 1)
| [
"hyoukjea.son@hyundai.com"
] | hyoukjea.son@hyundai.com |
1ce516617a7ed38b6bddd8a4a069d6da44127912 | 90c161bc5861c14b426cc6627ce08f2241ad3576 | /django_project_template/django_project_template/apps/app/models.py | d23ecefbfdab097a30b9fbdec86489003734ef5f | [
"MIT"
] | permissive | eduardolujan/django_project_template | dd6eb421eedac07c52d429e74c7461941fb96ef9 | 6e449b1da6d95ab7afb532a4a76d724be666ae4f | refs/heads/master | 2016-09-05T22:01:07.106705 | 2015-08-24T22:48:18 | 2015-08-24T22:48:18 | 35,395,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from django.db import models
# Create your models here.
class Person(models.Model):
    # Minimal person record.  `other` is a free-form field whose purpose is
    # not evident from this file — TODO: rename or document at call sites.
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    other = models.CharField(max_length=30)
| [
"eduardo.lujan.p@gmail.com"
] | eduardo.lujan.p@gmail.com |
2cbebb6288b0c8a6157c34869367770c6916c8a2 | 02c30e3e2c0f701d77f0a23591027ae62f37a512 | /libs/applibs/kivymd/grid.py | c2d59d0d4cf04b4e9156a363d5dec219b4a4192e | [
"MIT"
] | permissive | mkbeh/CRyptoLab | 5341a48a403ecf23e10248c46e919c1381275551 | 424c938c16c9264e99eff71e4c1a27ca65314d42 | refs/heads/master | 2022-12-22T06:39:36.909313 | 2018-09-25T14:40:32 | 2018-09-25T14:40:32 | 144,743,677 | 0 | 2 | MIT | 2022-12-08T02:22:14 | 2018-08-14T16:09:19 | Python | UTF-8 | Python | false | false | 5,151 | py | # coding=utf-8
from kivy.lang import Builder
from kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \
NumericProperty, ListProperty, OptionProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from libs.applibs.kivymd.ripplebehavior import RectangularRippleBehavior
from libs.applibs.kivymd.theming import ThemableBehavior
Builder.load_string("""
<SmartTile>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
<SmartTileWithLabel>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
_box_label: boxlabel
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
MDLabel:
id: boxlabel
font_style: "Caption"
halign: "center"
text: root.text
""")
class Tile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
           BoxLayout):
    """A simple tile: a clickable, themable, rippling BoxLayout intended as
    a grid building block.  It adds no behaviour of its own beyond the
    inherited mixins.
    """
    pass
class SmartTile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
                FloatLayout):
    """A tile for more complex needs.
    Includes an image, a container to place overlays and a box that can act
    as a header or a footer, as described in the Material Design specs.
    """
    box_color = ListProperty([0, 0, 0, 0.5])
    """Sets the color and opacity for the information box."""
    box_position = OptionProperty('footer', options=['footer', 'header'])
    """Determines whether the information box acts as a header or footer to the
    image.
    """
    lines = OptionProperty(1, options=[1, 2])
    """Number of lines in the header/footer.
    As per Material Design specs, only 1 and 2 are valid values.
    """
    overlap = BooleanProperty(True)
    """Determines if the header/footer overlaps on top of the image or not"""
    # Img properties — forwarded to the AsyncImage in the kv rule above.
    allow_stretch = BooleanProperty(True)
    anim_delay = NumericProperty(0.25)
    anim_loop = NumericProperty(0)
    img_color = ListProperty([1, 1, 1, 1])
    keep_ratio = BooleanProperty(False)
    mipmap = BooleanProperty(False)
    source = StringProperty()
    # Internal references bound by the kv rule (img / img_overlay / box).
    _img_widget = ObjectProperty()
    _img_overlay = ObjectProperty()
    _box_overlay = ObjectProperty()
    _box_label = ObjectProperty()
    def reload(self):
        # Force the AsyncImage to re-fetch its source.
        self._img_widget.reload()
    def add_widget(self, widget, index=0):
        # Route children by interface: IOverlay subclasses go into the
        # image overlay, IBoxOverlay subclasses into the header/footer box,
        # everything else is added to the tile itself.
        if issubclass(widget.__class__, IOverlay):
            self._img_overlay.add_widget(widget, index)
        elif issubclass(widget.__class__, IBoxOverlay):
            self._box_overlay.add_widget(widget, index)
        else:
            super(SmartTile, self).add_widget(widget, index)
class SmartTileWithLabel(SmartTile):
    """A :class:`SmartTile` whose header/footer box carries an MDLabel
    (see the kv rule above)."""
    _box_label = ObjectProperty()
    # MDLabel properties — forwarded to the boxlabel in the kv rule.
    font_style = StringProperty("Caption")
    theme_text_color = StringProperty("")
    text = StringProperty("")
    """Determines the text for the box footer/header"""
class IBoxOverlay():
    """An interface to specify widgets that belong to the box overlay
    (header/footer box) in the :class:`SmartTile` widget when added as a
    child.
    """
    pass
class IOverlay():
    """An interface to specify widgets that belong to the image overlay
    in the :class:`SmartTile` widget when added as a child.
    """
    pass
| [
"mkbehforever@gmail.com"
] | mkbehforever@gmail.com |
a77d7b5ddda7a942f99d2e3cccf3bf59b4f1315f | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/azure-firewall/azext_firewall/vendored_sdks/v2021_08_01/v2021_08_01/aio/_configuration.py | 86d05cdb64608c7096fa37b26058687fa39110c9 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 3,380 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class NetworkManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for NetworkManagementClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The subscription credentials which uniquely identify the Microsoft
     Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(NetworkManagementClientConfiguration, self).__init__(**kwargs)
        # Fail fast: both values are required for every service call.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-network/{}'.format(VERSION))
        self._configure(**kwargs)
    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Each pipeline policy can be overridden via kwargs; otherwise the
        # azure-core / azure-mgmt-core default is installed.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Wrap the credential in an ARM challenge-auth policy unless the
        # caller supplied an authentication policy explicitly.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
fd0e1bda9b0e00e801221a122a381f259448e504 | 8c68f9d99b769bbd8aae676666b386156ffb26ff | /toga_cassowary/widget.py | 57918cd93727e633475e349489e7dbef74e5b92f | [
"BSD-3-Clause"
] | permissive | pybee/toga-cassowary | 2209a2de17ab3774582423b700088572513c55ef | 74be7f9b5e7f435f839749870fec7effd73959bb | refs/heads/master | 2016-09-15T14:47:15.951453 | 2016-03-11T10:09:37 | 2016-03-11T10:09:37 | 23,016,663 | 2 | 4 | null | 2016-03-11T10:09:38 | 2014-08-16T11:31:40 | Python | UTF-8 | Python | false | false | 4,035 | py | from __future__ import print_function, absolute_import, division
from toga.constraint import Attribute, Constraint
from toga.widget import Widget as WidgetBase
from .layout import BoundingBox, LayoutManager
class Widget(WidgetBase):
def __init__(self):
super(Widget, self).__init__()
self._bounding_box = BoundingBox()
self._expand_horizontal = True
self._expand_vertical = True
def _expression(self, identifier):
if identifier == Attribute.LEFT:
return self._bounding_box.x
elif identifier == Attribute.RIGHT:
return self._bounding_box.x + self._bounding_box.width
elif identifier == Attribute.TOP:
return self._bounding_box.y
elif identifier == Attribute.BOTTOM:
return self._bounding_box.y + self._bounding_box.height
elif identifier == Attribute.LEADING:
return self._bounding_box.x
elif identifier == Attribute.TRAILING:
return self._bounding_box.x + self._bounding_box.width
elif identifier == Attribute.WIDTH:
return self._bounding_box.width
elif identifier == Attribute.HEIGHT:
return self._bounding_box.height
elif identifier == Attribute.CENTER_X:
return self._bounding_box.x + (self._bounding_box.width / 2)
elif identifier == Attribute.CENTER_Y:
return self._bounding_box.y + (self._bounding_box.height / 2)
# elif identifier == self.BASELINE:
# return ...
@property
def _width_hint(self):
raise NotImplementedError()
@property
def _height_hint(self):
raise NotImplementedError()
class Container(Widget):
def __init__(self):
super(Container, self).__init__()
self.children = []
self.constraints = {}
self.startup()
def startup(self):
self._layout_manager = LayoutManager(self._bounding_box)
self._impl = self._create_container()
def add(self, widget):
self.children.append(widget)
# Assign the widget to the same app and window as the container.
widget.window = self.window
widget.app = self.app
self._layout_manager.add_widget(widget)
self._impl.add(widget._impl)
def _set_app(self, app):
for child in self.children:
child.app = app
def _set_window(self, window):
for child in self.children:
child.window = window
def constrain(self, constraint):
"Add the given constraint to the widget."
if constraint in self.constraints:
return
widget = constraint.attr.widget
identifier = constraint.attr.identifier
if constraint.related_attr:
related_widget = constraint.related_attr.widget
related_identifier = constraint.related_attr.identifier
expr1 = widget._expression(identifier) * constraint.attr.multiplier + constraint.attr.constant
expr2 = related_widget._expression(related_identifier) * constraint.related_attr.multiplier + constraint.related_attr.constant
if constraint.relation == Constraint.EQUAL:
self._layout_manager.add_constraint(expr1 == expr2)
elif constraint.relation == Constraint.LTE:
self._layout_manager.add_constraint(expr1 <= expr2)
elif constraint.relation == Constraint.GTE:
self._layout_manager.add_constraint(expr1 >= expr2)
else:
expr = widget._expression(identifier) * constraint.attr.multiplier
if constraint.relation == Constraint.EQUAL:
self._layout_manager.add_constraint(expr == constraint.attr.constant)
elif constraint.relation == Constraint.LTE:
self._layout_manager.add_constraint(expr <= constraint.attr.constant)
elif constraint.relation == Constraint.GTE:
self._layout_manager.add_constraint(expr >= constraint.attr.constant)
| [
"russell@keith-magee.com"
] | russell@keith-magee.com |
b7eeafbb0578c3aff3f997b879d90719598bf4fc | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/slb/slb_connection_reuse.py | 9be7f959106aa709eef0df8f1744fb66d72c95fe | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 3,137 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class SamplingEnable(A10BaseClass):
    """Nested ``sampling-enable`` element for :class:`ConnectionReuse`.

    This class does not support CRUD operations; use the parent object.

    :param counters1: one of ``all``, ``current_open``, ``current_active``,
        ``nbind``, ``nunbind``, ``nestab``, ``ntermi``, ``ntermi_err``,
        ``delay_unbind``, ``long_resp``, ``miss_resp``, ``unbound_data_rcv``,
        ``pause_conn``, ``pause_conn_fail``, ``resume_conn``,
        ``not_remove_from_rport``.
    :param DeviceProxy: device proxy for REST operations and session
        handling (see ``common/device_proxy.py``).
    """

    def __init__(self, **kwargs):
        # Default attribute values; insertion order preserves the
        # original assignment order.
        defaults = {
            "ERROR_MSG": "",
            "b_key": "sampling-enable",
            "DeviceProxy": "",
            "counters1": "",
        }
        for attr_name, attr_value in defaults.items():
            setattr(self, attr_name, attr_value)
        # Caller-supplied values override the defaults.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class ConnectionReuse(A10BaseClass):
    """Show Connection Reuse Statistics.

    ``connection-reuse`` supports CRUD operations and inherits from
    ``common/A10BaseClass``; it is the ``PARENT`` class for this module.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/slb/connection-reuse`.

    :param sampling_enable: list of :class:`SamplingEnable` objects
        (unique items; see that class for the allowed counter names).
    :param DeviceProxy: device proxy for REST operations and session
        handling (see ``common/device_proxy.py``).
    """

    def __init__(self, **kwargs):
        # Default attribute values; insertion order preserves the
        # original assignment order.
        defaults = {
            "ERROR_MSG": "",
            "required": [],
            "b_key": "connection-reuse",
            "a10_url": "/axapi/v3/slb/connection-reuse",
            "DeviceProxy": "",
            "sampling_enable": [],
        }
        for attr_name, attr_value in defaults.items():
            setattr(self, attr_name, attr_value)
        # Caller-supplied values override the defaults.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.