blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7c432b6ff53b6f2d2ff8f063dc48ade3a4c92cd1
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Administrator/test_c139330.py
|
0952f34469d1beb7cbb7b7a29b6cc088aec4bbac
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_sys import *
from page_obj.scg.scg_def import *
from page_obj.scg.scg_button import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_dev import *
test_id = 139330  # TestRail case id used when reporting pass/fail


def test_c139330(browser):
    """Add an admin account via the web UI and verify the management log records it.

    Relies on page_obj helpers imported via `*` at module top.
    NOTE(review): `管理日志` ("management log") is a name imported from the
    page_obj modules — presumably a log-category constant; confirm there.
    """
    try:
        login_web(browser, url=dev1)
        configuer(browser)
        time.sleep(2)
        loginfo = get_log(browser, 管理日志)
        browser.switch_to.default_content()
        # print(loginfo)
        # Clean up the fixtures created by configuer() before asserting.
        delete_all_admin_list_jyl(browser)
        time.sleep(1)
        delete_all_admin_profile_jyl(browser)
        time.sleep(1)
        try:
            # Success message: "adding admin account succeeded".
            assert "添加管理员帐户成功" in loginfo
            rail_pass(test_run_id, test_id)
        except:
            # Report failure to TestRail, then assert on the failure
            # message ("adding admin account failed") for a clear traceback.
            rail_fail(test_run_id, test_id)
            assert "添加管理员帐户失败" in loginfo
    except Exception as err:
        # If any step above raised, reboot the device to restore its configuration.
        reload(hostip=dev1)
        print(err)
        rail_fail(test_run_id, test_id)
        assert False
def configuer(browser):
    """Test fixture: create an admin profile, then a remote admin account using it."""
    add_admin_profile(browser, profile_name='aaa', desc="aaa权限", cfg="读写", report="读写")
    time.sleep(2)
    add_admin_remote_jyl(
        browser,
        admin_name="bob",
        auth_database="remote",
        temp="log_profile",
        https="yes",
        telent="yes",
        ssh="yes",
        console="yes",
        status="enable",
        interface=interface_name_6,
        online_num="3",
        ip1="0.0.0.0/0",
        ip2="3.3.3.0/24",
    )
if __name__ == '__main__':
    # Allow running this file directly: -v verbose, -s disables output capture.
    pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
d330833cb9b420e93029ecd1df3e12af203a3a9d
|
ee05d803ee01c939d7324db8e8ff2b8990877d69
|
/e_单词规律/290.py
|
f5edbac5e8f0297cffaf85541074befd5136adb1
|
[] |
no_license
|
Nostalogicwh/Leetcode
|
7660153ffe56b1a348d2bb145bbd77c9c46a5525
|
5004d6b7157dc6a21666c7f79a38e95fa0ca092f
|
refs/heads/master
| 2023-02-26T10:23:26.575572
| 2021-02-01T09:05:04
| 2021-02-01T09:05:04
| 295,983,019
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
class Solution:
    def wordPattern(self, pattern: str, s: str) -> bool:
        """Return True iff the letters of `pattern` and the words of `s`
        are in one-to-one (bijective) correspondence.

        BUG FIX: removed a stray debug `print(dct)` left before the return.
        """
        words = s.split()
        if len(pattern) != len(words):
            return False
        letter_to_word = {}
        used_words = set()  # words already bound to some letter (reverse mapping)
        for ch, word in zip(pattern, words):
            if ch in letter_to_word:
                # Letter already bound: it must map to the same word.
                if letter_to_word[ch] != word:
                    return False
            else:
                # New letter: the word must not be bound to another letter.
                if word in used_words:
                    return False
                letter_to_word[ch] = word
                used_words.add(word)
        return True
|
[
"apocalypsewh@163.com"
] |
apocalypsewh@163.com
|
e13b84b29a8d28f05463a7eab4ee596dc1714cae
|
5b7af6548668085da9a6ab86f564538ee73c4865
|
/build/scripts/slave/recipe_modules/luci_config/example.py
|
ddbd1f0ab8890cf00a7b52766c5d291be3d044b2
|
[
"BSD-3-Clause"
] |
permissive
|
elastos/Elastos.APP.Android.ShiJiuTV
|
463a986450a915f7b3066e6a03aca903cf56f69b
|
f77189a2b8df86028adc68105988710d16ce012b
|
refs/heads/master
| 2023-03-18T03:11:58.337349
| 2018-03-12T08:50:57
| 2018-03-13T11:10:27
| 124,007,751
| 0
| 1
| null | 2022-10-03T03:30:29
| 2018-03-06T02:21:25
| null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.recipe_api import Property
# Recipe modules this example depends on (resolved by the recipe engine).
DEPS = [
    'luci_config',
    'recipe_engine/properties',
    'recipe_engine/step'
]
# Input properties; both optional and default to None when not supplied.
PROPERTIES = {
    'auth_token': Property(default=None),
    'protobuf': Property(default=None),
}
def RunSteps(api, auth_token, protobuf):
    """Exercise the luci_config module: optional auth, optional textproto parse."""
    if auth_token:
        api.luci_config.c.auth_token = auth_token

    if protobuf:
        # Fetch the config, parse its content as textproto, and echo the result.
        config = api.luci_config.get_project_config('build', 'recipes.cfg')
        result = api.luci_config.parse_textproto(config['content'].split('\n'))
        api.step('checkit', ['echo', str(result)])
        return

    # Plain fetch paths (return values unused; this is example coverage).
    api.luci_config.get_project_config('build', 'recipes.cfg')
    api.luci_config.get_project_metadata('build')
def GenTests(api):
    """Simulation tests: each yield composes a named case from mocked step data."""
    # Happy path: list projects, fetch a config.
    yield (
        api.test('basic') +
        api.luci_config.get_projects(['build']) +
        api.luci_config.get_project_config('build', 'recipes.cfg', 'testcontent')
    )
    # Same flow with an auth token supplied via properties.
    yield (
        api.test('auth_token') +
        api.properties(auth_token='ya2930948320948203480=') +
        api.luci_config.get_projects(['build']) +
        api.luci_config.get_project_config('build', 'recipes.cfg', 'testcontent')
    )
    # Textproto payload that drives the parse_textproto branch of RunSteps.
    protobuf_lines = """
foo: 1
bar: "hi"
baz {
the_thing: "hi"
}
"""
    yield (
        api.test('protobuf') +
        api.luci_config.get_project_config(
            'build', 'recipes.cfg', protobuf_lines) +
        api.properties(protobuf=True)
    )
|
[
"xiaokun.mengxk@qcast.cn"
] |
xiaokun.mengxk@qcast.cn
|
e9fac787cd491026568ab5aa63c583c3cc349c1c
|
78ea634c53a3b52cbc379b5509d99f6729e1644b
|
/user/forms.py
|
04c395cf518eb98c1946fa41c4eb578ee10656ab
|
[] |
no_license
|
TapanManu/Todo-Backend
|
56a2999a8139a967e1f59d346d22b902f5ed151a
|
4ed02cbbf2c843fc248edb4a4027e3491fa800c9
|
refs/heads/master
| 2022-03-30T10:21:49.165029
| 2020-02-02T18:45:39
| 2020-02-02T18:45:39
| 226,716,615
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
from django import forms
from .models import UserProfile
from django.contrib.auth.models import User
class UserForm(forms.ModelForm):
    """Registration form backed by Django's built-in User model."""

    username = forms.CharField(
        label=("Username"),
        widget=forms.TextInput(
            attrs={
                'placeholder': ('Username'),
                'autofocus': 'autofocus',
                'required': 'true',
            }
        ),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={'placeholder': ('password')})
    )
    email = forms.EmailField(max_length=254)

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Edit form for the extra per-user profile fields."""

    class Meta:
        model = UserProfile
        fields = ('website',)
|
[
"you@example.com"
] |
you@example.com
|
2109b2bc169fb2b076bbf069f2ef187f1e7baab8
|
df328969e8d61a02603374c0cb3450556a51c184
|
/tests/runtests.py
|
f0f367a3d3e63c7eff09f034e8fdb67b71a9796e
|
[
"BSD-2-Clause"
] |
permissive
|
samuderapase/django-comments-xtd
|
c5dd8641e8ca6933a124ae91377f003dd1dfc7a6
|
5ac29f5269c18acb1709a35b30de3decff7a59fe
|
refs/heads/master
| 2020-12-24T23:49:33.382692
| 2012-10-17T20:07:09
| 2012-10-17T20:07:09
| 6,543,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import os
import sys
def setup_django_settings():
    """Chdir to the repo root, put it on sys.path, and select the test settings."""
    repo_root = os.path.join(os.path.dirname(__file__), "..")
    os.chdir(repo_root)
    sys.path.insert(0, os.getcwd())
    os.environ["DJANGO_SETTINGS_MODULE"] = "tests.settings"
def run_tests():
    """Run the django_comments_xtd suite with the project's test runner."""
    # Only configure settings if the caller has not already done so.
    if not os.environ.get("DJANGO_SETTINGS_MODULE", False):
        setup_django_settings()

    # Imports are deferred until DJANGO_SETTINGS_MODULE is guaranteed set.
    from django.conf import settings
    from django.test.utils import get_runner

    runner_cls = get_runner(settings)
    runner = runner_cls(verbosity=2, interactive=True, failfast=False)
    runner.run_tests(["django_comments_xtd"])
if __name__ == "__main__":
    # Script entry point: run the suite directly.
    run_tests()
|
[
"danirus@eml.cc"
] |
danirus@eml.cc
|
1b002ff27e79f71f469b0ebe1a940084049bd552
|
52c8d7594de49e3ba47573c50c95bd112c3c8828
|
/cycles.py
|
67f8843f2dd44172c7166fbadb936912a45fed0f
|
[] |
no_license
|
vitroid/LiqChemSummerSchool2014
|
bcf592be419bccb6e972c11ffff95a7eff355213
|
23a29acbc13548c702518448f52f0f8336b167e2
|
refs/heads/master
| 2021-06-03T10:29:56.428479
| 2021-02-15T05:34:56
| 2021-02-15T05:34:56
| 21,470,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,020
|
py
|
#!/usr/bin/env python
# coding: utf-8
############# functions ###############################################
#cycles
def all_cycles(graph, maxc=6):
    """Return every self-avoiding cycle of length 3..maxc in *graph*.

    graph is an adjacency table: graph[v] lists the neighbours of vertex v,
    with vertices labelled 0..len(graph)-1.  Each cycle is found once per
    starting vertex and once per direction, so duplicates are included;
    use unique_cycles() to deduplicate.
    """
    # local function
    def self_avoiding_cycle(graph, vertex_list):
        # The walk has wandered past the maximum cycle length:
        # give up (no results).
        if len(vertex_list) == maxc + 1:
            return []
        last = vertex_list[-1]
        results = []
        # Try to extend the walk through every neighbour of the last vertex.
        # (renamed from `next` to avoid shadowing the builtin)
        for neighbor in graph[last]:
            if neighbor == vertex_list[0]:
                # Back at the starting vertex; a cycle needs length >= 3.
                if len(vertex_list) >= 3:
                    # Completed a cycle — record it.
                    results.append(vertex_list)
                else:
                    continue
            elif neighbor in vertex_list:
                # Crossed the walk in the middle: not self-avoiding.
                continue
            else:
                # Recurse with the walk extended by one vertex.
                results += self_avoiding_cycle(graph, vertex_list + [neighbor])
        return results
    # end of local functions

    cycles = []
    graph_size = len(graph)
    # Start a self-avoiding search from every vertex in turn.
    for v in range(graph_size):
        cycles += self_avoiding_cycle(graph, [v])
    # Return all cycles including duplicates (different start, reversed, ...).
    return cycles
def unique_cycles(graph, maxc=6):
    """Return the cycles of *graph* with duplicates removed.

    Two cycles count as the same when they visit the same vertex set, so
    rotations and reversed traversals collapse to a single representative.
    """
    seen = set()       # vertex sets already emitted
    result = []        # first representative of each vertex set, in order
    for cycle in all_cycles(graph, maxc):
        key = frozenset(cycle)
        if key not in seen:
            seen.add(key)
            result.append(cycle)
    return result
############# end of functions ########################################
from distance_matrix import *
# test case
if __name__ == "__main__":
    # A graph is expressed as a set of edges; vertices are labelled 0..size-1.
    # Each undirected edge is a tuple of its two vertex labels (smaller first).
    # A larger example: the cube graph.
    edges = set([(0,1),(1,2),(2,3),(0,3),
                 (0,4),(1,5),(2,6),(3,7),
                 (4,5),(5,6),(6,7),(4,7)])
    size = 8
    # Re-express adjacency as a table so neighbours are easy to walk.
    graph = adjacency_table(edges, size)
    # List of cycles (maximum length 6 steps).
    # NOTE(review): Python 2 print statement — this file predates Python 3.
    print unique_cycles(graph,6)
|
[
"vitroid@gmail.com"
] |
vitroid@gmail.com
|
e062c0a4d1a6c48d574c1f5205d0a19e1c11e9be
|
6fff0893ef43f1018d65f2e8e1bf27d9f8accf5b
|
/pw_package/py/pw_package/package_manager.py
|
1254121126b21ce292b55127abe72651f6f4487a
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pigweed
|
eeb68a4eda6f0a9b5ef0b8145d0204bc9f85bfdc
|
53c2f3e2569d7e582d3dd3056ceb9b2c3b8197b2
|
refs/heads/main
| 2023-06-03T10:32:29.498066
| 2021-06-17T06:38:15
| 2021-06-17T20:44:55
| 378,165,913
| 0
| 0
|
Apache-2.0
| 2021-06-18T13:54:37
| 2021-06-18T13:53:40
| null |
UTF-8
|
Python
| false
| false
| 6,156
|
py
|
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Install and remove optional packages."""
import argparse
import dataclasses
import logging
import os
import pathlib
import shutil
from typing import Dict, List, Sequence, Tuple
_LOG: logging.Logger = logging.getLogger(__name__)
class Package:
    """Base class describing one installable optional package.

    Subclass this to implement installation of a specific package.
    """
    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """Package name; also used as its directory name under the root."""
        return self._name

    def install(self, path: pathlib.Path) -> None:  # pylint: disable=no-self-use
        """Install the package at path.

        The directory may already exist — implementations must be prepared
        to delete or update it.
        """

    def remove(self, path: pathlib.Path) -> None:  # pylint: disable=no-self-use
        """Remove the package's directory from path, if present.

        Deleting the directory suffices for most packages, so subclasses
        rarely need to override this.
        """
        if os.path.exists(path):
            shutil.rmtree(path)

    def status(self, path: pathlib.Path) -> bool:  # pylint: disable=no-self-use
        """Return whether the package at path is installed and current.

        Skipped entirely when the directory does not exist.
        """

    def info(self, path: pathlib.Path) -> Sequence[str]:  # pylint: disable=no-self-use
        """Return a short description of how to enable the package."""
# Registry of all known packages, keyed by package name.
_PACKAGES: Dict[str, Package] = {}


def register(package_class: type, *args, **kwargs) -> None:
    """Instantiate *package_class* with the given args and record it by name."""
    instance = package_class(*args, **kwargs)
    _PACKAGES[instance.name] = instance
@dataclasses.dataclass
class Packages:
    # Names of every registered package.
    all: Tuple[str, ...]
    # Registered packages whose status() check passed under the root.
    installed: Tuple[str, ...]
    # Registered packages whose status() check did not pass.
    available: Tuple[str, ...]
class PackageManager:
    """Install and remove optional packages under a single root directory."""

    def __init__(self, root: pathlib.Path):
        self._pkg_root = root
        # Make sure the package root exists up front.
        os.makedirs(root, exist_ok=True)

    def install(self, package: str, force: bool = False) -> None:
        """Install *package*; with force=True remove any existing copy first."""
        pkg = _PACKAGES[package]
        if force:
            self.remove(package)
        pkg.install(self._pkg_root / pkg.name)

    def remove(self, package: str) -> None:
        """Delete *package*'s directory under the root."""
        pkg = _PACKAGES[package]
        pkg.remove(self._pkg_root / pkg.name)

    def status(self, package: str) -> bool:
        """True when *package*'s directory exists and its status check passes."""
        pkg = _PACKAGES[package]
        target = self._pkg_root / pkg.name
        return os.path.isdir(target) and pkg.status(target)

    def list(self) -> Packages:
        """Partition every registered package into installed vs. available."""
        installed = []
        available = []
        for name in sorted(_PACKAGES):
            pkg = _PACKAGES[name]
            bucket = installed if pkg.status(self._pkg_root / pkg.name) else available
            bucket.append(pkg.name)
        return Packages(
            all=tuple(_PACKAGES.keys()),
            installed=tuple(installed),
            available=tuple(available),
        )

    def info(self, package: str) -> Sequence[str]:
        """Return the registered package's info lines."""
        pkg = _PACKAGES[package]
        return pkg.info(self._pkg_root / pkg.name)
class PackageManagerCLI:
    """Command-line interface to PackageManager."""

    def __init__(self):
        self._mgr: PackageManager = None  # assigned by run()

    def install(self, package: str, force: bool = False) -> int:
        """Install *package*, logging progress and its info lines; returns 0."""
        _LOG.info('Installing %s...', package)
        self._mgr.install(package, force)
        _LOG.info('Installing %s...done.', package)
        for line in self._mgr.info(package):
            _LOG.info('%s', line)
        return 0

    def remove(self, package: str) -> int:
        """Remove *package*, logging progress; returns 0."""
        _LOG.info('Removing %s...', package)
        self._mgr.remove(package)
        _LOG.info('Removing %s...done.', package)
        return 0

    def status(self, package: str) -> int:
        """Log whether *package* is installed: 0 if installed, -1 otherwise."""
        if not self._mgr.status(package):
            _LOG.info('%s is not installed.', package)
            return -1
        _LOG.info('%s is installed.', package)
        for line in self._mgr.info(package):
            _LOG.info('%s', line)
        return 0

    def list(self) -> int:
        """Log installed packages (with info lines) and available ones; returns 0."""
        packages = self._mgr.list()
        _LOG.info('Installed packages:')
        for package in packages.installed:
            _LOG.info('  %s', package)
            for line in self._mgr.info(package):
                _LOG.info('    %s', line)
        _LOG.info('')
        _LOG.info('Available packages:')
        for package in packages.available:
            _LOG.info('  %s', package)
        _LOG.info('')
        return 0

    def run(self, command: str, pkg_root: pathlib.Path, **kwargs) -> int:
        """Create a manager rooted at *pkg_root* and dispatch to *command*."""
        self._mgr = PackageManager(pkg_root.resolve())
        return getattr(self, command)(**kwargs)
def parse_args(argv: List[str] = None) -> argparse.Namespace:
    """Parse package-manager CLI arguments.

    Args:
        argv: argument list; argparse falls back to sys.argv[1:] when None.

    Returns:
        Namespace with `command`, `pkg_root`, and per-command options.
    """
    # BUG FIX: the string was being passed positionally as `prog` (the
    # program name); it is meant to be the parser's description.
    parser = argparse.ArgumentParser(description="Manage packages.")
    parser.add_argument(
        '--package-root',
        '-e',
        dest='pkg_root',
        type=pathlib.Path,
        # NOTE(review): this default is evaluated eagerly, so a missing
        # _PW_ACTUAL_ENVIRONMENT_ROOT raises KeyError even when
        # --package-root is given explicitly — confirm intended.
        default=(pathlib.Path(os.environ['_PW_ACTUAL_ENVIRONMENT_ROOT']) /
                 'packages'),
    )

    subparsers = parser.add_subparsers(dest='command', required=True)

    install = subparsers.add_parser('install')
    install.add_argument('--force', '-f', action='store_true')

    remove = subparsers.add_parser('remove')
    status = subparsers.add_parser('status')
    # Each per-package command takes a registered package name.
    for cmd in (install, remove, status):
        cmd.add_argument('package', choices=_PACKAGES.keys())

    _ = subparsers.add_parser('list')

    return parser.parse_args(argv)
def run(**kwargs):
    """Module entry point: forward *kwargs* to a fresh PackageManagerCLI."""
    cli = PackageManagerCLI()
    return cli.run(**kwargs)
|
[
"pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com
|
f0e7c0414602a99eb49b99e2deeb7a13c9cef635
|
1b2a1f807b98034567e936b9b5c76c2fc89b908a
|
/adj_stf/experimental/classification/multi_label_classification_model.py
|
6cb7fefe4e9aa02a3c55a5373d8693d745ffdb73
|
[] |
no_license
|
Adreambottle/Transformer2GP
|
48c955d8eb155caef4c24a3c03ee3aa9ab0bd3da
|
5ba1a5005c2ad21066304cdeb1d7c2587c8191da
|
refs/heads/main
| 2023-07-07T14:17:51.673437
| 2021-08-17T14:14:56
| 2021-08-17T14:14:56
| 397,279,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,051
|
py
|
from multiprocessing import cpu_count
import torch
from adj_tf import (
WEIGHTS_NAME,
AlbertConfig,
AlbertTokenizer,
BertConfig,
BertTokenizer,
DistilBertConfig,
DistilBertTokenizer,
RobertaConfig,
RobertaTokenizer,
XLMConfig,
XLMTokenizer,
XLNetConfig,
XLNetTokenizer,
)
from adj_stf.classification import ClassificationModel
from adj_stf.custom_models.models import (
AlbertForMultiLabelSequenceClassification,
BertForMultiLabelSequenceClassification,
DistilBertForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification,
XLMForMultiLabelSequenceClassification,
XLNetForMultiLabelSequenceClassification,
)
class MultiLabelClassificationModel(ClassificationModel):
    """Multi-label sequence classification built on top of ClassificationModel."""

    def __init__(self, model_type, model_name, num_labels=None, pos_weight=None, args=None, use_cuda=True):
        """
        Initializes a MultiLabelClassification model.

        Args:
            model_type: The type of model (bert, roberta)
            model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).
            num_labels (optional): The number of labels or classes in the dataset.
            pos_weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.
            args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
            use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
        """
        # Per-architecture (config, model, tokenizer) class triples.
        MODEL_CLASSES = {
            "bert": (BertConfig, BertForMultiLabelSequenceClassification, BertTokenizer),
            "roberta": (RobertaConfig, RobertaForMultiLabelSequenceClassification, RobertaTokenizer),
            "xlnet": (XLNetConfig, XLNetForMultiLabelSequenceClassification, XLNetTokenizer),
            "xlm": (XLMConfig, XLMForMultiLabelSequenceClassification, XLMTokenizer),
            "distilbert": (DistilBertConfig, DistilBertForMultiLabelSequenceClassification, DistilBertTokenizer),
            "albert": (AlbertConfig, AlbertForMultiLabelSequenceClassification, AlbertTokenizer),
        }
        config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
        if num_labels:
            self.config = config_class.from_pretrained(model_name, num_labels=num_labels)
            self.num_labels = num_labels
        else:
            self.config = config_class.from_pretrained(model_name)
            self.num_labels = self.config.num_labels
        # BUG FIX: the tokenizer was loaded twice, and self.num_labels was
        # afterwards unconditionally overwritten with the num_labels argument,
        # clobbering the value taken from the pretrained config whenever
        # num_labels was None.
        self.tokenizer = tokenizer_class.from_pretrained(model_name)
        self.pos_weight = pos_weight
        self.sliding_window = False

        if use_cuda:
            if torch.cuda.is_available():
                self.device = torch.device("cuda")
            else:
                raise ValueError(
                    "'use_cuda' set to True when cuda is unavailable. Make sure CUDA is available or set use_cuda=False."
                )
        else:
            self.device = "cpu"

        if self.pos_weight:
            # Per-label loss weights live on the same device as the model.
            self.model = model_class.from_pretrained(
                model_name, config=self.config, pos_weight=torch.Tensor(self.pos_weight).to(self.device)
            )
        else:
            self.model = model_class.from_pretrained(model_name, config=self.config)

        self.results = {}
        # Default training/eval configuration; entries are overridden by the
        # `args` dict when one is supplied.
        self.args = {
            "output_dir": "outputs/",
            "cache_dir": "cache_dir/",
            "fp16": False,
            "max_seq_length": 128,
            "train_batch_size": 8,
            "gradient_accumulation_steps": 1,
            "eval_batch_size": 8,
            "num_train_epochs": 1,
            "weight_decay": 0,
            "learning_rate": 4e-5,
            "adam_epsilon": 1e-8,
            "warmup_ratio": 0.06,
            "warmup_steps": 0,
            "max_grad_norm": 1.0,
            "stride": False,
            "logging_steps": 50,
            "save_steps": 2000,
            "evaluate_during_training": False,
            "overwrite_output_dir": False,
            "reprocess_input_data": False,
            "process_count": cpu_count() - 2 if cpu_count() > 2 else 1,
            "n_gpu": 1,
            "use_multiprocessing": True,
            "silent": False,
            "threshold": 0.5,
        }

        if not use_cuda:
            self.args["fp16"] = False

        if args:
            self.args.update(args)

        self.args["model_name"] = model_name
        self.args["model_type"] = model_type

    def train_model(
        self, train_df, multi_label=True, eval_df=None, output_dir=None, show_running_loss=True, args=None
    ):
        """Train the model; delegates to the base class with multi_label=True."""
        return super().train_model(
            train_df,
            multi_label=multi_label,
            eval_df=eval_df,
            output_dir=output_dir,
            show_running_loss=show_running_loss,
            args=args,
        )

    def eval_model(self, eval_df, multi_label=True, output_dir=None, verbose=False, **kwargs):
        """Evaluate the model; delegates to the base class with multi_label=True."""
        return super().eval_model(eval_df, output_dir=output_dir, multi_label=multi_label, verbose=verbose, **kwargs)

    def evaluate(self, eval_df, output_dir, multi_label=True, prefix="", **kwargs):
        """Delegate to the base class with multi_label defaulting to True."""
        return super().evaluate(eval_df, output_dir, multi_label=multi_label, prefix=prefix, **kwargs)

    def load_and_cache_examples(self, examples, evaluate=False, no_cache=False, multi_label=True):
        """Delegate to the base class with multi_label defaulting to True."""
        return super().load_and_cache_examples(examples, evaluate=evaluate, no_cache=no_cache, multi_label=multi_label)

    def compute_metrics(self, preds, labels, eval_examples, multi_label=True, **kwargs):
        """Delegate to the base class with multi_label defaulting to True."""
        return super().compute_metrics(preds, labels, eval_examples, multi_label=multi_label, **kwargs)

    def predict(self, to_predict, multi_label=True):
        """Delegate to the base class with multi_label defaulting to True."""
        return super().predict(to_predict, multi_label=multi_label)
|
[
"adreambottle@outlook.com"
] |
adreambottle@outlook.com
|
ba1dcb17a6e7d524f9f07edc2b67b588720faaad
|
c25a17f0f82c2eebca55bbe180f4c2ccbbf00292
|
/01_Jump_to_python/Chap06/6장_practice/practice4_메모장.py
|
786b7b2f7c82951b7ad1f09ccb24176b86c390b9
|
[] |
no_license
|
superbeom97/jumpjump
|
a0a4da6f0df0483ef0cef9833b5fe0402ec63c9c
|
fc45efce2a2b00c614aa5aa54b36be1572ed40ce
|
refs/heads/master
| 2021-09-15T09:35:16.903857
| 2018-05-30T00:00:59
| 2018-05-30T00:00:59
| 111,883,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
import sys

# Tiny command-line memo pad.
#   -a  <words>: append the words as one line to memo.txt
#   -au <words>: append each word on its own line, uppercased
#   -v         : print memo.txt
# All user-facing prompts/messages are Korean and kept verbatim.
args = sys.argv
args_one = args[1]   # command flag
args_two = args[2:]  # memo words

if args_one == "-a":
    try:
        # Probe for the file first; a missing file falls through to the menu.
        # BUG FIX: the probe handle was previously leaked (never closed).
        with open("memo.txt", 'r'):
            pass
        with open("memo.txt", 'a') as f:
            f.write(" ".join(args_two))
            f.write("\n")
    # BUG FIX: was a bare `except:`; narrowed to match the -v branch below.
    except FileNotFoundError:
        number = int(input("memo.txt 파일이 없습니다. 아래 중 선택하세요\n1. memo.txt 파일을 새로 생성하시겠습니까?\n2. 파일 경로를 입력하시겠습니까?\n: "))
        if number == 1:
            with open("memo.txt", 'w') as f:
                f.write(" ".join(args_two))
                f.write("\n")
            print("memo.txt 파일을 생성했습니다. 감사합니다.")
        elif number == 2:
            address = str(input("파일 경로를 입력하세요: "))
            with open(address, 'a') as f:
                f.write(" ".join(args_two))
                f.write("\n")
            print("정상 처리되었습니다. 감사합니다.")
        else:
            print("1번과 2번 중에 선택해라잉 확 마")
elif args_one == "-au":
    # BUG FIX: the file was reopened per word and closed with `f.close`
    # (missing parentheses, so it was never actually closed).
    with open("memo.txt", 'a') as f:
        for word in args_two:
            f.write(word.upper())
            f.write("\n")
elif args_one == "-v":
    try:
        with open("memo.txt", 'r') as f:
            print(f.read())
    except FileNotFoundError:
        number = int(input("memo.txt 파일이 없습니다. 아래 중 선택하세요\n1. 종료하시겠습니까?\n2. 파일 경로를 입력하시겠습니까?\n: "))
        if number == 1:
            print("이용해 주셔서 감사합니다.")
        elif number == 2:
            address = str(input("파일 경로를 입력하세요: "))
            with open(address, 'r') as f:
                print(f.read())
        else:
            print("1번과 2번 중에 선택해라잉 확 마")
|
[
"beom9790@naver.com"
] |
beom9790@naver.com
|
e67af2421558b2a196f0c5584ac66fb0e1dd4252
|
8f0fc0f4ac44e85e87ade78ac3f8d3b1996587e5
|
/Model_1.py
|
ae103fcd74ce05a1803a2d225dbbde006b88ee19
|
[] |
no_license
|
Data-drone/Kaggle_Allstate
|
91bc9d2400866f58813f4ba6f04009647d90fd0a
|
70b7f63eefea55ec9f0c8da6217b7f9ee8f12e76
|
refs/heads/master
| 2021-06-30T09:57:59.864482
| 2017-09-21T17:48:43
| 2017-09-21T17:48:43
| 74,899,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 29 23:23:27 2016
@author: Brian
"""
"""
Build model 1 script
"""
import os
import pandas as pd
import numpy as np
"""
feat table_1
"""
#### standard functions
def Find_Low_Var(frame):
sizes = []
low_var_col = []
for column in frame:
data_sets = frame[column]
uniques = data_sets.unique() # note the brackets
# print the pivot to show distributions
# will error
if (column != 'id'):
pd_Table = pd.pivot_table(train, values = 'id', index=column, aggfunc='count').apply(lambda x: np.round(np.float(x)/len(train)*100, 2))
#print (pd_Table )
if max(pd_Table) > 99:
low_var_col.append(column)
#variance = np.var(data_sets)
#print(uniques.size)
#print(column)
sizes.append(uniques.size)
#print(len(uniques))
return(sizes, low_var_col)
data_path = 'Data'
os.listdir('./Data/')
train = pd.read_csv('./Data/train.csv')
### 2 tyoes if columns the continuous and the categorical
# lets separate these
cate_cols = [col for col in train.columns if 'cat' in col]
continuous_cols = [col for col in train.columns if 'cont' in col]
categorical = train[cate_cols]
sizes, low_var_cols_to_drop = Find_Low_Var(categorical)
categorical_to_keep = categorical.drop(low_var_cols_to_drop, axis = 1)
### check how big the one hot is first
OneHot = pd.get_dummies(categorical_to_keep)
## try feature hasher again
from sklearn.feature_extraction import FeatureHasher
FH = FeatureHasher(n_features = 1000, input_type = 'dict')
hashed_Feat = FH.transform(categorical_to_keep.to_dict(orient='records'))
#dense_Feat = hashed_Feat.todense()
### make into categories
continuous = train[continuous_cols]
## id and target columns
id_target = train[['id', 'loss']]
## quick test 1
frame = [continuous, OneHot]
merge = pd.concat(frame, axis = 1)
"""
train test splitting
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( merge, np.log(id_target.loss), test_size=0.4, random_state=0)
assert X_train.shape[0] + X_test.shape[0] == continuous.shape[0]
# model 1 # like
# 8917348 RMSE for log
# 8116237 for normal
#from sklearn import linear_model
#reg = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])
#reg.fit(X_train, y_train)
#result = reg.predict(X_test)
# model 2
# 9069687 rmse for 100 regressors
# with one hots vars 4332759 rmse
#from sklearn.ensemble import RandomForestRegressor
#clf = RandomForestRegressor(n_estimators = 400, criterion='mse', verbose = 1, n_jobs = 7)
#clf.fit(X_train, y_train)
#result = clf.predict(X_test)
# model 3 # default xgb was 4389784
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
xgb_mod = XGBRegressor(max_depth = 10, learning_rate = 0.25, n_estimators = 150)
xgb_mod.fit(X_train, y_train)
result = xgb_mod.predict(X_test)
# score
from sklearn.metrics import mean_squared_error
mean_squared_error(np.exp(y_test), np.exp(result) )
import matplotlib.pyplot as plt
y_test.hist()
plt.hist(result, bins='auto')
|
[
"bpl.law@gmail.com"
] |
bpl.law@gmail.com
|
ad0646d8b6725f5fb875510a1944d2a4c900e23d
|
c89e59b4d018e8a2d7dc0dbc3bb7a3768024f849
|
/before2021/python/문제풀이/day5/3_숫자카운팅.py
|
a21ba81101844a7fb5f88a171cfd12bc1c5fbafa
|
[] |
no_license
|
leeiopd/algorithm
|
ff32103a43e467a5a091257cc07cf35365ecbf91
|
e41647d3918c3099110d97f455c5ebf9a38d571e
|
refs/heads/master
| 2023-03-08T23:46:34.919991
| 2023-02-22T09:39:46
| 2023-02-22T09:39:46
| 166,131,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
import sys
sys.stdin = open("3_input.txt")  # redirect stdin to the bundled input file
'''
첫째 줄에 N 이 입력된다. (1≤N≤200,000)
둘째 줄에 배열에 저장 되어있는 N개의 숫자가 순서대로 공백으로 구분되어 입력된다.
셋째 줄에 M 이 입력된다. (1≤M≤200,000)
넷째 줄에 M개의 탐색할 숫자가 순서대로 공백으로 구분되어 입력된다.
(이 숫자는 정렬 되어있지 않다)
입력 넷째 줄에서 주어진 탐색할 숫자의 배열 내 저장된 개수를 차례대로 출력한다.
'''
# (Problem statement above, in Korean: read N values, then answer M count
# queries — how many times does each query value occur in the array?)
N = int(input())
# assumes arr is sorted ascending (the binary searches below require it) — TODO confirm against 3_input.txt
arr = list(map(int, input().split()))
M = int(input())
find = list(map(int, input().split()))  # the M query values
def lowerSearch(s, e, where):
    """Lower bound: first index in the sorted global arr[s:e] with arr[i] >= where.

    Returns e when every element is smaller than `where`.
    """
    global arr
    lo, hi = s, e
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] < where:
            lo = mid + 1
        else:
            hi = mid
    return hi
def upperSearch(s, e, where):
    """Upper bound: first index in the sorted global arr[s:e] with arr[i] > where.

    Returns e when every element is <= `where`.
    """
    global arr
    lo, hi = s, e
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] <= where:
            lo = mid + 1
        else:
            hi = mid
    return hi
for i in range(M):
    # Occurrence count = upper bound - lower bound in the sorted array.
    low = lowerSearch(0, N, find[i])
    up = upperSearch(0, N, find[i])
    if low != up: # value present: the bounds bracket its run
        print(up-low, end=' ')
    else:
        print(0,end=' ')
|
[
"leeiopd@hanmail.net"
] |
leeiopd@hanmail.net
|
9c7e0ec9ac281a7e422bfd1d6657a9deae3fdd71
|
5b9035dbfe0750e9933728f9631ad7a183dd3429
|
/18/01/Pool.py
|
370cc9e9856dfee8cce0818fc689057d3fd7a977
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201709
|
271efbd4f337d912d0ca958a621eb2a040091528
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
refs/heads/master
| 2021-01-21T12:16:21.950493
| 2017-09-30T00:02:34
| 2017-09-30T00:02:34
| 102,058,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
import weakref
import csv
class ConstMeta(type):
    """Metaclass that turns a class into a read-only module-level constant holder.

    Defining a class with this metaclass replaces sys.modules[name] with an
    instance of that class, so attribute assignment is routed through
    __setattr__ below and re-assignment raises ConstError.
    """
    class ConstError(TypeError): pass
    def __init__(self, name, bases, dict):
        super(ConstMeta, self).__init__(name, bases, dict)
        import sys
        # Substitute an instance of the class for its module entry.
        sys.modules[name]=self()
    def __setattr__(self, name, value):
        # Forbid re-assignment of an existing attribute (read-only constants).
        # (The error text is Japanese: "readonly. Re-assignment forbidden.")
        if name in self.__dict__.keys(): raise self.ConstError('readonly。再代入禁止です。')
        super(ConstMeta, self).__setattr__(name, value)
class Pool:
    """Cache of Human records keyed by id, loaded on demand from Humans.csv.

    __Pool holds strong references; __WeakPool is a weak-valued mirror.
    NOTE(review): __Pool/__WeakPool are only created in __new__, so an
    instance must be constructed before Get() is usable; also, entries added
    to __Pool after construction are NOT visible in __WeakPool (it is a copy
    made once), and namedtuple instances do not support weak references —
    the caching design needs a deeper rework than this review performs.
    """

    def __new__(cls):
        cls.__Pool = {}
        cls.__WeakPool = weakref.WeakValueDictionary(cls.__Pool)
        print(dir(cls))
        return super().__new__(cls)

    @classmethod
    def Get(cls, _id):
        """Return the record for _id, reading it from the CSV on a cache miss."""
        if _id in cls.__WeakPool: return cls.__WeakPool[_id]
        else:
            target = cls.__Read(_id)
            # BUG FIX: the message used {_id} but lacked the f-string prefix.
            if None is target: raise ValueError(f'指定したidのデータが存在しませんでした。: _id={_id}')
            cls.__Pool[target.Id] = target
            return cls.__WeakPool[_id]

    @classmethod
    def Release(cls, _id):
        """Drop the strong reference for _id, allowing collection."""
        if _id in cls.__Pool: del cls.__Pool[_id]

    @classmethod
    def __Read(cls, _id):
        """Scan Humans.csv for the row whose first column equals _id."""
        import collections  # local import: the module header does not import it
        # BUG FIX: the csv module has no read(); open the file and wrap it
        # in csv.reader instead.
        with open('Humans.csv', newline='') as f:
            reader = csv.reader(f)
            header = next(reader)  # consume the header row; reuse as field names
            print(header)
            Human = collections.namedtuple('Humans', header)
            for row in reader:
                # BUG FIX: reader rows are lists, not strings — the original
                # called row.strip(); skip empty rows by length instead.
                if 0 == len(row): continue
                # BUG FIX: was Human(','.split(row)) (arguments reversed and
                # a single-field call); build the record from the row fields.
                if row[0] == _id: return Human(*row)
        return None
if __name__ == '__main__':
    # NOTE(review): Pool.Get is called without ever instantiating Pool, yet
    # __Pool/__WeakPool are only created in __new__ — confirm this demo
    # actually runs (it looks like it would raise AttributeError).
    h0 = Pool.Get(0)
    print(h0)
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
07b293d229357fb7a30af429f26cc27c3258bcc2
|
a7fd2ed77661ed0e32d75678bdfbb448a4b1a38d
|
/ABC/ABC169/E.py
|
83299df85f453d27cbba2a130895d08c020b6e77
|
[] |
no_license
|
Rikuo-git/AtCoder
|
218a976a96f961371f4c1f4a8bf0e4ef7a78e09c
|
e7fe5db837acba11aed890181d429517c0f4bedc
|
refs/heads/master
| 2023-03-03T10:07:09.825074
| 2021-02-15T11:37:10
| 2021-02-15T11:37:10
| 266,530,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# my solution
import numpy as np
# Read N from the first line, then one pair of ints per following line.
(n,),*l = [[*map(int,i.split())]for i in open(0)]
# Element-wise medians of the A and B columns.
a,b = np.median(l,axis=0)
if n%2>0:
    # Odd N: the answer is the count of integers between the two medians.
    print(int(b-a+1))
else:
    # Even N: medians are halves, so count half-steps (doubled range).
    print(int(b*2-a*2)+1)
# (translated) odd: values between the medians; even: the doubled span.
# shortest variant below — mathematically the same approach.
# NOTE(review): open(0) is read a second time here, but stdin was already
# consumed above; the two solutions cannot both run in a single pass.
n,*l=map(int,open(0).read().split())
m,M=sorted(l[::2]),sorted(l[1::2])
d=n//2
print(M[d]-m[d]+1+(M[d-1]+-m[d-1])*(n%2^1))
|
[
"rikuo.takahashi@keio.jp"
] |
rikuo.takahashi@keio.jp
|
43d4c7d2d7bb6f1ecbf5e63561d6f9a6cec1f7ee
|
b891b6f5f51750a95c4b4ad5766cc63431ad2799
|
/config.py
|
0618a3ed2bdd25bc6efb6da48cbc927c5bb0eb49
|
[
"MIT"
] |
permissive
|
dennisnyamweya/pitching
|
b12a75f5681289ee70cab65eeb19da5c35c7e718
|
9c338d3f496f9855b0a9233579f9aa4d3c0d6464
|
refs/heads/master
| 2022-09-26T13:55:22.784323
| 2019-08-06T04:27:52
| 2019-08-06T04:27:52
| 200,616,118
| 0
| 0
|
MIT
| 2022-09-16T18:07:47
| 2019-08-05T08:36:44
|
Python
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
import os
class Config:
    """Main configurations class"""
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # NOTE(review): database credentials are hardcoded in source — move them
    # to environment variables like the mail credentials below.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:123@localhost/pitchy'
    # NOTE(review): SECRET_KEY is hardcoded; load from the environment instead.
    SECRET_KEY = "try harder"
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
    # Outgoing mail goes through Gmail over STARTTLS (port 587).
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    """Production configuration class that inherits from the main configurations class"""
    # DATABASE_URL is expected in the environment (e.g. set by the host platform).
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
    """Configuration class for development stage of the app."""
    # Enable the interactive debugger and auto-reload during development.
    DEBUG = True
# Lookup table mapping an environment name to its config class; presumably
# consumed by an application factory (create_app) -- verify against caller.
config_options = {
    'development': DevConfig,
    'production': ProdConfig
}
|
[
"denisnyamweya813@gmail.com"
] |
denisnyamweya813@gmail.com
|
431b96040b15421c2067d65fcbc5cc24243089f3
|
3929d28489c8cf53b7d828998ba48e618ee08d08
|
/example_django_react_templatetags/runtests.py
|
89cce13f9e26c82c47c886b3fc007c977217adc1
|
[
"MIT"
] |
permissive
|
EriSilver/django-react-templatetags
|
89e0c08ff54a1a4b5cf96e4848a44fd173172072
|
b11dd2f1802015589621c3c173850355969c88cf
|
refs/heads/main
| 2023-05-27T04:07:29.710783
| 2021-05-25T19:22:06
| 2021-05-25T19:22:06
| 386,394,693
| 1
| 0
|
MIT
| 2021-07-15T18:52:56
| 2021-07-15T18:52:55
| null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
import os
import sys
import argparse
from django.core.management import execute_from_command_line
os.environ["DJANGO_SETTINGS_MODULE"] = "django_react_templatetags.tests.demosite.settings"
def runtests():
    """Run the Django test suite for the demo site.

    Any command-line arguments not recognised by this script are forwarded
    to Django's ``test`` management command.
    """
    # parse_known_args() splits argv into (recognised, leftover); the
    # recognised namespace is unused, so don't bind it to a name.
    _, rest = argparse.ArgumentParser().parse_known_args()
    argv = [sys.argv[0], "test"] + rest
    execute_from_command_line(argv)


if __name__ == "__main__":
    runtests()
|
[
"martin@marteinn.se"
] |
martin@marteinn.se
|
6aba396a479895a42694694b4f4d0f13154ed4bc
|
da687718aa8ce62974090af63d25e057262e9dfe
|
/cap18-Interface-GUI-Tkinter/extras/02_tutopoint/08_listBox.py
|
2d5af720f62d7bbd09ee2a26784e198af2b8b98a
|
[] |
no_license
|
frclasso/revisao_Python_modulo1
|
77928fa4409c97d49cc7deccdf291f44c337d290
|
1e83d0ef9657440db46a8e84b136ac5f9a7c556e
|
refs/heads/master
| 2020-06-25T05:37:28.768343
| 2019-07-27T22:23:58
| 2019-07-27T22:23:58
| 199,217,969
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
#!/usr/bin/env python3
from tkinter import *

# Minimal Listbox demo: display a fixed set of language names in a window.
# (The "Djago" spelling is preserved from the original entries.)
root = Tk()
language_box = Listbox(root)
for position, language in enumerate(
        ("Python", "Perl", "C", "Julia", "Djago", "Go"), start=1):
    language_box.insert(position, language)
language_box.pack()
root.mainloop()
|
[
"frcalsso@yahoo.com.br"
] |
frcalsso@yahoo.com.br
|
521b4bf3657fd33b18f0dd8c026ee69df493d283
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/batch_create_subnet_tags_request.py
|
9c6ab367a7d382993a6806837feaf9a13b280a97
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,003
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchCreateSubnetTagsRequest:
    """Request model for the batch "create subnet tags" VPC API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'subnet_id': 'str',
        'body': 'BatchCreateSubnetTagsRequestBody'
    }

    attribute_map = {
        'subnet_id': 'subnet_id',
        'body': 'body'
    }

    def __init__(self, subnet_id=None, body=None):
        """BatchCreateSubnetTagsRequest

        The model defined in huaweicloud sdk

        :param subnet_id: Subnet ID
        :type subnet_id: str
        :param body: Body of the BatchCreateSubnetTagsRequest
        :type body: :class:`huaweicloudsdkvpc.v2.BatchCreateSubnetTagsRequestBody`
        """

        self._subnet_id = None
        self._body = None
        self.discriminator = None

        # subnet_id is assigned unconditionally; body only when supplied,
        # so an omitted body stays None rather than overwriting via setter.
        self.subnet_id = subnet_id
        if body is not None:
            self.body = body

    @property
    def subnet_id(self):
        """Gets the subnet_id of this BatchCreateSubnetTagsRequest.

        Subnet ID

        :return: The subnet_id of this BatchCreateSubnetTagsRequest.
        :rtype: str
        """
        return self._subnet_id

    @subnet_id.setter
    def subnet_id(self, subnet_id):
        """Sets the subnet_id of this BatchCreateSubnetTagsRequest.

        Subnet ID

        :param subnet_id: The subnet_id of this BatchCreateSubnetTagsRequest.
        :type subnet_id: str
        """
        self._subnet_id = subnet_id

    @property
    def body(self):
        """Gets the body of this BatchCreateSubnetTagsRequest.

        :return: The body of this BatchCreateSubnetTagsRequest.
        :rtype: :class:`huaweicloudsdkvpc.v2.BatchCreateSubnetTagsRequestBody`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this BatchCreateSubnetTagsRequest.

        :param body: The body of this BatchCreateSubnetTagsRequest.
        :type body: :class:`huaweicloudsdkvpc.v2.BatchCreateSubnetTagsRequestBody`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts; mask any
        # attribute listed in sensitive_list.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchCreateSubnetTagsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
cffe50dd97a932ebc3250fdbf9a6349b509f3431
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/powerbidedicated/azure-mgmt-powerbidedicated/generated_samples/create_auto_scale_vcore.py
|
67ad65f12c53cbe937f4d46b56b858ef521762f8
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.powerbidedicated import PowerBIDedicated
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-powerbidedicated
# USAGE
python create_auto_scale_vcore.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create an auto-scale v-core via the Power BI Dedicated management API.

    Performs a live network call; requires AZURE_CLIENT_ID, AZURE_TENANT_ID
    and AZURE_CLIENT_SECRET in the environment (see the module docstring).
    """
    client = PowerBIDedicated(
        credential=DefaultAzureCredential(),
        subscription_id="613192d7-503f-477a-9cfe-4efc3ee2bd60",
    )

    # Parameter values mirror the createAutoScaleVCore.json example from the
    # REST specification referenced below.
    response = client.auto_scale_vcores.create(
        resource_group_name="TestRG",
        vcore_name="testvcore",
        v_core_parameters={
            "location": "West US",
            "properties": {"capacityLimit": 10, "capacityObjectId": "a28f00bd-5330-4572-88f1-fa883e074785"},
            "sku": {"capacity": 0, "name": "AutoScale", "tier": "AutoScale"},
            "tags": {"testKey": "testValue"},
        },
    )
    print(response)


# x-ms-original-file: specification/powerbidedicated/resource-manager/Microsoft.PowerBIdedicated/stable/2021-01-01/examples/createAutoScaleVCore.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
3739f0b84f86b376235cade51f9071d7d62c8b18
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/eraBhwF8HkJDAa2pS_7.py
|
4d28a63e1772e975610e4ed6a58348f01d38c9a7
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
"""
A group of pirates each have a distribution of gold coins, which can be
represented as a list:
[3, 9, 4, 5, 5]
# Pirate 1 has 3 gold, Pirate 2 has 9 gold, etc.
The difference between each pirate's share of gold and that of the richest
pirate is represented as:
[6, 0, 5, 4, 4]
# Since 6 = 9 - 3, 0 = 9 - 9, 4 = 9 - 5, etc.
Pirates have a keen sense of fairness, and a pirate will kill the others if he
deems his share to be too little. Each pirate has a **unique inequality
threshold** \- the maximum difference he is willing to tolerate before he
kills his comrades.
Using the above gold distribution:
[5, 0, 5, 5, 5]
# Pirates killed, since 5 < 6.
# 5 is Pirate 1's inequality distribution and 6 is his gold difference.
[7, 0, 5, 5, 5]
# Pirate 1 is satisfied, since 7 > 6.
# All other pirates are satisfied as well.
Given a distribution of coins and a list of inequality thresholds, create a
function that returns `True` if any pirates are killed, or `False` otherwise.
### Examples
pirates_killed([3, 5, 8, 3, 4], [10, 4, 2, 5, 5]) ➞ False
pirates_killed([3, 5, 8, 3, 4], [10, 4, 2, 5, 1]) ➞ True
pirates_killed([3, 3, 10], [7, 7, 0]) ➞ False
pirates_killed([3, 3, 10], [6, 6, 0]) ➞ True
### Notes
* A pirate kills if the difference in his share of gold from the richest pirate is **strictly greater** than his **inequality threshold**.
* Gold and inequality distribution lists are both ordered the same. (e.g. Pirate 1 is index 0 for both lists, Pirate 2 is index 1 for both lists, etc).
"""
def pirates_killed(gold, tolerance):
    """Return True if any pirate kills, given gold shares and thresholds.

    A pirate kills when the richest pirate's share minus his own share is
    strictly greater than his inequality threshold.

    :param gold: per-pirate gold counts.
    :param tolerance: per-pirate inequality thresholds, same order as gold.
    :return: True if at least one pirate's deficit exceeds his threshold.
    """
    if not gold:
        # No pirates: nobody can be killed.
        return False
    richest = max(gold)
    # Strictly greater: a deficit exactly equal to the threshold is tolerated.
    return any(richest - share > limit for share, limit in zip(gold, tolerance))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
1a7a445a0183da5b57d25a858acc8af2e8320d0d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02833/s059211372.py
|
ed4bbbaccfabc03d67edc5bcf26aebe9e1659cf8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
# Read n as text first so we can check its digit count as well as its value.
n = input()
m = len(n)  # number of digits of n
n = int(n)
if n%2==1:
    # Odd n: the product n*(n-2)*...*1 contains no factor of 2,
    # hence no trailing zeros.
    print(0)
else:
    if m==1:
        # Single-digit even n: too small to contain a factor of 10.
        print(0)
    else:
        ans = 0
        i = 1
        # Count factors of 5 among the even terms of the double factorial
        # n!! = n*(n-2)*...*2: for each i, n//(2*5^i) terms are divisible
        # by 5^i.  Factors of 2 are abundant, so the 5-count is the number
        # of trailing zeros.  (Presumably AtCoder's "double factorial
        # trailing zeros" problem -- TODO confirm against the statement.)
        while True:
            ans_plus=n//(2*5**i)
            if ans_plus==0:
                break
            ans += ans_plus
            i += 1
        print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f76e2a9511e5d7f982695f3e4c56ae5960b336e7
|
771d7c30e9984eb07ba88f0feb2a52c0ee510549
|
/备份/1804爬虫/爬虫文件/第三天 (1)/mzcookie_opener.py
|
c7d9dc7b5c5721f122a7802a30b94c735c19698d
|
[] |
no_license
|
1615961606/-test
|
5eae5cab4e82136ecf8f4cbdb9990c3bb9e4839f
|
81a822d303a07310dafe2af612f932d9f34503c3
|
refs/heads/master
| 2020-03-19T01:07:01.851584
| 2019-04-08T10:54:35
| 2019-04-08T10:54:35
| 135,523,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Sometimes we need to persist fetched cookies to a local file;
# MozillaCookieJar stores them in the Mozilla cookies.txt format.
from http import cookiejar
from urllib import request

# 1. Name of the file the cookies will be saved to.
filename = 'cookie.txt'
# 2. Cookie-jar object that manages and stores the cookies.
mz_cookiejar = cookiejar.MozillaCookieJar(filename)
# 3. HTTPCookieProcessor handler that plugs the jar into urllib.
handler = request.HTTPCookieProcessor(mz_cookiejar)
# Build a custom opener that routes requests through the cookie handler.
opener = request.build_opener(handler)
# Issue a request through the opener so response cookies get captured.
req = request.Request('http://www.baidu.com/')
response = opener.open(req)
print(response.status)
# Persist the captured cookies with save().
mz_cookiejar.save()
|
[
"c1615961606@163.com"
] |
c1615961606@163.com
|
d76568dec057241b650414033a7585fd422344b3
|
549317bc0a7230ec163914c75f75dd008900c57b
|
/pyroomacoustics/tests/test_room_is_insided.py
|
3d49962456b8076f646ddc7bbaee2b7887245503
|
[
"MIT"
] |
permissive
|
oucxlw/pyroomacoustics
|
0bb633427cd7ce3e93392cdc9d0bc3afc5f2dbf3
|
0adc91579c9c6daf1b73d2c4863a9fc66b308dbb
|
refs/heads/master
| 2023-06-17T17:43:49.743201
| 2021-07-21T05:36:46
| 2021-07-21T05:36:46
| 288,884,904
| 1
| 0
|
MIT
| 2021-07-21T05:36:47
| 2020-08-20T02:22:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
import numpy as np
import pyroomacoustics as pra
def test_room_is_inside():
    """Regression test for Room.is_inside border handling in 2D and 3D.

    Historically is_inside could misclassify a source placed at the same
    height as one of the polygon corners; this exercises that case plus
    border points with include_borders on and off.
    """
    # fix the seed for repeatable testing
    np.random.seed(0)

    # This was a problematic case:
    # if the source is placed at the same height as one of the corners
    # the test would fail, even though the source is in the room
    floorplan = [[0, 6, 6, 2, 0],
                 [0, 0, 5, 5, 3]]
    source_loc = [2, 3]  # same y-coordinate as the corner at [0, 3]

    room = pra.Room.from_corners(floorplan)
    room.add_source(source_loc)

    for _ in range(100):
        # because the test is randomized, let's check many times
        assert room.is_inside([0, 0], include_borders=True)
        assert not room.is_inside([0, 0], include_borders=False)
        assert room.is_inside([3, 0], include_borders=True)
        assert not room.is_inside([3, 0], include_borders=False)
        assert room.is_inside([1, 4], include_borders=True)
        assert not room.is_inside([1, 4], include_borders=False)
        assert room.is_inside([0, 1], include_borders=True)
        assert not room.is_inside([0, 1], include_borders=False)
        assert not room.is_inside([0.5, 4], include_borders=False)

    # now test in 3D
    room.extrude(4.)

    for _ in range(100):
        # because the test is randomized, let's check many times
        assert room.is_inside([2, 3, 1.7])
        # (the original asserted this identical line twice; once is enough)
        assert not room.is_inside([0.5, 4, 1.8])
        assert room.is_inside([0, 0, 0], include_borders=True)
        assert not room.is_inside([0, 0, 0], include_borders=False)
        assert room.is_inside([3, 0, 0], include_borders=True)
        assert not room.is_inside([3, 0, 0], include_borders=False)
        assert room.is_inside([0, 1, 0], include_borders=True)
        assert not room.is_inside([0, 1, 0], include_borders=False)
        assert room.is_inside([3, 2, 0], include_borders=True)
        assert not room.is_inside([3, 2, 0], include_borders=False)
        assert room.is_inside([1, 4, 3], include_borders=True)
        assert not room.is_inside([1, 4, 3], include_borders=False)
        assert not room.is_inside([2, 2, 7])
        assert not room.is_inside([2, 2, -7])


if __name__ == '__main__':
    test_room_is_inside()
|
[
"fakufaku@gmail.com"
] |
fakufaku@gmail.com
|
f832f81e49e1c70f75a87ad822f0a71408bcd878
|
dc221edce0ad617aac3b9ad8f4f347ff84f56bf9
|
/.history/env/sim_20200805143251.py
|
242268c2da33aec8fc7e1b122fd06d49cdc6e028
|
[] |
no_license
|
zlm05170/cacontroller
|
310014c83ecf130643230eba87990e635fe1575f
|
e76d2eb5d58d6adfe7823e0dcd0059027c52b6bc
|
refs/heads/master
| 2022-12-21T08:05:58.315017
| 2020-09-23T11:45:07
| 2020-09-23T11:45:07
| 284,527,141
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
class Sim():
    """Minimal simulation-loop container.

    Holds a running flag and a list of actor objects; each actor must
    expose an ``update()`` method which is called once per simulation step.
    """

    def __init__(self):
        # The loop keeps going while this is True; nothing in this class
        # ever resets it, so the caller is responsible for stopping.
        self.running = True
        self.actor_list = []

    def isRunning(self):
        """Return whether the simulation loop should keep running."""
        return self.running

    def update(self):
        """Advance the simulation one step by updating every actor."""
        for actor in self.actor_list:
            actor.update()

    def add_actor(self, actor=None):
        """Register *actor* so it participates in future update() calls.

        The original implementation took no argument and never registered
        anything; calling with no argument keeps that legacy behaviour
        (prints 0, registers nothing) for backward compatibility.
        """
        if actor is None:
            print(0)  # legacy placeholder behaviour
        else:
            self.actor_list.append(actor)
# NOTE(review): demo driver -- `running` is never set to False and no actors
# are ever registered, so this loop busy-spins forever doing nothing.  The
# file lives under a .history autosave path, so this is presumably
# work-in-progress; confirm before using.
sim = Sim()
while sim.isRunning():
    sim.update()
|
[
"angelxx05170@gmail.com"
] |
angelxx05170@gmail.com
|
e35f76e578a9be895bc6fe5bd17d24525b6c2d83
|
5ae01ab82fcdedbdd70707b825313c40fb373fa3
|
/scripts/charonInterpreter/parsers/MaterialBlock/IncompleteIonizationDonor/charonLineParserIonizationEnergy.py
|
2ef6d351a57e960f50d56e19dfefba626502aca9
|
[] |
no_license
|
worthenmanufacturing/tcad-charon
|
efc19f770252656ecf0850e7bc4e78fa4d62cf9e
|
37f103306952a08d0e769767fe9391716246a83d
|
refs/heads/main
| 2023-08-23T02:39:38.472864
| 2021-10-29T20:15:15
| 2021-10-29T20:15:15
| 488,068,897
| 0
| 0
| null | 2022-05-03T03:44:45
| 2022-05-03T03:44:45
| null |
UTF-8
|
Python
| false
| false
| 5,602
|
py
|
from __future__ import print_function
import copy
class charonLineParserIonizationEnergy:
    "IonizationEnergy parser"
    # Translates an interpreter input line of the form
    #   "ionization energy = <value>"
    # into the XML parameter line(s) used by the Charon input deck.

    def __init__(self):
        # Register the parsing keys
        self.parserName = "IonizationEnergy"
        self.parsingKey = "ionization energy"
        self.parsingKeyOptional = []
        self.interpreterHelpLine = "ionization energy = {valueIonizationEnergy} "
        self.interpreterQuickHelp = "Specify incomplete ionization energy for donor dopant type."
        self.interpreterLongHelp = "Specify incomplete ionization energy for donor dopant type."
        # Register the xml required lines
        self.xmlRequiredLines = []
        self.xmlRequiredLinePriority = []
        self.xmlRequiredLines.append("Charon->Closure Models->{MaterialBlockName}->Incomplete Ionized Donor->Model,Ionization Energy,double,{valueIonizationEnergy}")
        self.xmlRequiredLinePriority.append(2)
        self.xmlNewRequiredLines = []
        # Register the xml required arguments and their indexes
        self.xmlRequiredArgument = []
        self.xmlRequiredArgument.append("{valueIonizationEnergy}")
        self.xmlRequiredArgumentIndexes = []
        # Index (as a string) of the input-line token substituted for the
        # {valueIonizationEnergy} placeholder.
        self.xmlRequiredArgumentIndexes.append("3")
        # Register the xml optional lines
        self.xmlOptionalLines = [[]]
        self.xmlOptionalLinePriority = [[]]
        # Register the xml optional arguments and their indexes
        self.xmlOptionalArgument = []
        self.xmlOptionalArgumentIndexes = []
        # Register the xml default lines
        self.xmlDefaultLines = []
        self.xmlDefaultLinePriority = []
        # Accumulated output of generateXML(); note these persist across
        # calls on the same parser instance.
        self.xmlReturned = []
        self.xmlPriorityCode = []

    def isThisMe(self,tokenizer,line):
        # Return True when *line* begins with this parser's key,
        # compared token-by-token, case-insensitively.
        # Tokenize the line
        lineTokens = tokenizer.tokenize(line)
        # Tokenize the parsing key
        parsingTokens = self.parsingKey.split()
        returnType = True
        for itoken in range(len(parsingTokens)):
            if itoken+1 > len(lineTokens):
                return False
            if lineTokens[itoken].lower() != parsingTokens[itoken].lower():
                returnType = False
        return returnType

    def getName(self):
        # Return parser name
        return self.parserName

    def getHelp(self,verbosity):
        # Return help content: (usage line, long or quick help text).
        if verbosity.lower() == "long":
            return (self.interpreterHelpLine,self.interpreterLongHelp)
        else:
            return (self.interpreterHelpLine,self.interpreterQuickHelp)

    def generateXML(self,tokenizer,line):
        # Produce (xml lines, priority codes) for this input line by
        # substituting input tokens into the registered templates.
        # Tokenize the line
        lineTokens = tokenizer.tokenize(line)
        self.xmlNewRequiredLines[:] = []
        for xL in self.xmlRequiredLines:
            self.xmlNewRequiredLines.append(xL)
        for ipar in range(len(self.xmlRequiredArgument)):
            # NOTE(review): str.replace returns a new string and this result
            # is discarded, so the next statement has no effect -- presumably
            # leftover generated code; the real substitution happens below.
            line.replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
            for iRLine in range(len(self.xmlRequiredLines)):
                self.xmlNewRequiredLines[iRLine]=self.xmlNewRequiredLines[iRLine].replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
        for index,xmlLine in enumerate(self.xmlNewRequiredLines):
            self.xmlReturned.append(xmlLine)
            self.xmlPriorityCode.append(self.xmlRequiredLinePriority[index]) #required lines have priority code 2
        # Look over input line to see if any options are called out.
        optCounter = 0
        optIndex = 0
        for optKey in self.parsingKeyOptional:
            # Tokenize the opt keys
            foundOptionalKey = False
            optKeyTokens = optKey.split()
            for iLT in range(len(lineTokens)):
                if lineTokens[iLT].lower() == optKeyTokens[0]:
                    if len(optKeyTokens) == 1:
                        optIndex = iLT
                        foundOptionalKey = True
                    else:
                        # Multi-token key: all remaining key tokens must match
                        # consecutive line tokens for the key to count as found.
                        for iPK in range(len(optKeyTokens)-1):
                            optIndex = iLT
                            if iLT+iPK+1 > len(lineTokens)-1:
                                continue
                            if optKeyTokens[iPK+1] == lineTokens[iLT+iPK+1].lower():
                                if iPK+2 == len(optKeyTokens):
                                    foundOptionalKey = True
                            else:
                                continue
            #Found the key, now create the xml line
            if foundOptionalKey == True:
                self.Returned=copy.deepcopy(self.xmlOptionalLines[optCounter])
                for iopt in range(len(self.xmlOptionalLines[optCounter])):
                    # Substitute optional arguments (offsets relative to the
                    # key position) and then any required arguments.
                    for ipar in range(len(self.xmlOptionalArgument[optCounter])):
                        self.Returned[iopt] = self.Returned[iopt].replace(self.xmlOptionalArgument[optCounter][ipar],lineTokens[optIndex+int(self.xmlOptionalArgumentIndexes[optCounter][ipar])])
                    for ipar in range(len(self.xmlRequiredArgument)):
                        self.Returned[iopt] = self.Returned[iopt].replace(self.xmlRequiredArgument[ipar],lineTokens[int(self.xmlRequiredArgumentIndexes[ipar])])
                    self.xmlReturned.append(self.Returned[iopt])
                    self.xmlPriorityCode.append(2) #optional lines have priority code 2
            optCounter += 1
        for xmlLine in self.xmlDefaultLines:
            self.xmlReturned.append(xmlLine)
            self.xmlPriorityCode.append(1) #optional lines have priority code 1
        return (self.xmlReturned,self.xmlPriorityCode)
|
[
"juan@tcad.com"
] |
juan@tcad.com
|
a50c8907cabee913bacdeff6e3fb16fbd0d147ca
|
47b49ee4d14254cea00a839123fe5d68f0938959
|
/notifierlib/channels/jabber.py
|
109b7df750b2db4e45884d0d3509212e97fcd8d8
|
[
"MIT"
] |
permissive
|
wefner/notifierlib
|
35d8b9c754803821462e647239bfd0be564c0a40
|
0eeec7aef278f66262b1dceab296b5f115e372c3
|
refs/heads/master
| 2021-06-30T22:27:06.147554
| 2017-09-19T13:14:37
| 2017-09-19T13:14:37
| 104,245,027
| 0
| 0
| null | 2017-09-20T17:12:26
| 2017-09-20T17:12:26
| null |
UTF-8
|
Python
| false
| false
| 6,372
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: jabber.py
import sleekxmpp
import logging
from notifierlib.notifierlib import Channel
__author__ = '''Costas Tyfoxylos <costas.tyf@gmail.com>, Argiris Gounaris <agounaris@gmail.com>'''
__docformat__ = 'plaintext'
__date__ = '''19-09-2017'''
class XmppClient(sleekxmpp.ClientXMPP):
    """A basic SleekXMPP bot, logs in, sends message, logs out.

    Note: the constructor itself connects and blocks (process(block=True))
    until the message is delivered, so instantiation performs network I/O.
    """

    def __init__(self,
                 user_id,
                 password,
                 recipient,
                 message,
                 server,
                 port,
                 tls=False,
                 ssl=True,
                 reattempt=False):
        super(XmppClient, self).__init__(user_id, password)
        self._logger = logging.getLogger(self.__class__.__name__)
        self.recipient = recipient
        self.message = message
        self.server = server
        self.port = port
        self.tls = tls
        self.ssl = ssl
        self.reattempt = reattempt
        # Send the message as soon as the XMPP session is established.
        self.add_event_handler('session_start', self.start)
        self.register_plugin('xep_0030')  # Service Discovery
        self.register_plugin('xep_0199')  # XMPP Ping
        # Connect to the XMPP server and start processing XMPP stanzas.
        if not self.connect((self.server, self.port),
                            use_tls=self.tls,
                            use_ssl=self.ssl,
                            reattempt=self.reattempt):
            message = ('Could not connect to '
                       '{server}:{port}').format(server=self.server,
                                                 port=self.port)
            self._logger.error(message)
            # NOTE(review): SyntaxError is an odd exception type for a
            # connection failure -- presumably ConnectionError was meant;
            # callers may already catch SyntaxError, so confirm before changing.
            raise SyntaxError(message)
        self.process(block=True)

    def start(self, event):
        # session_start callback: deliver the message and disconnect.
        _ = event  # noqa
        self.send_message(mto=self.recipient,
                          mbody=self.message,
                          mtype='chat')
        self.disconnect(wait=True)
class XmppGroupClient(sleekxmpp.ClientXMPP):
    """A basic SleekXMPP bot, logs in, sends message, logs out.

    Group-chat variant: joins a MUC room and sends the message there.
    As with XmppClient, the constructor connects and blocks until done.
    """

    def __init__(self,
                 user_id,
                 password,
                 room,
                 nickname,
                 message,
                 server,
                 port,
                 room_password=None,
                 tls=False,
                 ssl=True,
                 reattempt=False):
        super(XmppGroupClient, self).__init__(user_id, password)
        self._logger = logging.getLogger(self.__class__.__name__)
        self.room = room
        self.room_password = room_password
        self.nickname = nickname
        self.message = message
        self.server = server
        self.port = port
        self.tls = tls
        self.ssl = ssl
        self.reattempt = reattempt
        # Join the room and send as soon as the XMPP session is established.
        self.add_event_handler('session_start', self.start)
        self.register_plugin('xep_0030')  # Service Discovery
        self.register_plugin('xep_0045')  # Multi-User Chat
        self.register_plugin('xep_0199')  # XMPP Ping
        # Connect to the XMPP server and start processing XMPP stanzas.
        if not self.connect((self.server, self.port),
                            use_tls=self.tls,
                            use_ssl=self.ssl,
                            reattempt=self.reattempt):
            message = ('Could not connect to '
                       '{server}:{port}').format(server=self.server,
                                                 port=self.port)
            self._logger.error(message)
            # NOTE(review): SyntaxError here mirrors XmppClient; presumably
            # a connection-related exception was intended -- confirm.
            raise SyntaxError(message)
        self.process(block=True)

    def start(self, event):
        # session_start callback: join the MUC room, send, disconnect.
        _ = event  # noqa
        self.plugin['xep_0045'].joinMUC(self.room,
                                        self.nickname,
                                        # If a room password is needed, use:
                                        password=self.room_password,
                                        wait=True)
        self.send_message(mto=self.room,
                          mbody=self.message,
                          mtype='groupchat')
        self.disconnect(wait=True)
class Jabber(Channel):
    """Notifierlib channel that delivers a message to a single jabber user.

    Connection parameters are captured at construction time; each notify()
    call spins up a short-lived XmppClient that logs in, sends the message
    and disconnects.
    """

    def __init__(self,
                 name,
                 user_id,
                 password,
                 recipient_id,
                 server,
                 port,
                 tls=False,
                 ssl=True,
                 reattempt=False):
        super(Jabber, self).__init__(name)
        # Stash the connection settings for later notify() calls.
        self.user = user_id
        self.password = password
        self.server = server
        self.recipient = recipient_id
        self.port = port
        self.tls = tls
        self.ssl = ssl
        self.reattempt = reattempt

    def notify(self, **kwargs):
        """Send kwargs['message'] to the configured recipient; always True."""
        # XmppClient does all the work in its constructor (connect, send,
        # disconnect), so there is no object worth keeping around.
        XmppClient(self.user,
                   self.password,
                   self.recipient,
                   kwargs.get('message'),
                   self.server,
                   self.port,
                   self.tls,
                   self.ssl,
                   self.reattempt)
        return True
class JabberGroup(Channel):
    """Notifierlib channel that delivers a message to a jabber MUC room.

    Mirrors Jabber, but each notify() call creates a short-lived
    XmppGroupClient that joins the room, sends and disconnects.
    """

    def __init__(self,
                 name,
                 user_id,
                 password,
                 room,
                 nickname,
                 server,
                 port,
                 room_password=None,
                 tls=False,
                 ssl=True,
                 reattempt=False):
        super(JabberGroup, self).__init__(name)
        # Stash connection and room settings for later notify() calls.
        self.user = user_id
        self.password = password
        self.nickname = nickname
        self.room = room
        self.room_password = room_password
        self.server = server
        self.port = port
        self.tls = tls
        self.ssl = ssl
        self.reattempt = reattempt

    def notify(self, **kwargs):
        """Send kwargs['message'] to the configured room; always True."""
        # The group client connects, joins, sends and disconnects entirely
        # inside its constructor, so no reference is kept.
        XmppGroupClient(self.user,
                        self.password,
                        self.room,
                        self.nickname,
                        kwargs.get('message'),
                        self.server,
                        self.port,
                        self.room_password,
                        self.tls,
                        self.ssl,
                        self.reattempt)
        return True
|
[
"costas.tyf@gmail.com"
] |
costas.tyf@gmail.com
|
d8458f6e25f605602bc2ada1071b2c5365b26943
|
54c08823016949aa23ff1d372cf70778e6f88758
|
/raylab/policy/model_based/sampling_mixin.py
|
bea9c129c44620da8d861320dda315db82a141cb
|
[
"MIT"
] |
permissive
|
jCrompton/raylab
|
b4d41c446bc4d8d9ea42ebfdfad59c61956bfe98
|
9773d6fb942c06c65fe5297a8275f86649966abd
|
refs/heads/master
| 2022-11-05T21:59:06.704075
| 2020-06-08T15:36:20
| 2020-06-08T15:36:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,338
|
py
|
"""Environment model handling mixins for TorchPolicy."""
from dataclasses import dataclass
from typing import List
import numpy as np
import torch
from dataclasses_json import DataClassJsonMixin
from numpy.random import Generator
from ray.rllib import SampleBatch
from torch.nn import Module
@dataclass(frozen=True)
class SamplingSpec(DataClassJsonMixin):
    """Specifications for sampling from the model.

    Attributes:
        num_elites: Use this number of best performing models to sample
            transitions
        rollout_length: Length of model-based rollouts from each initial
            state extracted from input sample batch
    """

    num_elites: int = 1
    rollout_length: int = 1

    def __post_init__(self):
        # Validate eagerly with real exceptions rather than assert: asserts
        # are stripped under `python -O`, silently disabling these checks.
        if self.num_elites <= 0:
            raise ValueError("Must have at least one elite model to sample from")
        if self.rollout_length <= 0:
            raise ValueError("Length of model-based rollouts must be positive")
class ModelSamplingMixin:
    """Adds model sampling behavior to a TorchPolicy class.

    Expects:
    * A `self.reward_fn` callable that computes the reward tensors for a batch
      of transitions
    * A `self.termination_fn` callable that computes the termination tensors for
      a batch of transitions
    * A `models` attribute in `self.module`
    * A `self.config` dict attribute
    * A `model_sampling` dict in `self.config`
    * A `seed` int in `self.config`

    Attributes:
        model_sampling_spec: Specifications for model training and sampling
        elite_models: Sequence of the `num_elites` best models sorted by
            performance. Initially set using the policy's model order.
        rng: Random number generator for choosing from the elite models for
            sampling.
    """

    model_sampling_spec: SamplingSpec
    elite_models: List[Module]
    rng: Generator

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_sampling_spec = SamplingSpec.from_dict(self.config["model_sampling"])
        models = self.module.models
        num_elites = self.model_sampling_spec.num_elites
        assert num_elites <= len(models), "Cannot have more elites than models"
        # Initial elite set is simply the first `num_elites` models in
        # declaration order; refined later by setup_sampling_models().
        self.elite_models = list(models[:num_elites])
        self.rng = np.random.default_rng(self.config["seed"])

    def setup_sampling_models(self, losses: List[float]):
        """Update the elite models based on model losses.

        Args:
            losses: list of model losses following the order of the ensemble
        """
        models = self.module.models
        # NOTE(review): this keeps ALL models sorted by ascending loss, not
        # just the top `num_elites` -- rng.choice below then samples from
        # every model, not only the elites; confirm whether `[:num_elites]`
        # truncation was intended here.
        self.elite_models = [models[i] for i in np.argsort(losses)]

    @torch.no_grad()
    def generate_virtual_sample_batch(self, samples: SampleBatch) -> SampleBatch:
        """Rollout model with latest policy.

        Produces samples for populating the virtual buffer, hence no gradient
        information is retained.

        If a transition is terminal, the next transition, if any, is generated from
        the initial state passed through `samples`.

        Args:
            samples: the transitions to extract initial states from

        Returns:
            A batch of transitions sampled from the model
        """
        virtual_samples = []
        obs = init_obs = self.convert_to_tensor(samples[SampleBatch.CUR_OBS])
        for _ in range(self.model_sampling_spec.rollout_length):
            # One model is drawn at random per rollout step.
            model = self.rng.choice(self.elite_models)

            action, _ = self.module.actor.sample(obs)
            next_obs, _ = model.sample(obs, action)
            reward = self.reward_fn(obs, action, next_obs)
            done = self.termination_fn(obs, action, next_obs)

            transition = {
                SampleBatch.CUR_OBS: obs,
                SampleBatch.ACTIONS: action,
                SampleBatch.NEXT_OBS: next_obs,
                SampleBatch.REWARDS: reward,
                SampleBatch.DONES: done,
            }
            virtual_samples += [
                SampleBatch({k: v.numpy() for k, v in transition.items()})
            ]
            # Restart terminated rollouts from their original initial state;
            # non-terminated ones continue from next_obs.
            obs = torch.where(done.unsqueeze(-1), init_obs, next_obs)

        return SampleBatch.concat_samples(virtual_samples)

    @staticmethod
    def model_sampling_defaults():
        """The default configuration dict for model sampling."""
        return SamplingSpec().to_dict()
|
[
"angelolovatto@gmail.com"
] |
angelolovatto@gmail.com
|
88c81310c694092a2f288b858519b4dc9d54fdca
|
0b38b3d237044b605a519e1aadb298e254c96a6a
|
/app.py
|
98eaa7437e240c19d9f07fb20498e32121e58693
|
[] |
no_license
|
imapex/boilerplate
|
100355bec3414bd0874fc47e5ff8cdad4464b054
|
810a2de2ceb6120dd57a64324c6b8581113d348f
|
refs/heads/master
| 2023-02-03T19:54:25.074233
| 2019-08-22T15:13:21
| 2019-08-22T15:13:21
| 64,156,013
| 2
| 4
| null | 2023-02-02T06:13:08
| 2016-07-25T17:53:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
from flask import Flask, render_template, request
from flask_restful import Api
from views.topology import topology
from views.dashboard import dashboard
from views.device import device_list
from views.patterns import patterns
from api.device import Device
from api.topology import Topology
# Application and REST-API objects shared by all routes below.
app = Flask(__name__)
api = Api(app)


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')


# REST resources.
api.add_resource(Device, '/api/device')
api.add_resource(Topology, '/api/topology')

# HTML views implemented in the views/ package.
app.add_url_rule('/topology', endpoint='topology-view', view_func=topology)
app.add_url_rule('/device', endpoint='device-list', view_func=device_list)
app.add_url_rule('/dashboard', endpoint='dashboard', view_func=dashboard)
app.add_url_rule('/patterns', endpoint='patterns', view_func=patterns)

if __name__ == '__main__':
    # Development server, bound to localhost only; debug=True is not for
    # production use.
    app.run(host='127.0.0.1', port=5000, debug=True)
|
[
"kecorbin@cisco.com"
] |
kecorbin@cisco.com
|
7d41b30ed78b8b2b0274008e2dd827b812ad3c4b
|
69e6c93e5d9cc0ad3fcc8d595aff95c71609f13e
|
/tests/test_nh_32bit.py
|
2fe18a2a1d465780ee3c2bc08a8405deba44d4fa
|
[
"GPL-2.0-only",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
atsgen/tf-vrouter
|
2d27e233774bc5116e964c403f3332df7633afc7
|
c95daa24744bdeb4839f63ebd057552f18404171
|
refs/heads/master
| 2023-02-16T15:37:58.986288
| 2021-01-15T06:45:23
| 2021-01-15T06:45:23
| 290,211,517
| 0
| 0
|
BSD-2-Clause
| 2020-08-25T12:40:00
| 2020-08-25T12:39:59
| null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
#!/usr/bin/python
import os
import sys
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/lib/')
from imports import * # noqa
# anything with *test* will be assumed by pytest as a test
class TestNh32(unittest.TestCase):
    """Tests that vrouter accepts next-hop IDs wider than 16 bits (> 65k)."""

    @classmethod
    def setup_class(cls):
        # One-time vrouter test-framework setup; cleanup=True removes
        # created objects automatically after each test.
        ObjectBase.setUpClass()
        ObjectBase.set_auto_features(cleanup=True)

    @classmethod
    def teardown_class(cls):
        ObjectBase.tearDownClass()

    def setup_method(self, method):
        ObjectBase.setUp(method)

    def teardown_method(self, method):
        ObjectBase.tearDown()

    # tc to add, del nh with nhid > 65k
    def test1_nh32(self):
        # Add a Vif interface
        vif = VirtualVif(
            name="tap_1",
            ipv4_str="1.1.1.10",
            mac_str="de:ad:be:ef:00:02",
            idx=1,
            flags=None,
            nh_idx=494949,
            ipv6_str="571:3896:c427:3738:30c4:fd9f:720e:fefe")
        vif.sync()

        # Query the vif back and confirm the 32-bit nexthop id round-trips.
        vif_get = VirtualVif(
            name="tap_1",
            ipv4_str="1.1.1.10",
            mac_str="fe:ad:be:ef:00:02",
            idx=1,
            flags=None,
            h_op=constants.SANDESH_OPER_GET)
        vif_get.sync()
        self.assertEqual(494949, vif_get.get_vif_nh_id())

        # Add NH with an id above the 16-bit range.
        encap_nh = EncapNextHop(
            encap_oif_id=vif.idx(),
            encap="de ad be ef 01 02 de ad be ef 00 01 08 00",
            nh_idx=490496,
            nh_family=constants.AF_BRIDGE)
        encap_nh.sync()

        # Get the same NH back and verify id, family and type.
        nh_get = EncapNextHop(
            encap_oif_id=vif.idx(),
            encap=None,
            nh_idx=490496,
            nh_family=constants.AF_BRIDGE,
            h_op=constants.SANDESH_OPER_GET)
        nh_get.sync()
        self.assertEqual(490496, nh_get.get_nh_idx())
        self.assertEqual(constants.AF_BRIDGE, nh_get.get_nh_family())
        self.assertEqual(constants.NH_TYPE_ENCAP, nh_get.get_nh_type())

    # tc to add, del flow with nhid > 65k
    def test2_nh32(self):
        # Add vif - 10.1.1.1
        vif1 = VirtualVif(
            name="tap_1",
            ipv4_str="10.1.1.1",
            mac_str="de:ad:be:ef:00:02",
            idx=1,
            nh_idx=494949,
            flags=None)
        vif1.sync()

        # Add 2nd vif - 10.1.1.2
        vif2 = VirtualVif(
            name="tap_2",
            ipv4_str="10.1.1.2",
            mac_str="ed:da:eb:fe:00:03",
            nh_idx=474747,
            flags=None,
            idx=2)
        vif2.sync()

        # Add NH on the second vif.
        encap_nh = EncapNextHop(
            encap_oif_id=vif2.idx(),
            encap="de ad be ef 01 02 de ad be ef 00 01 08 00",
            nh_idx=474747,
            nh_family=constants.AF_BRIDGE)
        encap_nh.sync()

        # Add route which points to the NH.
        rt = BridgeRoute(
            vrf=0,
            mac_str="de:ad:be:ef:02:02",
            nh_idx=474747)
        rt.sync()

        # Add a flow whose source and flow nexthop ids are both > 65k,
        # then delete it again.
        flow = InetFlow(
            sip='1.1.1.1',
            dip='2.2.2.2',
            sport=31,
            dport=31,
            proto=17,
            action=2,
            src_nh_idx=494949,
            flow_nh_idx=594949)
        flow.sync(resp_required=True)
        flow.delete()
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
37b53d420486df7030ee6727a9bb4e5d031b98de
|
d5463d1efdf17941ca3fd79ef106a3db9be48fbc
|
/booktest/admin.py
|
61b22a340409278043c70630021506ed840b0214
|
[] |
no_license
|
holyzhang1314/test11
|
8d81a408f67bbd010c15429eecfe70ce82ab60c8
|
24f927a87d4e96a0a5632e73408c9c5abce5c8ab
|
refs/heads/master
| 2020-03-19T00:13:35.110758
| 2018-05-30T15:39:18
| 2018-05-30T15:39:18
| 135,464,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
from django.contrib import admin
from booktest.models import BookInfo,HeroInfo
# Register your models here.
class BookInfoAdmin(admin.ModelAdmin):
list_display = ['id','btitle','bpub_date']
class HeroInfoAdmin(admin.ModelAdmin):
list_display = ['id','hname','hgender','hcomment']
admin.site.register(BookInfo,BookInfoAdmin)
admin.site.register(HeroInfo,HeroInfoAdmin)
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
fa3a1c7dc5841b9a1f6b5f0dc876c827ea15a115
|
9e84a433007ed4f6b2f9fc40f17a6fc5deb8603c
|
/frontbackend/Polaris/migrations_bak/0032_auto_20190514_1130.py
|
5143de7c52d0c09775f5fb48da2b2e4d6aa5af9b
|
[] |
no_license
|
wellwang1993/glbs
|
8a654bcd2b5e06a823112b6f07f324753f8a1034
|
a96cd8a949bfae06026c2b9f9fa2ec0230997932
|
refs/heads/master
| 2020-05-16T04:56:07.019399
| 2019-07-26T11:22:21
| 2019-07-26T11:22:21
| 182,794,117
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# Generated by Django 2.1.2 on 2019-05-14 11:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Polaris', '0031_tb_fact_dnszone_info'),
]
operations = [
migrations.RemoveField(
model_name='tb_fact_view_info',
name='view_type',
),
migrations.DeleteModel(
name='tb_fact_view_info',
),
]
|
[
"root@localhost"
] |
root@localhost
|
141cdf073eba87447e04afe683e7d304141d170d
|
10920b11a22a20f9a7f63157818327f3c4e41888
|
/Final_Project_BIR/Robot_arm/demo_002.py
|
1a17f31ef610acdfaaa212131cad6479e3b77a34
|
[] |
no_license
|
dsall/computerv
|
e331b3d025c8cec0119b789107d1fef18d08f02a
|
40671d618c31ad9d9b20fc902a218a8e281098bc
|
refs/heads/master
| 2021-09-15T09:33:08.495580
| 2018-05-29T23:41:42
| 2018-05-29T23:41:42
| 135,363,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
import numpy
from arm import Arm
from board import Board
a = Arm.connect()
b = Board.connect(verbose=True)
while 1:
pot = b.get_pot()
pho = b.get_photo()
fx = round(numpy.interp(pot, [0, 1], [190, 210]))
fy = round(numpy.interp(pot, [0, 1], [-200, 200]))
fz = round(numpy.interp(pot, [0, 0.5, 1], [50, 300, 50]))
print(pot, fx, fy, fz)
a.goto(fx, fy, fz, wait=True)
if pho > 0.8: a.grab(False)
if pho < 0.8: a.grab(True)
|
[
"djiby45@outlook.com"
] |
djiby45@outlook.com
|
c6aabf9bb04b6c5f151ac01e844bdefdc7b49cb2
|
6a2a4f97009e31e53340f1b4408e775f3051e498
|
/Iniciante/p2139.py
|
4fe3e0111f40176ba65920fe08df23ba24da9d0c
|
[] |
no_license
|
rafacasa/OnlineJudgePythonCodes
|
34c31f325cccb325f074492b40591ad880175816
|
030c18f9020898fdc4f672f9cc17723236e1271d
|
refs/heads/master
| 2023-07-15T12:09:45.534873
| 2023-06-27T00:24:03
| 2023-06-27T00:24:03
| 250,595,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
from datetime import datetime
while True:
try:
mes, dia = map(int, input().split())
data = datetime(day=dia, month=mes, year=2016)
natal = datetime(day=25, month=12, year=2016)
if data > natal:
print('Ja passou!')
continue
if data == natal:
print('E natal!')
continue
diferenca = natal - data
dias = diferenca.days
if dias == 1:
print('E vespera de natal!')
else:
print('Faltam {:d} dias para o natal!'.format(dias))
except EOFError:
break
|
[
"rafaelluizcasa@gmail.com"
] |
rafaelluizcasa@gmail.com
|
f85638ba486b3870c34ad06f582151b51107b7b2
|
56470dbd199578f73f9c5b676d19b4774960a68d
|
/src/CNVRepeat/main.py
|
af7442c4e6be8f0a2d80549918df51ca5b995614
|
[
"MIT"
] |
permissive
|
bioCKO/CNVRepeat
|
a646fa729db6f2b8cca718b0a1dc78b5b848b149
|
e6b9b90599bf973f523a879ad66f836f82a45bf2
|
refs/heads/master
| 2021-03-12T01:26:51.315608
| 2017-09-15T22:56:20
| 2017-09-15T22:56:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,856
|
py
|
#the package use modules from grocsvs (https://github.com/grocsvs/grocsvs) as basis for designing the pipeline.
from __future__ import print_function
import argparse
import collections
import json
import logging
import sys
import os
from CNVRepeat import log
from CNVRepeat import options as opts
from CNVRepeat import pipeline
from CNVRepeat import analysis
def load_config(config_path):
try:
config = json.load(open(config_path))
except ValueError as err:
print("Error parsing configuration file '{}': '{}'\n Check that this is a properly formatted JSON file!".format(config_path, err))
sys.exit(1)
options = opts.Options.deserialize(config, config_path)
return options
def run(options):
analysis_steps = prepare_analysis(options)
runner = pipeline.Runner(options)
print("Running")
for analysis_name, analysis_step in analysis_steps.items():
print ('Running analysis: "{}"'.format(analysis_name))
runner.run_stage(analysis_step, analysis_name)
def prepare_analysis(options):
analysis_steps = collections.OrderedDict()
if options.method == 'single_copy_exon':
if not os.path.exists(options.bed) or not os.path.splitext(options.bed)[1] == '.bed':
analysis_steps["Single Copy Exon"] = analysis.single_copy_exon.SingleCopyExonStep
analysis_steps["Genome Coverage Estimator"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageStep
analysis_steps["Genome Coverage Merger"] = analysis.estimate_genome_coverage_bed.CombineGenomeCoverageStep
elif options.method == 'single_copy_exon':
if not os.path.exists(options.bed) or not os.path.splitext(options.bed)[1] == '.bed':
analysis_steps["Random Region"] = analysis.single_copy_exon.RandomRegionStep
analysis_steps["Genome Coverage Estimator"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageStep
analysis_steps["Genome Coverage Merger"] = analysis.estimate_genome_coverage_bed.CombineGenomeCoverageStep
elif options.method == 'goleft':
analysis_steps["Genome Coverage Estimator Goleft"] = analysis.estimate_genome_coverage_bed.EstimateGenomeCoverageGoleftStep
analysis_steps["Repaet Coverage Estimator"] = analysis.estimate_repeat_coverage.EstimateRepeatCoverageStep
analysis_steps["Repeat Copy Number"] = analysis.estimate_repeat_copy_number.EstimateRepeatCopyNumberStep
return analysis_steps
def main():
parser = argparse.ArgumentParser(description="CNVRepeat: estimate copy number of repeat sequence in the genome")
parser.add_argument("--config", help="Path to configuration.json file")
parser.add_argument("--local", action="store_true", help="run job locally in multiprocess mode")
parser.add_argument("--scheduler", help="run job using scheduler, SLURM, SGE, PBS/Torque")
parser.add_argument("--cpu", default=1, help="number of cpu")
parser.add_argument("--method", default='goleft', help="method for estimation of genome coverage: goleft, single_copy_exon, random_region")
parser.add_argument("--random_dna_length", default=1000, help="length of DNA for random selection of method random_region")
parser.add_argument("--random_dna_number", default=100000, help="number of DNA for random selection of method random_region")
parser.add_argument("--debug", action="store_true", help="run in debug mode")
args = parser.parse_args()
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
options = load_config(args.config)
options.debug = args.debug
options.method = args.method
options.random_dna_length = args.random_dna_length
options.random_dna_number = args.random_dna_number
log.log_command(options, sys.argv)
run(options)
if __name__ == '__main__':
main()
|
[
"jinfeng7chen@gmail.com"
] |
jinfeng7chen@gmail.com
|
95de5df7a22b6a56135887f66ed343865118bf9f
|
ea2b40a2b2209db1c363833e33d77086e1a4b023
|
/tests/robust/test_min_param_perturbation.py
|
aeed86f73b1fcb5e15fcf33cf1dd469ca4ea4944
|
[
"BSD-3-Clause"
] |
permissive
|
kopalgarg/captum
|
3ecb8de09c2a0e0efa487c67638abb0bb7870d1f
|
67a3ddcb627f008cf0c23df7b10bc50d75324efe
|
refs/heads/master
| 2023-06-15T18:00:54.449011
| 2021-07-04T21:45:58
| 2021-07-04T21:46:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,092
|
py
|
#!/usr/bin/env python3
from typing import List
import torch
from torch import Tensor
from captum.robust import MinParamPerturbation
from tests.helpers.basic import BaseTest, assertTensorAlmostEqual
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
def inp_subtract(inp: Tensor, ind: int = 0, add_arg: int = 0) -> Tensor:
inp_repeat = 1.0 * inp
inp_repeat[0][ind] -= add_arg
return inp_repeat
def add_char(inp: List[str], ind: int = 0, char_val: int = 0) -> List[str]:
list_copy = list(inp)
list_copy[ind] = chr(122 - char_val) + list_copy[ind]
return list_copy
def add_char_batch(inp: List[List[str]], ind: int, char_val: int) -> List[List[str]]:
return [add_char(elem, ind, char_val) for elem in inp]
def text_preproc_fn(inp: List[str]) -> Tensor:
return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)
def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
return torch.cat([text_preproc_fn(elem) for elem in inp])
def alt_correct_fn(model_out: Tensor, target: int, threshold: float) -> bool:
if all(model_out[:, target] > threshold):
return True
return False
class Test(BaseTest):
def test_minimal_pert_basic_linear(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
attack=inp_subtract,
arg_name="add_arg",
arg_min=0.0,
arg_max=1000.0,
arg_step=1.0,
)
target_inp, pert = minimal_pert.evaluate(
inp, target=0, attack_kwargs={"ind": 0}
)
self.assertAlmostEqual(pert, 2.0)
assertTensorAlmostEqual(
self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
)
def test_minimal_pert_basic_binary(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
attack=inp_subtract,
arg_name="add_arg",
arg_min=0.0,
arg_max=1000.0,
arg_step=1.0,
mode="binary",
)
target_inp, pert = minimal_pert.evaluate(
inp,
target=0,
attack_kwargs={"ind": 0},
perturbations_per_eval=10,
)
self.assertAlmostEqual(pert, 2.0)
assertTensorAlmostEqual(
self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
)
def test_minimal_pert_preproc(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=text_preproc_fn,
apply_before_preproc=True,
)
target_inp, pert = minimal_pert.evaluate(
text_inp, target=1, attack_kwargs={"ind": 1}
)
self.assertEqual(pert, None)
self.assertEqual(target_inp, None)
def test_minimal_pert_alt_correct(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=text_preproc_fn,
apply_before_preproc=True,
correct_fn=alt_correct_fn,
num_attempts=5,
)
expected_list = ["abc", "ezyd", "ghi"]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 1200},
perturbations_per_eval=5,
)
self.assertEqual(pert, 21)
self.assertListEqual(target_inp, expected_list)
target_inp_single, pert_single = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 1200},
)
self.assertEqual(pert_single, 21)
self.assertListEqual(target_inp_single, expected_list)
def test_minimal_pert_additional_forward_args(self) -> None:
model = BasicModel_MultiLayer()
text_inp = [["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]]
additional_forward_args = torch.ones((2, 3)) * -97
model = BasicModel_MultiLayer()
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char_batch,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=batch_text_preproc_fn,
apply_before_preproc=True,
correct_fn=alt_correct_fn,
)
expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
perturbations_per_eval=15,
additional_forward_args=(additional_forward_args,),
)
self.assertEqual(pert, 5)
self.assertListEqual(target_inp, expected_list)
target_inp_single, pert_single = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
additional_forward_args=(additional_forward_args,),
)
self.assertEqual(pert_single, 5)
self.assertListEqual(target_inp_single, expected_list)
def test_minimal_pert_tuple_test(self) -> None:
model = BasicModel_MultiLayer()
text_inp = (
[["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]],
torch.ones((2, 3)) * -97,
)
model = BasicModel_MultiLayer()
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(*x),
attack=lambda x, ind, char_val: (add_char_batch(x[0], ind, char_val), x[1]),
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=lambda x: (batch_text_preproc_fn(x[0]), x[1]),
apply_before_preproc=True,
correct_fn=alt_correct_fn,
)
expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
perturbations_per_eval=15,
)
self.assertEqual(pert, 5)
self.assertListEqual(target_inp[0], expected_list)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
151f7484ec7c130b4403dd08c90830b52368805d
|
4202a7c678e0ec25ab2065c4c2804b0296f94480
|
/MOMI_FCS2/momi_priorset_8_stoch_asym.py
|
9d14d73e818b29893f66b4c9d5195a512cecadac
|
[] |
no_license
|
kaiyaprovost/whole_genome_pipeline
|
f1c479536560c5b8c68fe3a5ba0917140fbb0793
|
8e605d855c9f0cd6e11e1b73a97260e0d4aa3fae
|
refs/heads/master
| 2023-04-22T20:51:01.344297
| 2023-04-06T19:12:11
| 2023-04-06T19:12:11
| 237,044,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,387
|
py
|
import momi ## momi2 analysis
import logging ## create log file
import numpy as np
import datetime
print("-----\n-----\n-----")
now = datetime.datetime.now()
print("start logging\n")
logging.basicConfig(level=logging.INFO,
filename="momi_log_priorset_8_stoch_asym.txt")
print("load sfs\n")
sfspath = "/home/kprovost/nas1/momi2/cardcard16_sfs_filtered_changelength_monomorphic.txt"
## this is a two-population sfs with monomorphic sites included in "length"
sfs = momi.Sfs.load(sfspath)
print("Avg pairwise heterozygosity", sfs.avg_pairwise_hets[:5])
print("populations", sfs.populations)
print("percent missing data per population", sfs.p_missing)
## set up two-population model with parameters
## use Pure Isolation model as base for all of the models
## because only looking at this particular split, elected to change ranges to get
print("\nPRIORS")
print("MUT RATE: 2.21e-9")
mutrate=2.21e-9
print("GEN TIME: 1")
gentime=1
print("ANCESTRAL NE: 300,000")
ancne=300000
print("DIV TIME RANGE: 500,000 to 1,000,000")
divtimelow=500000
divtimehigh=1000000
print("NE RANGE: 500,000 to 2,000,000")
nelow=500000
nehigh=2000000
print("MIGRATION RANGE: 0 to 0.1")
migratelow=0
migratehigh=0.1
print("MIGRATION DATE RANGE: 25,000 to 250,000\n\n")
migtimelow=25000
migtimehigh=250000
print("begin setting up models\n")
##### PURE ISOLATION MODEL #####
print("\nPure Isolation model (base model)")
pure_isolation_model = momi.DemographicModel(N_e=ancne,muts_per_gen=mutrate,gen_time=gentime) ## why tho -- can you give it something?
pure_isolation_model.set_data(sfs)
## set up divergence times
pure_isolation_model.add_time_param("tdiv_sc",lower=divtimelow,upper=divtimehigh)
## set up effective population size
pure_isolation_model.add_size_param("ne_s",lower=nelow,upper=nehigh) ## this is from Brian's paper on cardinals
pure_isolation_model.add_size_param("ne_c",lower=nelow,upper=nehigh)
## set up populations and phylogeny
pure_isolation_model.add_leaf("Son",N="ne_s")
pure_isolation_model.add_leaf("Chi",N="ne_c")
pure_isolation_model.move_lineages("Son", "Chi", t="tdiv_sc")
## randomize parameters and check them
#pure_isolation_model.set_params(randomize=True)
#print(pure_isolation_model.get_params())
## set up the rest of the models
##### ASYMMETRIC MIGRATION #####
print("\nAsymmetric model (c2s as base)")
asym_model = pure_isolation_model.copy() ## copy isol
asym_model.add_pulse_param("mig_s2c",lower=migratelow,upper=migratehigh)
asym_model.add_pulse_param("mig_c2s",lower=migratelow,upper=migratehigh)
asym_model.add_time_param("tmig_asym",lower=migtimelow,upper=migtimehigh,upper_constraints=["tdiv_sc"])
asym_model.move_lineages("Chi","Son",t="tmig_asym",p="mig_s2c")
asym_model.move_lineages("Son","Chi",t="tmig_asym",p="mig_c2s")
## randomize and check parameters
asym_model.set_params(randomize=True)
print(asym_model.get_params())
## optimize each model once
print("#####")
models = [asym_model]
model_names = ["ASYM"]
AICs = []
count = 0
for model in models:
now = datetime.datetime.now()
name = str(model_names[count])
print("Stochastic optimizing "+name+" model: "+str(now))
model.stochastic_optimize(num_iters=10, n_minibatches=5, save_to_checkpoint="momi_checkpoint_priorset8_stoch_asym.txt", svrg_epoch=-1)
now = datetime.datetime.now()
print("Finished stochastic optimizing "+name+": "+str(now))
print(model.get_params())
print("Starting AIC likelihood for stochastic "+name)
lik = model.log_likelihood()
nparams = len(model.get_params())
aic = 2*nparams - 2*lik
print("AIC {}".format(aic))
AICs.append(aic)
count += 1
print("-----")
count = 0
for model in models:
now = datetime.datetime.now()
print("Fully optimizing "+name+" model: "+str(now))
model.optimize(method="L-BFGS-B")
now = datetime.datetime.now()
print("Finished fully optimizing "+name+": "+str(now))
print(model.get_params())
print("Starting AIC likelihood for full "+name)
lik = model.log_likelihood()
nparams = len(model.get_params())
aic = 2*nparams - 2*lik
print("AIC {}".format(aic))
AICs.append(aic)
count += 1
print("-----")
minv = np.min(AICs)
delta_aic = np.array(AICs) - minv
print("Delta AIC per model: ", delta_aic)
print("AIC weight per model: ", np.exp(-0.5 * delta_aic))
## TODO: add searching multiple times
## TODO: add bootstrapping
|
[
"17089935+kaiyaprovost@users.noreply.github.com"
] |
17089935+kaiyaprovost@users.noreply.github.com
|
7eee6af617335a2d6c8c407680e67b2cc2e81dea
|
55f6a9b8f90ae308a90739fd8f77f4e7cd10ff19
|
/spacy/tests/lang/sk/test_tokenizer.py
|
247847284ad16613d04423f16cb12d3f7d98d573
|
[
"MIT"
] |
permissive
|
explosion/spaCy
|
cce07ee403aa398de7ba8941a2c11d22aea68021
|
3e4264899c3b12f8eabc5cd700146177a34824d0
|
refs/heads/master
| 2023-08-31T07:18:13.598768
| 2023-08-30T09:58:14
| 2023-08-30T09:58:14
| 21,467,110
| 26,348
| 4,983
|
MIT
| 2023-09-13T17:56:22
| 2014-07-03T15:15:40
|
Python
|
UTF-8
|
Python
| false
| false
| 453
|
py
|
import pytest
SK_BASIC_TOKENIZATION_TESTS = [
(
"Kedy sa narodil Andrej Kiska?",
["Kedy", "sa", "narodil", "Andrej", "Kiska", "?"],
),
]
@pytest.mark.parametrize("text,expected_tokens", SK_BASIC_TOKENIZATION_TESTS)
def test_sk_tokenizer_basic(sk_tokenizer, text, expected_tokens):
tokens = sk_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list
|
[
"noreply@github.com"
] |
explosion.noreply@github.com
|
25a66089f805038eee267a2ad15f97dabe903290
|
ccdbe6e17022aae05e6bee60b37fd92a0b44a6d8
|
/python/kivyapp/clock.py
|
b938a8ed87cc8a63c2595f40e017430675c55b3f
|
[
"Apache-2.0"
] |
permissive
|
bdastur/notes
|
e580c45ef38abd2b104dce3ec6898031e9c79f27
|
74341d8de88c8817c557af02c6e8bd470e56151f
|
refs/heads/master
| 2023-08-15T19:59:54.631621
| 2023-07-24T15:27:08
| 2023-07-24T15:27:08
| 92,065,482
| 4
| 1
|
Apache-2.0
| 2023-08-27T19:00:14
| 2017-05-22T14:52:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 958
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from kivy.app import App
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.gridlayout import GridLayout
class MyClock(GridLayout):
def __init__(self, **kwargs):
super(MyClock, self).__init__(**kwargs)
self.cols = 1
self.myLabel = Label(text="Clock: ")
self.add_widget(self.myLabel)
Clock.schedule_once(self.clockCallback, 3)
def clockCallback(self, duration):
now = datetime.datetime.now()
print("Duration: ", datetime.datetime.strftime(now, "%Y-%m-%d %H:%M:%S"))
self.myLabel.text = "Clock: %s" % datetime.datetime.strftime(now, "%Y-%m-%d %H:%M:%S")
class ClockApp(App):
def build(self):
myclock = MyClock()
Clock.schedule_interval(myclock.clockCallback, 5)
return myclock
def main():
ClockApp().run()
if __name__ == '__main__':
main()
|
[
"bdastur@gmail.com"
] |
bdastur@gmail.com
|
2adcff104db81c7a2defe60c9f677882631fe561
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/chrome/browser/ash/arc/PRESUBMIT.py
|
9b7c9a6ae4bf8c95ebee88233e1d5518e78f4fe7
|
[
"BSD-3-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
# Apply the same PRESUBMIT for components/arc.
presubmit_path = (
input_api.change.RepositoryRoot() + '/components/arc/PRESUBMIT.py')
presubmit_content = input_api.ReadFile(presubmit_path)
global_vars = {}
exec(presubmit_content, global_vars)
return global_vars['CheckChangeOnUpload'](input_api, output_api)
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
c92082561955960009d752e4ffe94a930beeedb3
|
eeaf323a92254190195ecbb61c03171aae8f28ee
|
/accounts/migrations/0002_usercompanies_company_name.py
|
decf6fe96a53b78ea09b7794c2145c537e0264d5
|
[] |
no_license
|
abdullakn/job-portal1
|
44295db5c1169494454fa407ad5716f119e6017b
|
c5d299acae4262eb9d02317f3358aaa6d4314b13
|
refs/heads/master
| 2023-07-18T07:45:54.610627
| 2021-09-04T11:37:36
| 2021-09-04T11:37:36
| 403,040,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Generated by Django 3.2.5 on 2021-07-21 18:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='usercompanies',
name='company_name',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
[
"abdudebanjazz@gmail.com"
] |
abdudebanjazz@gmail.com
|
16bd10b11e4d259ea0624fbef79cfa7bedb25b02
|
78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c
|
/AlgorithmStudy/백준/무지성 랜덤풀이/9월/9.19/1057 토너먼트.py
|
3ebf3c81fb50fc00a0a769e5763627af7126e159
|
[] |
no_license
|
cladren123/study
|
ef2c45bc489fa658dbc9360fb0b0de53250500e5
|
241326e618f1f3bb1568d588bf6f53b78920587a
|
refs/heads/master
| 2023-09-02T02:21:24.560967
| 2021-11-05T12:20:06
| 2021-11-05T12:20:06
| 368,753,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
"""
문제유형 :
수학
브루트포스
1
1 3
1 2 3 4
두 수를 2로 나눈 몫을 서로 같아질때까지 빼다보면 라운드 수가 나온다.
"""
import sys
input = sys.stdin.readline
n, p1, p2 = map(int, input().split())
count = 0
while p1 != p2 :
p1 -= p1//2
p2 -= p2//2
count += 1
print(count)
|
[
"48821942+cladren123@users.noreply.github.com"
] |
48821942+cladren123@users.noreply.github.com
|
0051f0b263c771d0d796d609592be3b693a8b0bf
|
446571f13b3c1604cdfbcee8fdc2f956568d7c8d
|
/geeksforgeeks/arrays/zero_flip.py
|
dfa147be96fa075eaaa1e5a24d01659937c19f41
|
[] |
no_license
|
ssarangi/algorithms
|
2e8f0a4be6bf0f4a3d75b224ed993e1fb0ca0229
|
e151307f2706214cf8cefa6231290aeb2e5cfd82
|
refs/heads/master
| 2021-01-17T02:28:23.291588
| 2018-01-06T18:35:13
| 2018-01-06T18:35:13
| 51,458,833
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
def flip_zeros(arr, m):
current_not_flipped = m
start = 0
max_till_now = 0
positions = []
best_positions = None
cons_zero = 0
end_of_zeros = -1
while 0 <= start < len(arr):
i = start
while (i < len(arr)) and (current_not_flipped != 0 or arr[i] != 0):
if arr[i] == 0:
current_not_flipped -= 1
positions.append(i)
if current_not_flipped == 0:
end_of_zeros = i + 1
cons_zero += 1
i += 1
if cons_zero > max_till_now:
best_positions = [p for p in positions]
max_till_now = cons_zero
positions.clear()
cons_zero = 0
current_not_flipped = m
start = end_of_zeros
end_of_zeros = -1
return max_till_now, best_positions
def main():
arr = [1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1]
m = 2
print(flip_zeros(arr, m))
arr = [1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1]
m = 1
print(flip_zeros(arr, m))
arr = [0, 0, 0, 1]
m = 4
print(flip_zeros(arr, m))
if __name__ == "__main__":
main()
|
[
"satyajit.sarangi@gmail.com"
] |
satyajit.sarangi@gmail.com
|
6ca7f5d24c91b078fc989d351c41a011332afca9
|
099deeb2c308bdc00a2c423743e4b2aacdac866c
|
/week7/tuple/youngyun.py
|
890eaa5146674579d96aa1f65f6d9eff1b6cb958
|
[] |
no_license
|
Joonsun-Hwang/coding-test-study-lamda
|
76fed2f18a3220f6731775984425dff49b4379eb
|
0632ec9dd60024203ed10ebeab07aa7da4782806
|
refs/heads/main
| 2023-05-01T21:31:48.174944
| 2021-05-05T09:48:23
| 2021-05-05T09:48:23
| 329,205,708
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import re
def solution(s):
s = eval(s[1:-1])
s = list(s)
if len(s) > 1:
s.sort(key=lambda x: len(x))
else:
return list(s)
answer = []
for sg in s:
for ssg in sg:
if ssg not in answer:
answer.append(ssg)
return answer
|
[
"fightnyy@naver.com"
] |
fightnyy@naver.com
|
11b31f35dc668112d0124a59dd723b8cb872acea
|
535a4d3c3f57f5f67d36be3d7d54fdbf9fc30a92
|
/やってみよう_必修編/chapter09/9_10_imported_restaurant.py
|
5ef1486dd7e1b597ed37100f39228f9f6ac49553
|
[] |
no_license
|
kyuugi/saitan-python
|
4d28c6ecacb4d0b846292ab94f54814dde1cbab0
|
8196a91a9aac5011bc29782381b93f143b0ae25a
|
refs/heads/master
| 2022-12-06T05:49:13.363864
| 2020-09-01T23:19:32
| 2020-09-01T23:19:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
from restaurant import Restaurant
restaurant = Restaurant('malaychan', '東南アジア料理')
restaurant.describe_restaurant()
restaurant.open_restaurant()
|
[
"takanori@takanory.net"
] |
takanori@takanory.net
|
be9c7f1407518d5042bb9f9141452c9827f5bc14
|
693c76bf548ad67232dba7951be51274a1d6e7d0
|
/CodeChef/forgotten language.py
|
eb4eaa2796a22da44e9b55467ed31e12f3533067
|
[] |
no_license
|
Kartavya-verma/Competitive-Pratice
|
7958e3034e5e766d6e1186fee2f23562fac70a9b
|
8c684996410b376263082a7421d4a14a85cf624b
|
refs/heads/master
| 2022-12-22T10:38:37.981529
| 2020-09-14T12:22:50
| 2020-09-14T12:22:50
| 295,405,048
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
for i in range(int(input())):
n, k = map(int, input().split())
a = input().split()
res = ['NO']*(n)
for j in range(k):
s = input().split()
s = s[1:]
for j in s:
if j in a:
res[a.index(j)] = 'YES'
print(' '.join(res))
|
[
"vermakartavya2000@gmail.com"
] |
vermakartavya2000@gmail.com
|
915c97efaf9ef352a967f2d1bed523eda5004a13
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02994/s580686016.py
|
a70a698f4cc8e26a206d2c2f0cdf4a1574d17b2c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
n,k=map(int,input().split());a=[k+i for i in range(n)]
sumA=sum(a);min_num=float('inf');result=0
for i in a:
if min_num > abs(sumA-(sumA-i)):
min_num=abs(sumA-(sumA-i))
result=sumA-i
print(result)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
48943f3f567ebfe249a08dc5ebe90c5e9841dd43
|
d68ca034018d66f73024223d4b2266b3c3c901d7
|
/prev/myworks/onelifefitness/chainxy/spiders/onelifefitness.py
|
446dae58b98362a5967083b62453bedc1b6f9e84
|
[] |
no_license
|
GoodyIT/scrapy
|
caff30d26660f778008ad50532e364ab36aba4c2
|
5ae80cf83dc62c4e1bd2bfa11049ca39a3ca3488
|
refs/heads/master
| 2021-01-20T10:10:18.365194
| 2017-07-22T13:09:07
| 2017-07-22T13:09:07
| 90,330,210
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,546
|
py
|
import scrapy
import json
import re
import csv
import requests
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from chainxy.items import ChainItem
import pdb
import usaddress
class Onelifefitness(scrapy.Spider):
    """Spider that scrapes Onelife Fitness club locations into ChainItem rows.

    Crawl starts at the homepage; the 'Locations' menu is expanded to find
    regional pages, whose clubs are scraped individually.  The Kansas City
    sub-site (onelifekc) has a different layout and gets its own parser.
    NOTE(review): written for Python 2 (see validate()).
    """
    name = "onelifefitness"
    domain = "https://www.onelifefitness.com/"
    start_urls = ["https://www.onelifefitness.com"]
    store_id = []  # NOTE(review): never referenced in this class -- presumably vestigial
    def parse(self, response):
        """Find the 'Locations' entry in the main menu and follow every
        regional link (onelifekc links go to parse_kensas instead)."""
        parents = response.xpath('.//div[@id="hs_menu_wrapper_module_14652775546295339"]//ul/li[2]')
        for parent in parents:
            if parent.xpath('.//a/text()').extract_first().find('Locations') != -1:
                branch_list = parent.xpath('.//ul[contains(@class, "hs-menu-children-wrapper")]/li/a/@href').extract()
                for branch in branch_list:
                    # Normalize to a path so the host can be re-prefixed below.
                    branch = branch.replace('https://www.onelifefitness.com', '')
                    if branch.find('onelifekc') == -1:
                        request = scrapy.Request(url="https://www.onelifefitness.com%s" % branch, callback=self.parse_clubs)
                    else:
                        request = scrapy.Request(url=branch, callback=self.parse_kensas)
                    yield request
    def parse_kensas(self, response):
        """Parse a Kansas City club page (one club per page); the address is
        tokenized with the usaddress library."""
        item = ChainItem()
        item['store_number'] = ''
        item['coming_soon'] = "0"
        item['store_name'] = response.xpath('.//a[@class="standard-logo"]/img/@alt').extract_first()
        address = response.xpath('.//address/a[1]/text()').extract()
        address = [tp.strip().replace('\n', '') for tp in address if tp.strip() != ""]
        addr = usaddress.parse(" ".join(address))
        city = state = zip_code = street = ''
        # usaddress.parse yields (token, label) pairs; bucket them by label.
        for temp in addr:
            if temp[1] == 'PlaceName':
                city += temp[0].replace(',','') + ' '
            elif temp[1] == 'StateName':
                state = temp[0].replace(',','')
            elif temp[1] == 'ZipCode':
                zip_code = temp[0].replace(',','')
            else:
                street += temp[0].replace(',','') + ' '
        item['address'] = street
        item['country'] = 'United States'
        item['city'] = city
        item['state'] = state
        item['zip_code'] = zip_code
        item['phone_number'] = response.xpath('.//address/a/text()').extract_first()
        item['latitude'] = ''
        item['longitude'] = ''
        item['store_hours'] = ""
        item['other_fields'] = ""
        yield item
    def parse_clubs(self, response):
        """Follow each club link on a regional page, passing the coordinates
        stored in the list markup along via request.meta."""
        club_list = response.xpath('.//ul[contains(@class, "gym_locations")]/li')
        for club in club_list:
            request = scrapy.Request(url="https://www.onelifefitness.com%s" % club.xpath('.//a/@href').extract_first(), callback=self.parse_store)
            request.meta['lat'] = club.xpath('.//@data-lat').extract_first()
            request.meta['lng'] = club.xpath('.//@data-ln').extract_first()
            yield request
    def parse_store(self, response):
        """Parse one club page into a ChainItem.

        The address markup varies between pages, hence the chain of xpath
        fallbacks below.  'Coming Soon' in the phone or hours text marks a
        club that is not yet open.
        """
        # try:
        item = ChainItem()
        item['store_number'] = ''
        item['store_name'] = response.xpath('.//div[@class="banner-header"]/h1/text()').extract_first()
        item['address'] = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/p/span[1]/text()').extract_first()
        address = ''
        # if item['store_name'].find('Windermere Gym') != -1:
        #     pdb.set_trace()
        if item['address'] == None:
            item['address'] = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/p[1]/text()').extract_first()
            if item['address'] == None:
                item['address'] = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/text()').extract_first()
                address = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/text()').extract()[1]
            else:
                address = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/p/text()').extract()[1]
        else:
            address = response.xpath('.//span[@id="hs_cos_wrapper_module_14684752951104419"]/p/span[2]/text()').extract_first()
        # Second address line is "City, ST ZIP", "City, ST, ZIP" or
        # "City ST ZIP"; split accordingly.
        if len(address.split(',')) == 2:
            item['city'] = address.split(',')[0].strip()
            item['state'] = address.split(',')[1].strip().split(' ')[0].strip()
            item['zip_code'] = address.split(',')[1].strip().split(' ')[1].strip()
        elif len(address.split(',')) == 3:
            item['city'] = address.split(',')[0].strip()
            item['state'] = address.split(',')[1].strip()
            item['zip_code'] = address.split(',')[2].strip()
        else:
            item['city'] = address.split(' ')[0].strip()
            item['state'] = address.split(' ')[1].strip()
            item['zip_code'] = address.split(' ')[2].strip()
        item['address2'] = ''
        item['country'] = 'United States'
        item['coming_soon'] = "0"
        item['latitude'] = response.meta['lat']
        item['longitude'] = response.meta['lng']
        item['other_fields'] = ""
        phone = response.xpath('.//span[@id="hs_cos_wrapper_module_14684754122179512"]/p/text()').extract_first()
        if phone == None:
            phone = response.xpath('.//span[@id="hs_cos_wrapper_module_14684754122179512"]/p/a/text()').extract_first()
        if phone == None:
            item['phone_number'] = ''
        elif phone.find('Coming Soon') == -1:
            item['phone_number'] = self.validate(phone)
        else:
            item['coming_soon'] = "1"
        item['store_hours'] = ""
        hours = response.xpath('.//span[@id="hs_cos_wrapper_module_14684754134419869"]/p/text()').extract_first()
        if hours != None and hours.find('Coming Soon') != -1:
            item['coming_soon'] = "1"
        else:
            try:
                item['store_hours'] = "; ".join(response.xpath('.//span[@id="hs_cos_wrapper_module_14684754134419869"]/p/text()').extract()).strip()
                # Drops the first two characters -- presumably a leading
                # separator/bullet from the markup; TODO confirm on live pages.
                item['store_hours'] = item['store_hours'][2:].strip()
            except:
                item['store_hours'] = ""
        # if item['store_name'].find('Crabapple Gym') != -1:
        #     pdb.set_trace()
        item['store_hours'] = self.validate(item['store_hours'])
        # except:
        #     pdb.set_trace()
        yield item
    def validate(self, value):
        """Strip non-breaking spaces (UTF-8 \xc2\xa0).  Relies on Python 2
        semantics: str.encode returns a byte string there."""
        return value.encode('utf8').replace('\xc2\xa0', ' ')
|
[
"johnsondavid489@yahoo.com"
] |
johnsondavid489@yahoo.com
|
68fbb29c364a4f6b17cf269f611afac8fb2b7027
|
956f13e160b8381d3f8bbbb4b97bf66981ce0490
|
/index/migrations/0012_auto_20181119_1617.py
|
b89c05a472c13c4b6b1a52e677e55b53234dfbd9
|
[] |
no_license
|
yzp0111/zyogo
|
e910d0ad029fb1a3f95beb8422336fee474c635a
|
b4807f4418d8cb1d195097f87e4e74637346cb6d
|
refs/heads/master
| 2020-04-07T23:46:44.946618
| 2018-11-23T11:43:50
| 2018-11-23T11:43:50
| 158,824,828
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-11-19 08:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 1.11.8: re-declares the foreign keys
    GoodsInfo.user and Windows.goods (on_delete/verbose_name changes)."""
    dependencies = [
        ('index', '0011_auto_20181119_0852'),
    ]
    operations = [
        migrations.AlterField(
            model_name='goodsinfo',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='index.UserInfo', verbose_name='卖家'),
        ),
        migrations.AlterField(
            model_name='windows',
            name='goods',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='index.GoodsInfo', verbose_name='商品'),
        ),
    ]
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
cde2edf4a10c79aa2209df50159f375a1a6b8a53
|
3d37f595a8aaaa7c5723ddbd6758ecac5147dce2
|
/factorial-trailing-zeroes/factorial-trailing-zeroes.py
|
7087830bf6b61d0261c4e9cb1b2af12cdc58875f
|
[] |
no_license
|
baggy2797/Leetcode
|
ec218b155ebb972cd793253f25c3e18117216703
|
469c1541579401768f7a1da55d504a9e8656b21e
|
refs/heads/main
| 2023-06-24T17:03:42.708935
| 2021-07-16T22:31:24
| 2021-07-16T22:31:24
| 342,979,700
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
class Solution:
    def trailingZeroes(self, n: int) -> int:
        """Return the number of trailing zeros in n! (factorial).

        Trailing zeros come from factors of 10 = 2 * 5, and factors of 2
        always outnumber factors of 5, so the answer is the number of
        factors of 5 in n!: floor(n/5) + floor(n/25) + floor(n/125) + ...

        Improvement over the original: O(log n) instead of scanning every
        multiple of 5 up to n (O(n)).

        :param n: non-negative integer.
        :return: count of trailing zeros in n!.
        """
        count = 0
        power = 5
        while power <= n:
            count += n // power  # multiples of this power of 5 in 1..n
            power *= 5
        return count
|
[
"bhagwataditya226@gmail.com"
] |
bhagwataditya226@gmail.com
|
e82c5392a6049ce180717cc3145908adaa3f3fc4
|
01fdd206c8c825b30870bdd3f6e75f0aa113b849
|
/test/record/parser/test_response_whois_sgnic_sg_property_nameservers_schema_2.py
|
8a8146c2727ea7d9dd262c2deba6226bf92c8635
|
[
"MIT"
] |
permissive
|
huyphan/pyyawhois
|
0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
refs/heads/master
| 2021-01-23T22:42:55.989651
| 2015-09-19T16:40:06
| 2015-09-19T16:40:06
| 23,335,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.sgnic.sg/property_nameservers_schema_2
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisSgnicSgPropertyNameserversSchema2(object):
    """Auto-generated fixture test (see header): checks that the canned
    whois.sgnic.sg response parses into the four google.com nameservers."""
    def setUp(self):
        # Build a yawhois Record from the stored whois response text.
        fixture_path = "spec/fixtures/responses/whois.sgnic.sg/property_nameservers_schema_2.txt"
        host = "whois.sgnic.sg"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])
    def test_nameservers(self):
        # Expect exactly ns1..ns4.google.com, in order.
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 4)
        eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[0].name, "ns1.google.com")
        eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[1].name, "ns2.google.com")
        eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[2].name, "ns3.google.com")
        eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[3].name, "ns4.google.com")
|
[
"dachuy@gmail.com"
] |
dachuy@gmail.com
|
9111ff6d364693b213af14c932a89fef59ae75aa
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5658571765186560_0/Python/Verum/D-OminousOmino.py
|
fc2ac8e040f5e876b4f9b9ae8a8a363707c04c6b
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# Select the Code Jam dataset to run: uncomment exactly one input/output pair.
#input_filename = "D-test.txt"
#output_filename = "D-test-out.txt"
input_filename = "D-small-attempt0.in"
output_filename = "D-small-attempt0.out"
#input_filename = "D-large.in"
#output_filename = "D-large.out"
def solve(x,r,c):
    """Return True iff every arrangement of X-ominoes can tile the R x C
    grid (Gabriel wins), False if some omino blocks it (Richard wins).

    Guard-clause form of the case analysis: area must divide evenly, only
    x <= 6 is winnable, and each x needs a minimum narrow dimension (with
    the extra 3 x <10 exception for pentominoes).
    """
    narrow, wide = min(r, c), max(r, c)
    if (narrow * wide) % x != 0:
        return False
    if x > 6:
        return False
    if x == 3 and narrow < 2:
        return False
    if (x == 4 or x == 5) and narrow < 3:
        return False
    if x == 5 and narrow == 3 and wide < 10:
        return False
    if x == 6 and narrow < 4:
        return False
    return True
# Read T cases of "X R C"; write "Case #i: GABRIEL|RICHARD" per case and
# echo progress to stdout.
with open(input_filename, "r") as ifile:
    with open(output_filename, "w") as ofile:
        T = int(ifile.readline())
        for case in range(1, T+1):
            x, r, c = map(int, ifile.readline().split())
            print("\nCase %d" % case)
            print("Task: %s" % str( (x,r,c) ))
            result = solve(x,r,c)
            # Gabriel wins when a perfect tiling always exists.
            result = "GABRIEL" if result else "RICHARD"
            ofile.write("Case #%d: %s\n" % (case, result))
            print("Solve: %s" % result)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
33fa2896293d944a73e801b87353e72d965ebd79
|
385c027fc4b9c09706a4d880bdb8aa5897d0ebca
|
/tests/greedy/test_transforms.py
|
b8189adbd88900a9f717953dc14be5efddba0e93
|
[
"MIT"
] |
permissive
|
rodluger/starry
|
076d46324473a6ac634781a3382021d02a5f4fdd
|
b72dff08588532f96bd072f2f1005e227d8e4ed8
|
refs/heads/master
| 2023-05-23T16:37:07.835744
| 2022-07-14T15:38:11
| 2022-07-14T15:38:11
| 120,621,593
| 131
| 31
|
MIT
| 2021-11-16T16:48:10
| 2018-02-07T13:54:20
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
import numpy as np
import starry
def test_latlon_grid():
# Just check that these don't cause errors
map = starry.Map(10)
lat, lon = map.get_latlon_grid(projection="rect")
lat, lon = map.get_latlon_grid(projection="ortho")
lat, lon = map.get_latlon_grid(projection="moll")
def test_pixel_transforms():
map = starry.Map(10)
lat, lon, Y2P, P2Y, Dx, Dy = map.get_pixel_transforms()
# Check that the back-and-forth transform is the identity (ish)
assert np.max(np.abs(P2Y @ Y2P - np.eye(map.Ny))) < 1e-6
# Just check that the derivatives are finite
assert not np.isnan(np.sum(Dx) + np.sum(Dy))
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
ee4c3fe497c236a93beff8da80c41af918eabd5c
|
1cd37c59751344c0f89fe9102e25121e50f4cdfa
|
/python/ZvvHbb13TeVmacros/launchFakeMET.py
|
8255ef80ffa75f80954f890b1646c569a6eb13c5
|
[] |
no_license
|
jmduarte/Xbb
|
7bd2f919c320e4cda1306c0282f1d4c0df220b05
|
2f23c163b81c0d4d3f979369b86690ddfb7920fd
|
refs/heads/master
| 2021-01-12T16:57:59.009289
| 2016-06-15T09:43:41
| 2016-06-15T09:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
import sys
from doFakeMET import *
#from doFakeMETStupid import *
try:
fileNames = sys.argv[1]
outName = sys.argv[2]
print
print "Launching doFakeMET with:"
print "fileNames:", fileNames
print "outName:", outName
print
except:
print
print "example:"
print "python launchFakeMET.py tree_100_*.root newFile.root"
print sys.argv
print
print "fileNames: ",fileNames
from os import walk
dirpath_ = ""
dirnames_ = []
files_ = ""
# filenames = []
inputFiles = []
folder_prefix = ''
exit = False
for (dirpath_, dirnames_, files_) in walk(fileNames):
for filename_ in files_:
print file
if 'root' in filename_ and 'tree' in filename_ and not 'failed' in dirpath_:
exit = True
if exit: break
if exit: break
print dirpath_
path = dirpath_+'/'+ files_[0]
path = path.split("tree_")[0]
path = path + "tree_*.root"
inputs = []
for file_ in files_:
inputs.append((dirpath_+'/'+ file_,outName+'/'+file_))
quick = False
function = None
expoRatio = None
if quick:
firstFile = inputs[0][1]
gROOT.ProcessLine(".x "+firstFile.replace(".root","_fit.C"))
function = gDirectory.Get("histo")
function = copy.copy(function)
gROOT.ProcessLine(".x "+firstFile.replace(".root","_fit4.C"))
expoRatio = f4.Get("c1").GetPrimitive("expoRatio")
expoRatio = copy.copy(expoRatio)
for (inpt,outpt) in inputs:
function,expoRatio = doFile(inpt,outpt,function,expoRatio)
# print inpt,outpt
|
[
"silvio.donato@cern.ch"
] |
silvio.donato@cern.ch
|
6d18cbe38eee616d5462100561105c64e781a985
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02959/s005038203.py
|
72072b6b09a863006aae80c7da70edb873d1591c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# Greedy pairwise consumption: for each position i, first take as much as
# possible from the matching pair (a[i], b[i]), then let the leftover of b[i]
# consume from a[i+1].  Prints the total amount consumed.
n = int(input())
row_a = [int(tok) for tok in input().split()]
row_b = [int(tok) for tok in input().split()]
total_eaten = 0
for idx in range(n):
    from_same = min(row_a[idx], row_b[idx])
    row_b[idx] -= from_same
    total_eaten += from_same
    from_next = min(row_a[idx + 1], row_b[idx])
    row_a[idx + 1] -= from_next
    total_eaten += from_next
print(total_eaten)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2045f838ced5cbb44e3acff6b5588a986d821932
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2153/60634/269290.py
|
4a9e1e2f2528705a00fe22bb4edfda1efd0a27a3
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
def over(num):
    """Return *num* with its decimal digits reversed.

    Arithmetic (no string) reversal; for num <= 0 the loop never runs and
    0 is returned, matching the original behavior.
    """
    reversed_digits = 0
    while num > 0:
        num, digit = divmod(num, 10)
        reversed_digits = reversed_digits * 10 + digit
    return reversed_digits
# Read one integer and print it with its digits reversed, preserving the
# sign (negatives are reversed as positives and re-negated).
num = int(input())
if num < 10 and num >= 0:
    print(num)
elif num < 0:
    print(-1*over(-1*num))
else:
    print(over(num))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e97e2648f03a2b000541078198a7bd237e410cbf
|
780b6cca690a213ac908b1cd5faef5366a18dc4e
|
/314_print_names_to_columns/save3_nopass.py
|
9877032426300f773ec84c56b3d2a2c1653a6085
|
[] |
no_license
|
katkaypettitt/pybites-all
|
899180a588e460b343c00529c6a742527e4ea1bc
|
391c07ecac0d92d5dc7c537bcf92eb6c1fdda896
|
refs/heads/main
| 2023-08-22T16:33:11.171732
| 2021-10-24T17:29:44
| 2021-10-24T17:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
from typing import List # not needed when we upgrade to 3.9
def print_names_to_columns(names: List[str], cols: int = 2) -> None:
    """Print *names* in *cols* columns, each cell as '| <name>' left-padded
    to width 10, one trailing newline per row plus print's own newline."""
    cells = [f'| {person:10}' for person in names]
    rows = [' '.join(cells[start:start + cols])
            for start in range(0, len(cells), cols)]
    print(''.join(row + '\n' for row in rows))
|
[
"70788275+katrinaalaimo@users.noreply.github.com"
] |
70788275+katrinaalaimo@users.noreply.github.com
|
961463af7f72a271cabbeb12200888b42613eece
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02820/s467587633.py
|
fa3163e453745247ba7d8f8b2cd48d58de4cc18e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
# Rock-paper-scissors maximization: play the hand that beats T[i]
# ("r" -> play "p" for P points, "s" -> "r" for R, "p" -> "s" for S),
# except the same hand may not repeat the one played k turns earlier;
# in that case record a placeholder " " and score nothing.
n,k=map(int,input().split())
R,S,P=map(int,input().split())
T=input()
t=""        # hands played so far (" " marks a skipped turn)
count=0     # accumulated score
for i in range(n):
    if i>k-1:
        # Constrained turns: only score if the hand differs from t[i-k].
        if T[i]=="r":
            if t[i-k]!="p":
                t=t+"p"
                count+=P
            else:
                t=t+" "
        if T[i]=="s":
            if t[i-k]!="r":
                t=t+"r"
                count+=R
            else:
                t=t+" "
        if T[i]=="p":
            if t[i-k]!="s":
                t=t+"s"
                count+=S
            else:
                t=t+" "
    else:
        # First k turns are unconstrained: always play the winning hand.
        if T[i]=="r":
            t=t+"p"
            count+=P
        if T[i]=="p":
            t=t+"s"
            count+=S
        if T[i]=="s":
            t=t+"r"
            count+=R
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
320588077a9f70d6444751783bb3c54e23696683
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4051/826004051.py
|
c80b8f5999f99f8b050d0f6bd2743ff2c9af953f
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
from bots.botsconfig import *
from records004051 import recorddefs
# X12 826 (Tax Information) grammar, version 004051, for the bots EDI engine.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'TI',
}
# Segment hierarchy: ST envelope -> header segments -> TFS loops, each with
# nested FGS loops; N1 party loops recur at every level.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BTI', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 0, MAX: 99999},
        {ID: 'REF', MIN: 0, MAX: 99999},
        {ID: 'TIA', MIN: 0, MAX: 99999},
        {ID: 'YNQ', MIN: 0, MAX: 99999},
        {ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'IN2', MIN: 0, MAX: 99999},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'PER', MIN: 0, MAX: 99999},
        ]},
        {ID: 'TFS', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'REF', MIN: 0, MAX: 99999},
            {ID: 'DTM', MIN: 0, MAX: 99999},
            {ID: 'TIA', MIN: 0, MAX: 99999},
            {ID: 'YNQ', MIN: 0, MAX: 99999},
            {ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'N2', MIN: 0, MAX: 2},
                {ID: 'IN2', MIN: 0, MAX: 99999},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'PER', MIN: 0, MAX: 99999},
            ]},
            {ID: 'FGS', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'REF', MIN: 0, MAX: 99999},
                {ID: 'DTM', MIN: 0, MAX: 99999},
                {ID: 'TIA', MIN: 0, MAX: 99999},
                {ID: 'YNQ', MIN: 0, MAX: 99999},
                {ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'N2', MIN: 0, MAX: 2},
                    {ID: 'IN2', MIN: 0, MAX: 99999},
                    {ID: 'N3', MIN: 0, MAX: 2},
                    {ID: 'N4', MIN: 0, MAX: 1},
                    {ID: 'PER', MIN: 0, MAX: 99999},
                ]},
            ]},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
e5ea9b1ddf5e93da6d0a8e8a1c2d4abac86bd9bf
|
441b37974ac5f999001a773caa3fbf7584f82cc8
|
/Walmart Trip Type Classification/walmart_cv.py
|
208e036fd9d80b50bb13c7b6c0a95cddc512663d
|
[] |
no_license
|
lvraikkonen/Kaggle
|
bcdb653c774c211ae9e5a35fdacdb1205e81bebe
|
dbeac80d645619dc519819d4ed2c45f383dd1206
|
refs/heads/master
| 2021-01-23T03:33:17.804886
| 2016-09-03T11:51:11
| 2016-09-03T11:51:11
| 24,451,963
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,327
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
from sklearn.metrics import log_loss
from hyperopt import fmin, tpe, hp, STATUS_OK
# These functions define the metric which we are trying to
# optimize.
def objective1(params):
    """Hyperopt objective for model 1: train on dtrain1, validate on
    dtestCV1, and return the multiclass log-loss to minimize.

    :param params: dict of XGBoost hyperparameters sampled from `space`.
    :return: dict with 'loss' (log-loss on dtestCV1) and 'status'.
    """
    print "Training model1 with parameters: "
    print params
    watchlist1 = [(dtrain1, 'train'), (dtestCV1, 'eval')]
    model = xgb.train(params=params,
                      dtrain=dtrain1,
                      num_boost_round=1000,
                      early_stopping_rounds=10,
                      evals=watchlist1)
    score = log_loss(dtestCV1.get_label(), model.predict(dtestCV1))
    print "\tScore {0}\n\n".format(score)
    return {'loss': score, 'status': STATUS_OK}
def objective2(params):
print "Training model2 with parameters: "
print params
watchlist2 = [(dtrain2, 'train'), (dtestCV2, 'eval')]
model = xgb.train(params=params,
dtrain=dtrain1,
num_boost_round=1000,
early_stopping_rounds=10,
evals=watchlist2)
score = log_loss(dtestCV2.get_label(), model.predict(dtestCV2))
print "\tScore {0}\n\n".format(score)
return {'loss': score, 'status': STATUS_OK}
# Load data from buffer files (pre-serialized xgboost DMatrix binaries).
dtrain1 = xgb.DMatrix('data/dtrain1.buffer')
dtestCV1 = xgb.DMatrix('data/dtestCV1.buffer')
dtrain2 = xgb.DMatrix('data/dtrain2.buffer')
dtestCV2 = xgb.DMatrix('data/dtestCV2.buffer')
# Define the hyperparameter space (shared by both objectives); the task is
# 38-class softprob with mlogloss, matching the TripType label count.
space = {'eta': hp.quniform('eta', 0.025, 0.5, 0.025),
         'max_depth': hp.quniform('max_depth', 1, 15, 1),
         'min_child_weight': hp.quniform('min_child_weight', 1, 6, 1),
         'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
         'gamma': hp.quniform('gamma', 0.5, 1, 0.05),
         'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
         'num_class': 38,
         'eval_metric': 'mlogloss',
         'objective': 'multi:softprob'}
# Evaluate the function fmin over the hyperparameter space, and
# print the best hyperparameters.
best1 = fmin(objective1, space=space, algo=tpe.suggest, max_evals=250)
print "Optimal parameters for dtrain1 are: ", best1
#
best2 = fmin(objective2, space=space, algo=tpe.suggest, max_evals=250)
print "Optimal parameters for dtrain2 are: ", best2
#
|
[
"claus.lv@hotmail.com"
] |
claus.lv@hotmail.com
|
4fdf5b55ca8b34dbe7b97e293d8dace35dd2c25c
|
ce13eba2d3d1e7267b44cd322d309c0e1f3e6785
|
/pb_file_generation.py
|
26d7bf10581c85f22b12aec9ff2e341b9465e57b
|
[] |
no_license
|
parthnatekar/Brain-tumor-segmentation
|
fdfc9ba41d410a3618947c0b6d784ff013ded4a7
|
88aecfea58bf551457c2c8622cc23e74e48db7e7
|
refs/heads/master
| 2022-02-22T03:31:28.981931
| 2019-08-30T06:53:16
| 2019-08-30T06:53:16
| 192,159,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,089
|
py
|
import tensorflow as tf
import keras.backend as K
# Trained-model artifacts: for each architecture, the Keras model file,
# the checkpoint weights to load, and the frozen-graph (.pb) output path.
resnet_model_path = 'trained_models/U_resnet/ResUnet.h5'
resnet_weights_path = 'trained_models/U_resnet/ResUnet.15_0.491.hdf5'
resnet_pb_path = 'trained_models/U_resnet/resnet.pb'
sunet_model_path = 'trained_models/SimUnet/FCN.h5'
sunet_weights_path = 'trained_models/SimUnet/SimUnet.40_0.060.hdf5'
sunet_pb_path = 'trained_models/SimUnet/SUnet.pb'
dense_model_path = 'trained_models/densenet_121/densenet121.h5'
dense_weights_path = 'trained_models/densenet_121/densenet.55_0.522.hdf5'
dense_pb_path = 'trained_models/densenet_121/densenet.pb'
shallow_model_path = 'trained_models/shallowunet/shallow_unet.h5'
shallow_weights_path = 'trained_models/shallowunet/shallow_weights.hdf5'
shallow_pb_path = 'trained_models/shallowunet/shallow_unet.pb'
from keras.models import load_model
from models import *
from losses import *
def load_seg_model(model_='shallow'):
    """Load a trained segmentation model and its checkpoint weights.

    :param model_: one of 'uresnet', 'fcn', 'dense', 'shallow'.
    :return: (keras model, weights path, frozen-graph output path).
        NOTE(review): returns None implicitly for any other model_ value,
        which makes the tuple-unpacking caller below crash.
    """
    # model = unet_densenet121_imagenet((240, 240), weights='imagenet12')
    # model.load_weights(weights_path)
    if model_ == 'uresnet':
        model = load_model(resnet_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
        model.load_weights(resnet_weights_path)
        return model, resnet_weights_path, resnet_pb_path
    elif model_ == 'fcn':
        model = load_model(sunet_model_path, custom_objects={'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
        model.load_weights(sunet_weights_path)
        return model, sunet_weights_path, sunet_pb_path
    elif model_ == 'dense':
        model = load_model(dense_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
        model.load_weights(dense_weights_path)
        return model, dense_weights_path, dense_pb_path
    elif model_ == 'shallow':
        model = load_model(shallow_model_path, custom_objects={'gen_dice_loss': gen_dice_loss,'dice_whole_metric':dice_whole_metric,'dice_core_metric':dice_core_metric,'dice_en_metric':dice_en_metric})
        model.load_weights(shallow_weights_path)
        return model, shallow_weights_path, shallow_pb_path
def save_frozen_graph(filename):
    """Freeze the current Keras session graph into *filename* (.pb).

    NOTE(review): relies on the module-level `session` opened below, and
    hard-codes 'conv2d_32/BiasAdd' as the output node -- presumably valid
    for the default model only; confirm per architecture.
    """
    output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        session,
        K.get_session().graph.as_graph_def(),
        ['conv2d_32/BiasAdd']
    )
    with open(filename, "wb") as f:
        f.write(output_graph_def.SerializeToString())
# Load the default model, freeze the session graph to pb_path, then re-read
# the frozen graph and dump its node names for inspection.
with tf.Session(graph=K.get_session().graph) as session:
    session.run(tf.global_variables_initializer())
    model_res, weights_path, pb_path = load_seg_model()
    print (model_res.summary())
    save_frozen_graph(pb_path)
import tensorflow as tf  # NOTE(review): redundant -- tensorflow is already imported above
graph_def = tf.GraphDef()
with open(pb_path, "rb") as f:
    graph_def.ParseFromString(f.read())
for node in graph_def.node:
    print(node.name)
|
[
"koriavinash1@gmail.com"
] |
koriavinash1@gmail.com
|
2e83ec6d1e2949ecaaf7d1bb3de03ea892f66966
|
1c6e5c808c1a3e6242e40b15ae711574e670c3b6
|
/food_management/views/update_meal_schedule/request_response_mocks.py
|
c10259d419f9263a235901ef89852547ecc07f81
|
[] |
no_license
|
KatakamVedaVandhana/smart_food_management-vandhana
|
dbe195994c110471d0ae7a5a53adef1441e86466
|
19e410a2aa792b22889a2dfed599312ba6b5a7ad
|
refs/heads/master
| 2023-07-09T05:43:17.491313
| 2020-06-15T06:44:00
| 2020-06-15T06:44:00
| 269,609,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
REQUEST_BODY_JSON = """
{
"meal_type": "Breakfast",
"date": "string",
"items_list": [
{
"item_id": 1,
"meal_course": "Half-meal",
"quantity": 1
}
]
}
"""
|
[
"vandhanakatakam@gmail.com"
] |
vandhanakatakam@gmail.com
|
3dcf93313868e6a333acf59112cec9cc100db628
|
20db5a27f2a8b2d324085f5e1ec6c46ad7c1e8c3
|
/djangoMovie/wsgi.py
|
bd36ec73a578e75dac2e61871a123692e83408c3
|
[] |
no_license
|
mortadagzar/djangoMovie
|
dae326fc83a31e485792b1ee42fa89b7d681049d
|
e83904c0c1ecc45992eed7516cb483bd2c97590b
|
refs/heads/master
| 2020-04-01T22:32:28.246877
| 2018-10-19T02:41:22
| 2018-10-19T02:41:22
| 153,713,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for djangoMovie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI entry point.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoMovie.settings')
application = get_wsgi_application()  # WSGI callable used by the server
|
[
"mortadagzar@gmail.com"
] |
mortadagzar@gmail.com
|
016f142fbb09daf43f5feeb03bd08f1a32783e55
|
478071aed3612b8eefb5dc521b8fe18f95eaffdd
|
/Existing paper reading/model/GATA.py
|
105813c869a5edc310f98773b833ce8591293d86
|
[
"MIT"
] |
permissive
|
leiloong/PaperRobot
|
f913918671d758ae7e9d4098fe42cad19fbbbc6d
|
070972dc1548571c28d89d2c54fb379e87d172c7
|
refs/heads/master
| 2020-05-30T11:39:11.814416
| 2019-07-18T08:25:00
| 2019-07-18T08:25:00
| 189,710,771
| 0
| 0
|
MIT
| 2019-07-18T08:25:01
| 2019-06-01T08:47:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
# --------- Link Prediction Model with both TAT and GAT contained -----------
import torch.nn as nn
import torch
from .GAT import GAT
from .TAT import TAT
class GATA(nn.Module):
    """Link-prediction model combining a graph encoder (GAT) and a text
    encoder (TAT); a learned per-entity gate mixes the two encodings and
    triples are scored TransE-style as |head + relation - tail|."""
    def __init__(self, emb_dim, hid_dim, out_dim, num_voc, num_heads, num_ent, num_rel, dropout, alpha, **kwargs):
        super(GATA, self).__init__()
        self.ent_embedding = nn.Embedding(num_ent, emb_dim)
        self.rel_embedding = nn.Embedding(num_rel, emb_dim)
        self.graph = GAT(nfeat=emb_dim, nhid=hid_dim, dropout=dropout, nheads=num_heads, alpha=alpha)
        self.text = TAT(emb_dim, num_voc)
        # Per-entity gate logits; sigmoid of these weighs graph vs. text.
        self.gate = nn.Embedding(num_ent, out_dim)
    def forward(self, nodes, adj, pos, shifted_pos, h_sents, h_order, h_lengths, t_sents, t_order, t_lengths):
        """Score positive triples.

        `pos[:, 0/1/2]` index head/tail/relation embeddings; `shifted_pos`
        indexes the same head/tail pairs within `nodes` -- assumed to be a
        batch-local re-indexing, TODO confirm against the caller.
        """
        node_features = self.ent_embedding(nodes)
        graph = self.graph(node_features, adj)
        head_graph = graph[[shifted_pos[:, 0].squeeze()]]
        tail_graph = graph[[shifted_pos[:, 1].squeeze()]]
        head_text = self.text(h_sents, h_order, h_lengths, node_features[[shifted_pos[:, 0].squeeze()]])
        tail_text = self.text(t_sents, t_order, t_lengths, node_features[[shifted_pos[:, 1].squeeze()]])
        r_pos = self.rel_embedding(pos[:, 2].squeeze())
        gate_head = self.gate(pos[:, 0].squeeze())
        gate_tail = self.gate(pos[:, 1].squeeze())
        score_pos = self._score(head_graph, head_text, tail_graph, tail_text, r_pos, gate_head, gate_tail)
        return score_pos
    def _score(self, hg, ht, tg, tt, r, gh, gt):
        """Gate graph/text features per entity, then return the elementwise
        TransE residual |head + r - tail|."""
        gate_h = torch.sigmoid(gh)
        gate_t = torch.sigmoid(gt)
        head = gate_h * hg + (1-gate_h) * ht
        tail = gate_t * tg + (1-gate_t) * tt
        s = torch.abs(head + r - tail)
        return s
|
[
"dalewanghz@gmail.com"
] |
dalewanghz@gmail.com
|
50ac7d9499d215fdeee98e4acab4c2ba61d65aa5
|
a704c91ba38fb9f733102506f3bbf1325ab0e73b
|
/loans/asgi.py
|
e0a7f28c0e79d8818c407d48e9c27dbbbb8509f2
|
[] |
no_license
|
Nyakinyua/Loans-
|
e2b69ef00118ab2831df5b12a9e9987944bd23a2
|
fd9d9d51cfb02905001921f7c989ea11be0f68e4
|
refs/heads/master
| 2023-04-03T03:12:29.134845
| 2021-04-13T09:26:44
| 2021-04-13T09:26:44
| 348,429,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
ASGI config for loans project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the ASGI entry point.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'loans.settings')
application = get_asgi_application()  # ASGI callable used by the server
|
[
"wanyakinyua968@gmail.com"
] |
wanyakinyua968@gmail.com
|
81500922d96e1dcf88f6749557790f570cda92ca
|
cf1431d3d4843fda317ec9c1d39cceaa0cbe69e2
|
/gewittergefahr/gg_utils/time_periods.py
|
102edab472e111ff3dd03896c662b6a0a7d926a4
|
[
"MIT"
] |
permissive
|
theweathermanda/GewitterGefahr
|
9dad0f5d4595db647d511a7b179b159201dff4f2
|
b8bcbf4c22457b3aa4613ff2c07b32a6e71068e2
|
refs/heads/master
| 2020-04-26T02:20:40.434434
| 2019-02-17T03:42:31
| 2019-02-17T03:42:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,831
|
py
|
"""Methods for handling time periods."""
import numpy
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import error_checking
def range_and_interval_to_list(start_time_unix_sec=None, end_time_unix_sec=None,
                               time_interval_sec=None, include_endpoint=True):
    """Converts time period from range and interval to list of exact times.

    N = number of exact times

    :param start_time_unix_sec: Start time (Unix format).
    :param end_time_unix_sec: End time (Unix format).
    :param time_interval_sec: Interval (seconds) between successive exact times.
    :param include_endpoint: Boolean flag.  If True, endpoint will be included
        in list of time steps.  If False, endpoint will be excluded.
    :return: unix_times_sec: length-N numpy array of exact times (Unix format).
    """

    error_checking.assert_is_integer(start_time_unix_sec)
    error_checking.assert_is_not_nan(start_time_unix_sec)
    error_checking.assert_is_integer(end_time_unix_sec)
    error_checking.assert_is_not_nan(end_time_unix_sec)
    error_checking.assert_is_integer(time_interval_sec)
    error_checking.assert_is_boolean(include_endpoint)

    if include_endpoint:
        error_checking.assert_is_geq(end_time_unix_sec, start_time_unix_sec)
    else:
        error_checking.assert_is_greater(end_time_unix_sec, start_time_unix_sec)

    # Snap both endpoints onto the time_interval_sec grid.
    start_time_unix_sec = int(rounder.floor_to_nearest(
        float(start_time_unix_sec), time_interval_sec))
    end_time_unix_sec = int(rounder.ceiling_to_nearest(
        float(end_time_unix_sec), time_interval_sec))

    if not include_endpoint:
        end_time_unix_sec -= time_interval_sec

    # BUG FIX: use floor division so num_time_steps stays an int under
    # Python 3 (numpy.linspace requires an integer `num`).  Both endpoints
    # are already multiples of time_interval_sec, so the division is exact
    # and the value is unchanged (and identical to Python 2's int `/`).
    num_time_steps = 1 + (end_time_unix_sec -
                          start_time_unix_sec) // time_interval_sec
    return numpy.linspace(start_time_unix_sec, end_time_unix_sec,
                          num=num_time_steps, dtype=int)
def time_and_period_length_to_range(unix_time_sec, period_length_sec):
    """Maps a single time to the bounds of the period containing it.

    The period grid is anchored at the Unix epoch: the start is
    unix_time_sec floored to the nearest multiple of period_length_sec.

    :param unix_time_sec: Single time (Unix format).
    :param period_length_sec: Length of time period (seconds).
    :return: start_time_unix_sec: Beginning of time period (Unix format).
    :return: end_time_unix_sec: End of time period (Unix format).
    """

    error_checking.assert_is_integer(unix_time_sec)
    error_checking.assert_is_not_nan(unix_time_sec)
    error_checking.assert_is_integer(period_length_sec)

    period_start_unix_sec = int(rounder.floor_to_nearest(
        float(unix_time_sec), period_length_sec))
    period_end_unix_sec = period_start_unix_sec + period_length_sec
    return period_start_unix_sec, period_end_unix_sec
def time_and_period_length_and_interval_to_list(unix_time_sec=None,
                                                period_length_sec=None,
                                                time_interval_sec=None,
                                                include_endpoint=True):
    """Converts single time, period length, and interval to list of exact times.

    Thin composition: resolves the containing period for `unix_time_sec`,
    then expands that period into discrete time steps.

    :param unix_time_sec: Single time (Unix format).
    :param period_length_sec: Length of time period (seconds).
    :param time_interval_sec: Interval (seconds) between successive exact times.
    :param include_endpoint: Boolean flag. If True, endpoint will be included
        in list of time steps. If False, endpoint will be excluded.
    :return: unix_times_sec: length-N numpy array of exact times (Unix format).
    """
    period_start_unix_sec, period_end_unix_sec = (
        time_and_period_length_to_range(unix_time_sec, period_length_sec))

    return range_and_interval_to_list(
        start_time_unix_sec=period_start_unix_sec,
        end_time_unix_sec=period_end_unix_sec,
        time_interval_sec=time_interval_sec,
        include_endpoint=include_endpoint)
|
[
"ryan.lagerquist@ou.edu"
] |
ryan.lagerquist@ou.edu
|
cda754c843996deb186c2e23dde533e8fee2c7e6
|
4a6ee62745aaad67326bf6e3bb2001f5ef84b8ab
|
/music/admin.py
|
a2ccca151a409e344792bd5071c78414ad43b21b
|
[] |
no_license
|
sandeep201451066/MusicAlbum
|
437c79599fa56a6b21354e3d376a21d0b48f47a9
|
bb3b1c176eb2f2bccdd88547fc1f04ee8262ac28
|
refs/heads/master
| 2021-01-12T02:36:59.740581
| 2017-01-07T06:57:05
| 2017-01-07T06:57:05
| 78,078,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from django.contrib import admin
# Register your models here.
from music.models import AlbumList, SongList
# Expose both models in the Django admin site with the default ModelAdmin.
admin.site.register(AlbumList)
admin.site.register(SongList)
|
[
"you@example.com"
] |
you@example.com
|
49f6372be4dbd4d5b5afa73800df88ec46448170
|
e035e042c425b7abbaee50b8f331b1d4ebcbd388
|
/test/testapikeyfilegenerator.py
|
4447a5f57529b553266526308950cbab3a2cfe21
|
[] |
no_license
|
Archanciel/C2
|
6ae2589c23c1499db3ca3d362459afecb52283ed
|
8abf29d6798743d06d1949e22ff5c4bece2cd85d
|
refs/heads/master
| 2020-03-21T11:19:38.298586
| 2018-09-01T10:23:50
| 2018-09-01T10:23:50
| 138,500,734
| 0
| 1
| null | 2018-07-28T18:33:25
| 2018-06-24T17:09:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
import inspect
import os
import sys
import unittest
import pickle
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.insert(0,currentdir) # this instruction is necessary for successful importation of utilityfortest module when
# the test is executed standalone
from apikey.apikeyfilegenerator import ApiKeyFileGenerator
class TestApiKeyFileGenerator(unittest.TestCase):
    """Unit tests for ApiKeyFileGenerator's command-line key-file creation."""

    def testCreateKeyFile(self):
        '''
        This test demonstrates how to test a command line script using argparse.
        :return:
        '''
        ap = ApiKeyFileGenerator()
        # Run the generator with full CLI args: api key, secret, file, password.
        ap.createKeyFile(['-a', 'key', '-s', 'secret key', '-f', 'testfile', '-pw', 'monpw'])
        # The generated file is a pickled list of two encoded strings.
        with open(ap.FILE_PATH + 'testfile.bin', 'rb') as handle:
            encryptedKeyList = pickle.load(handle)
        self.assertEqual(['w5jDlMOn', 'w6DDlMORw6LDnMOhwo_DmcOVw7A='], encryptedKeyList)
        # Decoding with the same password must round-trip both values.
        self.assertEqual('key', ap.decode('monpw', encryptedKeyList[0]))
        self.assertEqual('secret key', ap.decode('monpw', encryptedKeyList[1]))

    def testCreateKeyFileNoArgs(self):
        # argparse exits the process when required arguments are missing.
        ap = ApiKeyFileGenerator()
        with self.assertRaises(SystemExit):
            ap.createKeyFile([])
|
[
"jp.schnyder@gmail.com"
] |
jp.schnyder@gmail.com
|
a6f3acb6b119c6a7b83bda654ccc2e610af29885
|
af4eb8204923b5848fce3158c6f8a89a480ea1d8
|
/script/AGNSS/AGNSS_Test_0084.py
|
afe06efc62851c32680c7b868f5dba30c5625c8c
|
[] |
no_license
|
wanghaoplus/gatog
|
4ab0c77d4f9eb93da136ad3933a68cbf0b5c5bca
|
8935e20a426638462cd1cc7bc048a16751287a2f
|
refs/heads/master
| 2022-04-10T21:36:20.395304
| 2020-03-26T10:09:28
| 2020-03-26T10:09:28
| 248,264,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/02/21 22:14
# @Author : wangdelei
# @Site :
# @File : AGNSS_Test_0084.py
# @Software: PyCharm
from aw.LbsTestCase import LbsTestCase
import time
from aw.core.Input import *
import threading
class AGNSS_Test_0084(LbsTestCase):
    """AGNSS test case 0084: replays a Labsat GNSS scene and analyses the
    NMEA data captured from the device under test.

    NOTE(review): `self.sceneData`, `self.labsat`, `self.lbs` and `self.data`
    are populated by the LbsTestCase framework, not here — confirm against
    the base class before relying on their shape.
    """

    def __init__(self):
        super(AGNSS_Test_0084, self).__init__()
        # Names of the test methods this case will execute.
        self.TestCaseList = ["AGNSS_Test_0084"]

    def setup(self):
        self.setupStep('labsat')
        super(AGNSS_Test_0084, self).setup()
        self.aw_initLabsat()
        # NOTE(review): loopTimes and sceneId are read but never used in this
        # method — possibly leftovers from a template.
        loopTimes = self.data.LoopTimes
        sceneId = self.data.sceneId
        print(self.sceneData)

    def AGNSS_Test_0084(self):
        self.testStep("开始测试")
        self.testStep("播放labsat场景")
        # Replay the recorded scene, attenuate the signal by 20 (units per
        # aw_labsatATTN), then wait for the scene duration to elapse.
        self.labsat.aw_labsatPlay(self.sceneData["fileName"], self.sceneData['startTime'], self.sceneData['duarTime'])
        self.labsat.aw_labsatATTN(20)
        time.sleep(self.sceneData['duarTime'])
        self.testStep("停止labsat播放")
        self.labsat.aw_labsatStopPlay()
        self.testStep("停止串口读取")
        self.lbs.aw_stopReadPort()
        self.testStep("分析Nmea数据")
        self.lbs.aw_nmeanalysis(self.sceneData['utcStartTime'], self.sceneData['utcEndTime'], sceneId=self.sceneData['sceneId'])

    def teardown(self):
        # NOTE(review): the literal below is mojibake (broken encoding);
        # it presumably once read "停止测试结束". Kept byte-identical because
        # it is a runtime string.
        self.teardownStep("ֹͣ测试结束")
|
[
"418816179@qq.com"
] |
418816179@qq.com
|
fc9a91f8832a9e9583dfed7a9b8dbfdde0d1adee
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/counter/counter_vtep_stats.py
|
f46f33a278bb54b92bf117eca714b56cd163fb28
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210
| 2015-08-13T17:53:23
| 2015-08-13T17:53:23
| 40,673,499
| 0
| 0
| null | 2015-08-13T17:51:35
| 2015-08-13T17:51:34
| null |
UTF-8
|
Python
| false
| false
| 8,169
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """Statistics container for the vtep object.

    This class does not support CRUD operations; use the parent class.
    Every attribute listed in ``__init__`` is a named rx_*/tx_* packet,
    byte, or drop counter (plus ``cfg_vtep_error``) from the A10 counter
    schema; the per-field oid/size metadata lives in the upstream schema.
    All counters start as the empty string and are overwritten by whatever
    keyword arguments the caller supplies.

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Counter attributes, initialised empty, in original schema order.
        for counter_name in (
                "rx_bad_inner_ipv4_len_pkts", "rx_pkts",
                "tx_encap_missing_pkts", "rx_mcast_pkts", "cfg_vtep_error",
                "rx_reassembled_pkts", "rx_ucast_pkts", "rx_lif_uninit",
                "rx_lif_invalid", "rx_dot1q_ptks", "rx_bad_checksum_pkts",
                "tx_bcast_pkts", "tx_fragmented_pkts", "rx_host_learned",
                "rx_unhandled_pkts", "tx_arp_req_sent_pkts",
                "rx_host_learn_error", "rx_encap_miss_pkts",
                "rx_requeued_pkts", "tx_lif_invalid", "rx_vtep_unknown",
                "rx_dropped_pkts", "tx_flood_pkts",
                "rx_bad_inner_ipv6_len_pkts", "rx_pkts_too_large",
                "tx_bytes", "tx_mcast_pkts", "tx_vtep_unknown",
                "tx_encap_unresolved_pkts", "tx_encap_bad_pkts",
                "rx_bcast_pkts", "tx_unhandled_pkts", "tx_dropped_pkts",
                "tx_ucast_pkts", "tx_pkts", "rx_bytes"):
            setattr(self, counter_name, "")
        # Caller-supplied values override the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Vtep(A10BaseClass):
    """Class Description::

    Statistics for the object vtep.

    Class vtep supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this
    module.

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/counter/vtep/{sampling_enable}/stats`.
    """

    def __init__(self, **kwargs):
        # Default attribute values; kwargs below may override any of them.
        defaults = {
            "ERROR_MSG": "",
            "required": ["sampling_enable"],
            "b_key": "vtep",
            "a10_url": "/axapi/v3/counter/vtep/{sampling_enable}/stats",
            "DeviceProxy": "",
            "stats": {},
        }
        for attr_name, default_value in defaults.items():
            setattr(self, attr_name, default_value)
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
2241a6690333ae6509e556b65fccb35af2f114c3
|
18a6b272d4c55b24d9c179ae1e58959674e53afe
|
/tf_rl/examples/NerveNet/scripts/ppos/mujoco/test_env.py
|
66cb7b6d26ae7b5ddd7b2ed0f8b11ee777bf4a40
|
[
"MIT"
] |
permissive
|
Rowing0914/TF2_RL
|
6cce916f409b3d4ef2a5a40a0611908f20d08b2c
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
refs/heads/master
| 2022-12-10T09:58:57.456415
| 2021-05-23T02:43:21
| 2021-05-23T02:43:21
| 233,476,950
| 9
| 1
|
MIT
| 2022-12-08T07:02:42
| 2020-01-12T23:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 621
|
py
|
import gym
# you can choose other environments.
# possible environments: Ant-v2, HalfCheetah-v2, Hopper-v2, Humanoid-v2,
# HumanoidStandup-v2, InvertedPendulum-v2, Reacher-v2, Swimmer-v2, Walker2D-v2
env = gym.make("Walker2d-v2")
# Print observation/action dimensionality for the chosen environment.
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print('state size:', num_inputs)
print('action size:', num_actions)
env.reset()
# Roll the environment forward 1000 steps with random actions, rendering
# each frame and printing the per-step reward.
for _ in range(1000):
    env.render()
    state, reward, done, _ = env.step(env.action_space.sample())
    # print('state:', state)
    # reward = forward velocity - sum(action^2) + live_bonus
    print('reward:', reward)
|
[
"kosakaboat@gmail.com"
] |
kosakaboat@gmail.com
|
8929de1dd4441865c55d4dcb7a063a6b3ee7b872
|
637bb3f080ff18001a732d9bf607ef962b09c5dd
|
/AtiviadeMeioDisciplina/marte.py
|
f9f948a67a836eaaa4301207c7e9bf074231a62a
|
[] |
no_license
|
magnoazneto/IFPI_Algoritmos
|
995296fa22445c57981a1fad43e1ef7a8da83e5e
|
3b5e79c79b7a1fb7a08206719fd418fba1b39691
|
refs/heads/master
| 2022-02-27T10:59:17.123895
| 2019-11-17T13:51:35
| 2019-11-17T13:51:35
| 186,868,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
def main():
    """Read one message stream from stdin and print the corrupted count."""
    msg = input()
    print('Mensagens alteradas:', identify_mars(msg))
def identify_mars(string):
    """Count corrupted 4-character messages in *string*.

    The stream is treated as back-to-back 4-char messages that should all
    read 'HELP'. On the first character that deviates, the whole message is
    counted as corrupted and scanning jumps past its remaining characters.

    :param string: concatenated message stream.
    :return: number of corrupted messages.
    """
    pattern = 'HELP'
    pos = 0
    matched = 0      # how many chars of the current message matched so far
    corrupted = 0
    while pos < len(string):
        if string[pos] != pattern[matched]:
            # Corrupted message: count it and skip its remaining characters.
            corrupted += 1
            pos += len(pattern) - matched
            matched = 0
            continue
        pos += 1
        matched = (matched + 1) % len(pattern)
    return corrupted
# Script entry point: runs unconditionally on import (no __main__ guard).
main()
|
[
"magnoazneto@gmail.com"
] |
magnoazneto@gmail.com
|
f6b74de65dfb9b450d827c0b8c8a01263f7b6766
|
64b135891387dac3a4bb29f3001a524830d0e4e4
|
/news/forms.py
|
d5177748c1fd666136f6e163c39fd7bca1f0fca6
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
dynamicguy/treeio
|
9ad52802722b64a212e22710c04dbb0bb50d831e
|
4f674898cff2331711639a9b5f6812c874a2cb25
|
refs/heads/master
| 2021-08-28T11:25:41.504635
| 2014-01-31T17:16:22
| 2014-01-31T17:16:22
| 11,323,559
| 0
| 0
|
NOASSERTION
| 2021-08-16T20:18:53
| 2013-07-10T20:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,878
|
py
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
News module forms
"""
from django import forms
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from treeio.core.conf import settings
from treeio.core.models import UpdateRecord, ModuleSetting, Object
class UpdateRecordForm(forms.ModelForm):
    """Form for creating an UpdateRecord (news update) with recipients.

    Pre-populates the recipients field from the module's default
    permission setting ('userallgroups' → all of the user's groups,
    'usergroup' → only the user's default group).
    """

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user', None)
        super(UpdateRecordForm, self).__init__(*args, **kwargs)
        self.fields['body'].required = True
        self.fields['body'].label = _("Details")
        self.fields['recipients'].help_text = ""
        self.fields['recipients'].required = False
        # Multi-autocomplete widget backed by the access-lookup endpoint.
        self.fields['recipients'].widget.attrs.update({'class': 'multicomplete',
                                                       'callback': reverse('identities_ajax_access_lookup')})
        # get default permissions from settings
        try:
            conf = ModuleSetting.get_for_module('treeio.core', 'default_permissions')[0]
            default_permissions = conf.value
        except Exception:
            # Best-effort fallback when the setting is missing (IndexError)
            # or the lookup fails. Was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            default_permissions = settings.HARDTREE_DEFAULT_PERMISSIONS
        if self.user and 'userallgroups' in default_permissions:
            self.fields['recipients'].initial = [i.id for i in self.user.other_groups.all().only('id')]
            self.fields['recipients'].initial.append(self.user.default_group.id)
        elif self.user and 'usergroup' in default_permissions:
            self.fields['recipients'].initial = [self.user.default_group.id]

    class Meta:
        "TaskRecordForm"
        model = UpdateRecord
        fields = ['body', 'recipients']
class UpdateRecordFilterForm(forms.ModelForm):
    """Form for filtering UpdateRecords by author and related object."""

    def __init__(self, user, *args, **kwargs):
        super(UpdateRecordFilterForm, self).__init__(*args, **kwargs)
        self.fields['author'].label = _("Author")
        self.fields['about'].label = _("About")
        self.fields['author'].required = False
        # Autocomplete against the user-lookup AJAX endpoint.
        self.fields['author'].widget.attrs.update({'class': 'autocomplete',
                                                   'callback': reverse('identities_ajax_user_lookup')})
        # Restrict "about" choices to objects this user may access in 'x' mode.
        self.fields['about'].queryset = Object.filter_permitted(user, Object.objects, mode='x')
        self.fields['about'].required = False
        self.fields['about'].null = True
        self.fields['about'].help_text = ""
        self.fields['about'].widget.attrs.update({'class': 'multicomplete',
                                                  'callback': reverse('core_ajax_object_lookup')})

    class Meta:
        "Filter"
        model = UpdateRecord
        fields = ('author', 'about')
|
[
"letoosh@gmail.com"
] |
letoosh@gmail.com
|
ebe61823f230bf6d45bdfab7c10060e7919519bb
|
76cba124f60bf963b2e6bf4dbf03e74bfdb37899
|
/democode/comet.py
|
54d41fbd8968dc7de0a04921458475eae9bd6b57
|
[] |
no_license
|
gasman/exogenesis
|
b51d553bdb20fe523fe00f5fe585f6d7aaa84b72
|
511af1c990bb82d5364d6f950125c057610c8404
|
refs/heads/master
| 2021-01-22T11:51:35.771421
| 2013-09-08T10:53:59
| 2013-09-08T10:53:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
from democode.antialias import Antialiaser
import draw
import math
class CometScene(object):
    """Demo scene: a comet with a fading tail falling in a sine wobble.

    NOTE(review): `lp` is presumably a Launchpad-style LED grid handle
    consumed by Antialiaser — confirm against democode.antialias.
    """

    def __init__(self, lp):
        self.aa = Antialiaser(lp)

    def tick(self, pattern, beat):
        """Render one frame for the given beat position."""
        self.aa.clear()
        # x runs along the tail; each segment lags the head by x/4 beats and
        # fades (greenness shrinks with x).
        for x in range(2, 16):
            b = beat - x / 4
            head_y = max(2, 14 - (b / 4)) - 2 * math.sin(b * math.pi / 4)
            greenness = (16 - x) / 16.0
            draw.disc(self.aa.screen, x, head_y, greenness * 2, (1, greenness))
        self.aa.render()
class CircleCometScene(object):
    """Demo scene: a comet spiralling inward along a shrinking circle."""

    def __init__(self, lp):
        self.aa = Antialiaser(lp)

    def tick(self, pattern, beat):
        """Render one frame for the given beat position."""
        self.aa.clear()
        # Iterate tail segments from faintest (i=16) to brightest (i=3);
        # each segment trails the head by i*0.8 beats on a radius that
        # shrinks as the pattern progresses.
        for i in range(16, 2, -1):
            b = beat - i * 0.8
            a = math.pi * b / 12
            r = 7 - (beat / 16)
            x = 9 + r * math.sin(a)
            y = 9 + r * math.cos(a)
            greenness = (16 - i) / 16.0
            draw.disc(self.aa.screen, x, y, greenness * 2, (1, greenness))
        self.aa.render()
|
[
"matt@west.co.tt"
] |
matt@west.co.tt
|
eec486626a15cfc2fe84cee4502784e8873d9f69
|
3fb660ec514a7e2d2f225313991f16b58974f708
|
/Learning/assert-keyword.py
|
434293e5a78caed67cee76c60559b793cca2795a
|
[] |
no_license
|
jwatson-CO-edu/py_info
|
e2c89bbe05b411d2086c182630d165f87a99ec3f
|
297f9f5733fe256e5c96f2da82f49d82c2a4ba9d
|
refs/heads/master
| 2022-02-06T13:03:14.178557
| 2022-01-22T03:01:46
| 2022-01-22T03:01:46
| 230,357,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
"""
RESULT: If the conditional following the 'assert' keyword evaluates False , then an 'AssertionError' is raised , Just like C++
You can optionally provide a message string to give more information about the assertion was violated , which is a good idea anyway
"""
# NOTE: Python 2 syntax (print statement). Also note that `assert` is
# stripped when running with -O, so it is not real input validation.
def picky_arg( arg ):
    """ 'arg' must be greater than 5 """
    assert arg > 5 , "'arg' was too small!"
    print "You make a compelling argument"

# picky_arg( 4 ) # AssertionError: 'arg' was too small! , program crashes with unhandled exception
picky_arg( 6 ) # "You make a compelling argument"
|
[
"james.r.watson@utah.edu"
] |
james.r.watson@utah.edu
|
b0737e53bab3a189f26c79793e6107c969e82108
|
7004661440b908a622ccc01809864971ed6f6d7b
|
/main.py
|
d594a99dc4f2b81f4a0b3a1fce2710a201520858
|
[] |
no_license
|
hanj2270/E-HentaiCrawler
|
a00aa91d4fae2fe1082eebc0daa30ad152ca9f41
|
46f778db61da2166c35bcf03aaf930177c3acd8d
|
refs/heads/master
| 2021-01-19T02:33:13.803416
| 2017-03-08T08:34:18
| 2017-03-08T08:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
"""Crawler entry point: wires index-generator / crawler processes and
writer / error-handler threads together via multiprocessing queues."""
import threading
from multiprocessing import Queue, Process
from Proxy.IPPool import getIP
from config import PROCESS_MAX, THREAD_MAX
from crawler.indexgeter import indexgeter
from crawler.webDataGeter import webdatageter
from database.data_writer import data_writer
from error.error_handling import error_handing
if __name__ == "__main__":
    # Index (directory) queue
    qindex = Queue()
    # Data queue
    qdata = Queue()
    # Error queue
    qerror = Queue()
    # Proxy queue
    qip = Queue()
    # Start the index-generator process
    Process(target=indexgeter, args=(qindex,)).start()
    # Start the crawler processes
    n = 0
    while n < PROCESS_MAX:
        Process(target=webdatageter, args=(qindex, qdata, qerror, qip)).start()
        n += 1
    # Fetch up to THREAD_MAX proxies, used to start crawler threads
    # (stops early if getIP() fails)
    n = 0
    while n < THREAD_MAX:
        try:
            qip.put(getIP())
            n += 1
        except BaseException:
            break
    # Data-writer thread
    threading.Thread(target=data_writer, args=(qdata,)).start()
    # Error-handling thread
    threading.Thread(target=error_handing, args=(qdata, qerror, qip, n)).start()
|
[
"shuiqukeyou@gmail.com"
] |
shuiqukeyou@gmail.com
|
2665af70a535b15961a85fcd7f63751772321701
|
080c13cd91a073457bd9eddc2a3d13fc2e0e56ae
|
/MY_REPOS/awesome-4-new-developers/OVERFLOW/DS-ALGO-OFFICIAL/temp/algorithms/algorithms/maths/base_conversion.py
|
b538f2b3bfc0b89f2a4c708831f88b8ba847a996
|
[] |
no_license
|
Portfolio-Projects42/UsefulResourceRepo2.0
|
1dccc8961a09347f124d3ed7c27c6d73b9806189
|
75b1e23c757845b5f1894ebe53551a1cf759c6a3
|
refs/heads/master
| 2023-08-04T12:23:48.862451
| 2021-09-15T12:51:35
| 2021-09-15T12:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
"""
Integer base conversion algorithm
int2base(5, 2) return '101'.
base2int('F', 16) return 15.
"""
import string
def int_to_base(n, base):
    """Convert integer *n* to its string representation in *base*.

    Digits above 9 use uppercase letters (e.g. int_to_base(255, 16) == 'FF').
    Negative numbers get a leading '-'.

    :type n: int
    :type base: int
    :rtype: str
    """
    if n == 0:
        return "0"
    alphabet = string.digits + string.ascii_uppercase
    sign = "-" if n < 0 else ""
    n = abs(n)
    parts = []
    while n:
        n, remainder = divmod(n, base)
        parts.append(alphabet[remainder])
    # Digits were produced least-significant first; reverse for display order.
    return sign + "".join(reversed(parts))
def base_to_int(s, base):
    """Parse string *s* as an unsigned integer written in *base*.

    Note : You can use int() built-in function instead of this.

    :type s: str
    :type base: int
    :rtype: int
    """
    value_of = {symbol: idx
                for idx, symbol in enumerate(string.digits + string.ascii_uppercase)}
    # Horner's method: fold digits left-to-right.
    total = 0
    for symbol in s:
        total = total * base + value_of[symbol]
    return total
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
cf6364cfe0b6485a6cb715e366ab55a7804f6714
|
dd7e2cb3366855584fc27bbb2e0dc7ed63977117
|
/vida/vida/migrations/0019_auto_20160204_1447.py
|
f82aefc2ee0ce4b604551cdbd531c2578fedde9d
|
[
"MIT"
] |
permissive
|
ProminentEdge/flintlock
|
3d3eb6efbe4a2d0e731f8a26e69ffcd314e25cb9
|
271c897b332f0c24e00a23c1fe86f5172fb9dd30
|
refs/heads/master
| 2021-01-17T07:58:49.650273
| 2016-03-05T02:06:59
| 2016-03-05T02:06:59
| 50,030,647
| 1
| 1
| null | 2016-03-04T05:10:45
| 2016-01-20T13:06:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 645
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated migration: orders Report by newest timestamp and
    makes its geometry field optional (nullable PointField)."""

    dependencies = [
        ('vida', '0018_auto_20160204_1131'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='report',
            options={'ordering': ['-timestamp'], 'get_latest_by': 'timestamp'},
        ),
        migrations.AlterField(
            model_name='report',
            name='geom',
            field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, blank=True),
        ),
    ]
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
56071c9dbe2ba7e34e38e15fca15143a1dcf295b
|
db9140a12939db6226d68624eecc3cc3fdadf3dd
|
/adage/node.py
|
417b8feaaa05c5f55453175c310e5311253af50e
|
[] |
no_license
|
nextiams/adage
|
dedb09a64bdbd714d7043a00a51f556b6224e129
|
57525f7ed67d765525009b639bb355c74b1054e1
|
refs/heads/master
| 2022-09-08T23:25:24.163519
| 2017-10-09T08:13:33
| 2017-10-09T08:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
import time
import uuid
import logging
import adage.nodestate as nodestate
log = logging.getLogger(__name__)
class Node(object):
    """A single task node in an adage workflow graph.

    Holds the task callable, lifecycle timestamps, and the state derived
    from the executing backend's result proxy.
    """

    def __init__(self,name,task,identifier = None, define_time = None):
        # Stable id; generated when the caller does not supply one.
        self.identifier = identifier or str(uuid.uuid4())
        self.name = name
        self.task = task
        # the timestamps
        self.define_time = define_time or time.time()
        self.submit_time = None
        self.ready_by_time = None
        # backend to update state against
        self.backend = None
        # relevant state data
        self.resultproxy = None
        self._result = None
        self._state = nodestate.DEFINED

    def __repr__(self):
        return '<Node name: {} id: {} state: {}>'.format(self.name,self.identifier,self.state)

    def update_state(self):
        """Refresh _state (and _result) by polling the backend's proxy."""
        #if we do not have a result object
        #that means it's not submitted yet
        if not self.resultproxy:
            self._state = nodestate.DEFINED
            return
        #if we have a resultobject
        #but the result is not ready
        #the node is still running
        if not self.backend.ready(self.resultproxy):
            self._state = nodestate.RUNNING
            return
        #if it's ready it's either successful
        #or failed
        if self.backend.successful(self.resultproxy):
            self._state = nodestate.SUCCESS
            self._result = self.backend.result(self.resultproxy)
        else:
            self._state = nodestate.FAILED
        #it's ready so set time stamp it not already set
        if not self.ready_by_time:
            self.ready_by_time = time.time()
            log.info('node ready %s',self)

    @property
    def result(self):
        # Only populated after update_state() observes a SUCCESS.
        return self._result

    @property
    def state(self):
        return self._state

    def ready(self):
        """True once the node reached a terminal state."""
        return self.state in [nodestate.SUCCESS, nodestate.FAILED]

    def successful(self):
        return self.state == nodestate.SUCCESS
|
[
"lukas.heinrich@gmail.com"
] |
lukas.heinrich@gmail.com
|
52c025692aaad19bacf308a26578cc9bf8277340
|
57c570d1b5a621158d8763f935e2069be6b8c90a
|
/tykj-operation/tykj-operation/service/service/estoreservice/api/tests.py
|
5be1dda2c5f44a1c0e04cdf10ddf55e7dc9f2c39
|
[] |
no_license
|
liuliainio/liuli
|
e011decf45f7eca7009a12ad4a96f33a17055945
|
203fbf4f135efb6432c77b937633003ce2f2c9a2
|
refs/heads/master
| 2021-01-10T20:35:08.070770
| 2018-08-21T05:52:59
| 2018-08-21T05:52:59
| 25,625,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
from __future__ import print_function, division, absolute_import
from bson.objectid import ObjectId
from django.test import TestCase
from estorecore.servemodels.push import PushMongodbStorage
from estoreservice import settings
import logging
import os
import time
from estorecore.test import get_all_test_cases
logger = logging.getLogger('django')
# import all test cases from test directory
# NOTE: Python 2 `exec` statement syntax; star-imports each discovered
# test module into this namespace so the test runner picks them up.
test_case_dir = os.path.abspath(
    os.path.join(os.path.abspath(__file__), '..', 'test'))
test_cases = get_all_test_cases(test_case_dir)
for pkg, mod in test_cases:
    exec 'from %s.%s import *' % (pkg, mod)
class SimpleTest(TestCase):
    """Sanity-check test case (smoke test for the test harness)."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(1 + 1, 2)
def dtest_push_message_update_perf():
    """Micro-benchmark: compare sync vs async (w=0) MongoDB `$inc` updates
    on the push messages collection, then revert the counter changes.

    The `dtest_` prefix keeps it out of automatic test discovery; run the
    module directly to execute it.
    """
    push_db = PushMongodbStorage(settings.MONGODB_CONF)
    # cond = {'_id': ObjectId("51064adb9813f3ea9cc702bc")}
    cond = {'id': 73}
    n_round = 10
    message_coll = push_db._db.messages
    for msg in message_coll.find(cond):
        print(msg)
    total_time = 0.0
    for _ in range(n_round):
        start = time.time()
        message_coll.update(cond, {'$inc': {'sent_count': 1}})
        total_time += (time.time() - start) * 1000.0
    print('inc sent_count sync took: %0.3f ms' % (total_time / n_round))
    total_time = 0.0
    for _ in range(n_round):
        start = time.time()
        # w=0: unacknowledged write, so this measures client-side cost only.
        message_coll.update(cond, {'$inc': {'sent_count': 1}}, w=0)
        total_time += (time.time() - start) * 1000.0
    print('inc sent_count async took: %0.3f ms' % (total_time / n_round))
    # Revert above changes.
    message_coll.update(cond, {'$inc': {'sent_count': -1 * 2 * n_round}})

# Entry point for running the benchmark manually.
if __name__ == '__main__':
    dtest_push_message_update_perf()
|
[
"liuliainio@163.com"
] |
liuliainio@163.com
|
2835c6426469a8c114fb1a60b7567e563589339c
|
cca53e4b21ca8d31f4b66519ac678e37ebce47a8
|
/exercises/ex8.py
|
90790acc57dae4156d8660a53d5b61fb7b4b6265
|
[] |
no_license
|
hancush/pynotes
|
b115a92ac9eb9b9d4e410177da855a828487adbd
|
be27097a8726d4cc22740b1b96398a7a77289d22
|
refs/heads/master
| 2021-01-19T00:47:23.246090
| 2015-09-15T21:20:30
| 2015-09-15T21:20:30
| 40,318,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# NOTE: Python 2 syntax (print statement). %r applies repr() to each value.
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
# A format string is just a string: it can be interpolated into itself.
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
    "I had this thing.",
    "That 'you' could type up right.",
    "But it didn't sing.", # shows up w double quotes bc single quote in string?
    "So I said goodnight."
)
|
[
"hannah.cushman@gmail.com"
] |
hannah.cushman@gmail.com
|
291249ca174379bb84d42e63c78fdf78cee4b58c
|
27e2b4b14d8217fcad3b57ef8918cb857931d89f
|
/learning/modules/cuda_module.py
|
abc88462975e47882fa8c9515c8385d895769c71
|
[
"BSD-2-Clause"
] |
permissive
|
jhu-lcsr/good_robot
|
205b31e9a2399032488ffa094d9f1e0d07592aa4
|
61217d65f040d536e54804150ce8abcf97343410
|
refs/heads/master
| 2022-03-31T23:49:04.958796
| 2022-03-25T18:14:14
| 2022-03-25T18:14:14
| 198,902,668
| 95
| 24
|
BSD-2-Clause
| 2022-02-18T20:45:13
| 2019-07-25T21:21:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
import torch
from torch import nn as nn
"""
class CudaModule(torch.nn.Module):
def __init__(self):
super(CudaModule, self).__init__()
self.is_cuda = False
self.cuda_device = None
def cuda(self, device=None):
nn.Module.cuda(self, device)
self.is_cuda = True
self.cuda_device = device
return self
"""
|
[
"elias.stengel@gmail.com"
] |
elias.stengel@gmail.com
|
62dffbb21ad8bfd2fb90e766057c1a6e49c6ac04
|
10fddce056973c339b1d939110ca2b29591e77f7
|
/wc_utils/workbook/__init__.py
|
57e2da10440dbb50fab80104f1190e0ddc100808
|
[
"MIT"
] |
permissive
|
KarrLab/wc_utils
|
4ed3bdfa558171ab32293a452f9e6e02b3fc16f1
|
a4c0e2e8b9bd88356729e38faf5c0d09d61ff921
|
refs/heads/master
| 2022-05-01T06:17:54.910791
| 2020-09-28T22:38:30
| 2020-09-28T22:38:30
| 69,289,809
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
from .core import (Workbook, Worksheet, Row, Formula,
WorkbookDifference, WorksheetDifference,
RowDifference, CellDifference)
from . import io
|
[
"jonrkarr@gmail.com"
] |
jonrkarr@gmail.com
|
05b1f77092060e63d75e668a1221e319952163a7
|
e7a9bac3b02a3849c1ab5d6990012510b8592c47
|
/src/briefcase/commands/__init__.py
|
5ed9108309d7fddad2a7683abe59158b365c852f
|
[
"BSD-3-Clause"
] |
permissive
|
saroad2/briefcase
|
3d15dabfa6462a3b123053042532d0ae482b689d
|
afbe8ed499c08afbeaa837ea032fa24d20b320a5
|
refs/heads/main
| 2023-03-06T12:33:52.929614
| 2022-10-04T22:27:03
| 2022-10-04T22:27:03
| 245,388,096
| 1
| 0
|
BSD-3-Clause
| 2023-02-26T20:59:27
| 2020-03-06T10:12:29
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
from .build import BuildCommand # noqa
from .create import CreateCommand # noqa
from .dev import DevCommand # noqa
from .new import NewCommand # noqa
from .open import OpenCommand # noqa
from .package import PackageCommand # noqa
from .publish import PublishCommand # noqa
from .run import RunCommand # noqa
from .update import UpdateCommand # noqa
from .upgrade import UpgradeCommand # noqa
|
[
"russell@keith-magee.com"
] |
russell@keith-magee.com
|
ebc22e699980c3f5b7b8234f2aad220fa055e67a
|
7f0548b7191b7589712af19baebafddae1d0505f
|
/dojoassignments/python/django/full_stack_django/bad_travel_buddy/apps/login_registration/migrations/0004_auto_20170627_1628.py
|
cb00a717c150b7df258a975ef221794b3969c1e7
|
[] |
no_license
|
mtjhartley/codingdojo
|
dd8eab1bd61fb847e44766e89fe3db2340468102
|
65dc558d19adbe62f85ad61c32cb1c392b56567c
|
refs/heads/master
| 2022-12-14T23:06:11.927445
| 2017-08-16T21:08:35
| 2017-08-16T21:08:35
| 92,218,728
| 1
| 5
| null | 2022-12-07T23:59:48
| 2017-05-23T20:46:03
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-27 16:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login_registration', '0003_remove_user_birthday'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='first_name',
new_name='name',
),
migrations.RenameField(
model_name='user',
old_name='last_name',
new_name='user_name',
),
]
|
[
"mtjhartley@gmail.com"
] |
mtjhartley@gmail.com
|
9fb38590c5f71af6d4e4a18577416948774e05a2
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/test_forge_onn22R.py
|
cf5f4f0b31136df8e8cedf859b2a06b0447a6563
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 567
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTestonn22R(TestCase):
def setUp(self):
self.maxDiff = None
def test_forge_onn22R(self):
expected = get_data(
path='operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/forged.hex')
actual = forge_operation_group(get_data(
path='operations/onn22RuTehcaQS1zaHxEHrrSRdCPTXUbHByyutPHL2EppBtd7Yg/unsigned.json'))
self.assertEqual(expected, actual)
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
36532ee5c85b919df895937c26f85572f220875a
|
6a4e1e4f320ae81a8069fbe2587d62420976dbf1
|
/mysite/polls/urls.py
|
949fd485050af7d1c38bb5918e4fc6677c444057
|
[] |
no_license
|
YuriiKhomych/First-Django-App
|
2f192ec00be6f84318905d4b18595b1cbad2b955
|
440a4618053adddf3b647975218bb6a003260243
|
refs/heads/master
| 2020-03-18T16:17:20.352467
| 2018-08-22T06:44:13
| 2018-08-22T06:44:13
| 134,957,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
[
"yuriykhomich@gmail.com"
] |
yuriykhomich@gmail.com
|
1a2a79c4c2018e104f9467715ad305e56df8cec2
|
ad6cd0aa5d96ef7a4116ec20737a3026082b9e16
|
/src/test/directory_lister_test.py
|
d3008e8dc00832476b28d24880a822d71170e5c7
|
[
"MIT"
] |
permissive
|
pgecsenyi/router-fs
|
0a00ad9f5cb5995048aa7fe08c20ee7eaf0621b7
|
1d4f579fb3cccd022fe1ab0e61aa00693e7234c1
|
refs/heads/master
| 2020-04-19T06:47:26.791739
| 2019-04-15T18:21:13
| 2019-04-15T18:21:45
| 168,028,417
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
import unittest
from unittest.mock import patch
from filesystem.transformation.directory_lister import DirectoryLister
class DirectoryListerTest(unittest.TestCase):
@patch('os.walk')
def test_list_directory(self, mock_walk):
dirpath = '/home/root/doc/fruits'
expected_files = [dirpath + '/apple.txt', dirpath + '/banana.txt']
mock_walk.return_value = [(dirpath, [], ['apple.txt', 'banana.txt'])]
directory_lister = DirectoryLister(dirpath)
result = [i for i in directory_lister.list_directory()]
mock_walk.assert_called_once_with(dirpath)
self.assertEqual(sorted(expected_files), sorted(result))
|
[
"pgecsenyi@protonmail.com"
] |
pgecsenyi@protonmail.com
|
0b2d748e251d874ac3698205a67fcbe62158aaa9
|
1156b7cde01b0cc467c22cfb75cde0a74887da1a
|
/bin/mongrey_web_sqlite.py
|
13ddaaf630e27e50d70faab7f82756f995022afd
|
[] |
no_license
|
davidpolet/mongrey-build
|
d1ee2e12ffbbd8061f4f2faa753572b3e328291f
|
8782b0e87474f2662cf35b3cb545516d76d9340d
|
refs/heads/master
| 2021-01-21T09:34:07.283966
| 2015-05-26T19:00:31
| 2015-05-26T19:00:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itsdangerous
import passlib
import flask_babelex
import babel
import blinker
import six
import flask
import redis
import arrow
import pygeoip
import regex
from werkzeug.contrib import cache
import peewee
import playhouse
import wtforms
import wtfpeewee
import simplekv
import flask_kvsession
from mongrey.web import settings
from mongrey.web.manager import main
from mongrey.web import extensions
from mongrey.storage.sql import models
main()
|
[
"stephane.rault@radicalspam.org"
] |
stephane.rault@radicalspam.org
|
17e23071e1107ae08f77200b82db87c0be5f516e
|
925fa0208e07ac2aeb64f9201249a91f48b900fa
|
/LeetCode/DP/MaxProductSubarray.py
|
a7f24f03a8350ba8ed682a2ef192a351af047334
|
[] |
no_license
|
Sanchi02/Dojo
|
984eb3cba26e43a8f6f0ef9c93f7aed24527b3ae
|
b25288c42a67d8639195f3fddef698f5cd179aac
|
refs/heads/master
| 2022-03-18T02:58:27.506082
| 2022-03-13T12:11:18
| 2022-03-13T12:11:18
| 197,040,319
| 0
| 0
| null | 2019-07-15T17:14:20
| 2019-07-15T17:06:36
| null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# https://leetcode.com/problems/maximum-product-subarray/
# Given an integer array nums, find a contiguous non-empty subarray within the array that has the largest product, and return the product.
# The test cases are generated so that the answer will fit in a 32-bit integer.
# A subarray is a contiguous subsequence of the array.
# Example 1:
# Input: nums = [2,3,-2,4]
# Output: 6
# Explanation: [2,3] has the largest product 6.
# Example 2:
# Input: nums = [-2,0,-1]
# Output: 0
# Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
# Constraints:
# 1 <= nums.length <= 2 * 104
# -10 <= nums[i] <= 10
# The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
class Solution:
def maxProduct(self, nums: List[int]) -> int:
rmaxV = max(nums)
maxV = 1
minV = 1
for n in nums:
tmp = maxV
maxV = max(n,n*maxV,n*minV)
minV = min(n,n*tmp,n*minV)
rmaxV = max(maxV,rmaxV)
return rmaxV
|
[
"sanchibadkas@gmail.com"
] |
sanchibadkas@gmail.com
|
a4523d04d79a2270fc27804ff1d7958b47125de7
|
96e38b89fa057fa0c1cf34e498b4624041dfc6e2
|
/BOJ/Implementation/Python/9816.py
|
08902f566b9df267a88b9d25bfa3f4149acf31de
|
[] |
no_license
|
malkoG/polyglot-cp
|
66059246b01766da3c359dbd16f04348d3c7ecd2
|
584763144afe40d73e72dd55f90ee1206029ca8f
|
refs/heads/master
| 2021-11-24T13:33:49.625237
| 2019-10-06T07:42:49
| 2019-10-06T07:42:49
| 176,255,722
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
while True:
s=input()
if s=='-1':
break
print("N={}:".format(s))
if s==s[::-1]:
print("No!!")
continue
counter = 0
while True:
if s=='6174' or s=='0':
break
ss=sorted(list(s))
rs=reversed(ss)
n1=int(''.join(ss))
n2=int(''.join(rs))
print("{}-{}={}".format(n2,n1,n2-n1))
s = str(n2-n1)
counter += 1
print("Ok!! {} times".format(counter))
|
[
"rijgndqw012@gmail.com"
] |
rijgndqw012@gmail.com
|
4dfd047c301995c55f6708521634af7a8edb0fcf
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/azurestackhci/azure-mgmt-azurestackhci/generated_samples/put_extension.py
|
bfff134a861b2f0791e6f673fdd6721b86ca5f88
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.azurestackhci import AzureStackHCIClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-azurestackhci
# USAGE
python put_extension.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureStackHCIClient(
credential=DefaultAzureCredential(),
subscription_id="fd3c3665-1729-4b7b-9a38-238e83b0f98b",
)
response = client.extensions.begin_create(
resource_group_name="test-rg",
cluster_name="myCluster",
arc_setting_name="default",
extension_name="MicrosoftMonitoringAgent",
extension={
"properties": {
"extensionParameters": {
"protectedSettings": {"workspaceKey": "xx"},
"publisher": "Microsoft.Compute",
"settings": {"workspaceId": "xx"},
"type": "MicrosoftMonitoringAgent",
"typeHandlerVersion": "1.10",
}
}
},
).result()
print(response)
# x-ms-original-file: specification/azurestackhci/resource-manager/Microsoft.AzureStackHCI/preview/2021-09-01-preview/examples/PutExtension.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
c9e40167565b06b7278440ba15f8c57c606277f2
|
c65d512975feed7dfe74f1117cdd1337293d9d60
|
/python/my_py_notes_万物皆对象/db_and_数据持久化/sqlite3/hm_cards/cards_main.py
|
d1dfa4992d1e534df7edb53c99e41041e76f22ad
|
[] |
no_license
|
Rockyzsu/StudyRepo
|
e5c6420e325917c2df7dc51d606be5fa3c2ee1b8
|
385785c09bebb56df156fd149a088043f38d0aab
|
refs/heads/master
| 2022-12-09T13:45:38.332899
| 2020-09-15T09:56:09
| 2020-09-15T09:56:09
| 295,388,871
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# 处理app的主要业务逻辑
# coding: utf-8
import os
import cards_utils
def init_menu():
while True:
print('欢迎使用<名片系统> v0.1beta'.center(35, '*'))
print('1.新建名片'.center(35, ' '))
print('2.显示全部'.center(35, ' '))
print('3.查询名片'.center(35, ' '))
print('4.删除名片'.center(35, ' '))
print('5.更改名片'.center(35, ' '))
print('0.退出'.center(35, ' '))
print(''.center(40, '*'))
msg = int(input('请输入功能编号:'))
if msg == 1:
cards_utils.new_card()
elif msg == 2:
os.system('clear')
cards_utils.show_all_cards()
input('请输入任意值继续')
os.system('clear')
elif msg == 3:
os.system('clear')
cards_utils.index_card()
input('请输入任意值继续')
elif msg == 4:
os.system('clear')
cards_utils.del_card()
input('请输入任意值继续')
elif msg == 5:
os.system('clear')
elif msg == 0:
# os.system('clear')
print('欢迎再次使用!')
break
else:
print('输入错误,请重新输入!!')
input('请输入任意值继续')
os.system('clear')
init_menu()
|
[
"jinweizsu@gmail.com"
] |
jinweizsu@gmail.com
|
cc52b3e3d8f14a3a38726bf19dbd85b7b8c7d351
|
fc948981497ccbf47dcc8f039845ffb153a41140
|
/03_Bigdata/02_Standardization_Analysis/2. Excel/2excel_introspect_workbook.py
|
31e4aaaa19941801c9629ec772eb955e4db193ec
|
[] |
no_license
|
jeongwoohong/iot_python2019
|
683b8d46b4035991700ae2caaee17d5db0743bd6
|
799733e845ab8eea3a6f9fa6a4c5acce66fce6f7
|
refs/heads/master
| 2022-03-04T17:17:15.456408
| 2019-11-08T00:10:57
| 2019-11-08T00:10:57
| 195,142,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
import sys
from xlrd import open_workbook
from xlwt import Workbook
input_file = sys.argv[1]
output_file = sys.argv[2]
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('jan_2013_output')
with open_workbook(input_file) as workbook:
worksheet = workbook.sheet_by_name('january_2013')
for row_index in range(worksheet.nrows):
for column_index in range(worksheet.ncols):
output_worksheet.write(row_index, column_index, worksheet.cell_value(row_index, column_index))
output_workbook.save(output_file)
|
[
"you@example.com"
] |
you@example.com
|
b3337f038c0d0beab33e9d4d79a6151c08668cc0
|
f47a1c59fb69e2005c6e87db254b156f2b49ad65
|
/trajan/core/urls.py
|
79f7c2ecaa96f671a939cb277bbeb7635c6e2a5b
|
[] |
no_license
|
wd5/trajan
|
ac1206345bd359b01aa8312641ed5f545d844dc3
|
cd7b48c38c31cf63dabf19cee3d76cd6c09d887d
|
HEAD
| 2016-09-10T03:25:50.199591
| 2012-12-26T19:39:49
| 2012-12-26T19:39:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.conf.urls.defaults import patterns, include, url
from trajan.core.api import PageResource
from tastypie.api import Api
from django.views.generic.simple import direct_to_template
v1_api = Api(api_name='v1')
v1_api.register(PageResource())
urlpatterns = patterns('',
url(r'^$', direct_to_template, {'template': 'core/home.html'}, name="homepage"),
url(r'^(?P<page_slug>[-\w]+)/$', 'trajan.core.views.render_page'),
url(r'^pages/api/', include(v1_api.urls)),
)
|
[
"dstegelman@gmail.com"
] |
dstegelman@gmail.com
|
89384c946bc98181b410bdbd2524b8ff13b12143
|
40af81296e8f07788f8b613643a62ae23b2063d6
|
/hw0_release/.env/share/doc/networkx-2.0/examples/graph/words.py
|
065a96678d591b179a1ffc957c0f82e9c26c53ad
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
ChienyehLin/Computer_Vision_CS131
|
97b6c89d86b747deb80b2d643bdb66f6f5432a4a
|
de23015ac08a899adfd03ff28485c7b6a74d4d1e
|
refs/heads/master
| 2022-11-28T08:49:06.345587
| 2020-02-28T07:29:54
| 2020-02-28T07:29:54
| 228,156,278
| 3
| 0
|
NOASSERTION
| 2022-11-22T02:10:30
| 2019-12-15T09:01:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
"""
=====
Words
=====
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile `words_dat.txt.gz`. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book (see [1]_ and [2]_).
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg (hagberg@lanl.gov),
# Brendt Wohlberg,
# hughdbrown@yahoo.com
# Copyright (C) 2004-2017 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import gzip
from string import ascii_lowercase as lowercase
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
G = nx.Graph(name="words")
lookup = dict((c, lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i + 1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j + 1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
fh = gzip.open('words_dat.txt.gz', 'r')
words = set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w = str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
G = words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print("%d connected components" % nx.number_connected_components(G))
for (source, target) in [('chaos', 'order'),
('nodes', 'graph'),
('pound', 'marks')]:
print("Shortest path between %s and %s is" % (source, target))
try:
sp = nx.shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
[
"linchienyeh_jaden@outlook.com"
] |
linchienyeh_jaden@outlook.com
|
b0a123cf6c2621279d5a96f58ca86274001fbb83
|
2d4380518d9c591b6b6c09ea51e28a34381fc80c
|
/CIM16/CDPSM/Geographical/IEC61970/Wires/LoadBreakSwitch.py
|
f2ffb57bbe01797bc0ff4c7e1a391185331dd6af
|
[
"MIT"
] |
permissive
|
fran-jo/PyCIM
|
355e36ae14d1b64b01e752c5acd5395bf88cd949
|
de942633d966bdf2bd76d680ecb20517fc873281
|
refs/heads/master
| 2021-01-20T03:00:41.186556
| 2017-09-19T14:15:33
| 2017-09-19T14:15:33
| 89,480,767
| 0
| 1
| null | 2017-04-26T12:57:44
| 2017-04-26T12:57:44
| null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.CDPSM.Geographical.IEC61970.Core.ConductingEquipment import ConductingEquipment
class LoadBreakSwitch(ConductingEquipment):
"""A mechanical switching device capable of making, carrying, and breaking currents under normal operating conditions.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'LoadBreakSwitch' instance.
"""
super(LoadBreakSwitch, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
|
[
"fran_jo@hotmail.com"
] |
fran_jo@hotmail.com
|
821885c333f39e248c3b4d48680946323bb48106
|
0fb3b73f8e6bb9e931afe4dcfd5cdf4ba888d664
|
/myapi/fullfeblog/webdev/urls.py
|
975db698ce1d8db571ad43d6fed54ff0e576c3a5
|
[] |
no_license
|
mrpal39/ev_code
|
6c56b1a4412503604260b3346a04ef53a2ba8bf2
|
ffa0cf482fa8604b2121957b7b1d68ba63b89522
|
refs/heads/master
| 2023-03-24T03:43:56.778039
| 2021-03-08T17:48:39
| 2021-03-08T17:48:39
| 345,743,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
from django.conf.urls import url, include
# from .. import core
sitemaps={
'posts':PostSitemap,
}
urlpatterns = [
path('admin/', admin.site.urls, ),
path('',include('blog.urls')),
path('core/',include('core.urls')),
path('api/',include('api.urls')),
# path('oauth/',include('oauth.urls')),
path('accounts/', include('allauth.urls')),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rp9545416@gmail.com"
] |
rp9545416@gmail.com
|
ae78afbd5d7364b1b0f03645d1d1d6ca47cd0ec9
|
d6716aade568d34adc4152aa83be2b19df30b58f
|
/yt_pb71_cs/str_repr_diff.py
|
ee2311e00ec53b10a27d2e89e63b07b6c7c48594
|
[] |
no_license
|
salma-shaik/python-projects
|
a47e7ba79284b6ae9d3cf9489f1d21c12d573ce5
|
ba0234844e1ad938271486ec8c0aac0954326ad5
|
refs/heads/master
| 2021-01-19T22:46:46.433144
| 2018-09-03T19:43:42
| 2018-09-03T19:43:42
| 88,865,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# The goal of __repr__ is to be unambiguous
# The goal of __str__ is to be readable
# a = [1, 2, 3, 4]
# b = 'sample string'
#
# print(str(a))
# print(repr(a))
#
# print(str(b))
# print(repr(b))
import datetime
import pytz
a = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
b = str(a)
print('str(a): {}'.format(str(a)))
print('repr(a): {}'.format(repr(a)))
print()
print('str(b): {}'.format(str(b)))
print('repr(b): {}'.format(repr(b)))
print()
c = 3+2
d = str(c)
print('str(c): {}'.format(str(c)))
print('repr(c): {}'.format(repr(c)))
print()
print('str(d): {}'.format(str(d)))
print('repr(d): {}'.format(repr(d)))
|
[
"salmashaik1611@gmail.com"
] |
salmashaik1611@gmail.com
|
f805830a20c2ba67d1a4bd9b2b0a978cc9522401
|
8f736b5cc28cc1d46506abf1b001eb41cc1f9423
|
/apps/users/signals.py
|
77ba064c53a024e746d0ffa4db5b81f43702a965
|
[] |
no_license
|
tang1323/MxShop
|
6ac68502f59ae07b483b6145e1b557399192e3dd
|
831b5bdd8abdf7d6e547b0bd3fff9341261e4afa
|
refs/heads/master
| 2023-04-04T07:09:32.759476
| 2021-04-14T14:36:00
| 2021-04-14T14:36:00
| 357,937,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth import get_user_model
# 返回此项目的用户模型(model),这是内置的
User = get_user_model()
# 这是一个装饰器,而sender就是接收哪个models传递过来的
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs): # created=False是不是新建的用户
# 新创建用户的时候才能对密码加密
if created:
password = instance.password
# instance翻译过来就是实例,而instance就是我们的User,里面有一个set_password方法,专门对密码加密的
instance.set_password(password)
# 做完信号量以后得到app.py里做一个def ready(self): import users.signals
instance.save()
# 因为我们用民jwt。所以不再用token
# Token.objects.create(user=instance)
"""
使用django自己内置的 Model signals信号,它会自己发送
如果设置其它的信号,就要自己去发送,再接收
"""
|
[
"1171242903@qq.com"
] |
1171242903@qq.com
|
88e07364d82188f3d72bd929b38d3ef1008e5d7c
|
8b39393897cd4cdf47d6520607aac094ec535779
|
/workflows/tests/cosmo/celery.py
|
635c70b01d5db27cf9013967a775b89b3b12fea8
|
[] |
no_license
|
yishaibeeri/cosmo-manager
|
d4debde3ff20e82b70514aea6991c7fd036f6bf3
|
9bc2b80cc360098939d6a6efe726d5df59f6982a
|
refs/heads/master
| 2021-01-21T18:21:29.711024
| 2014-01-13T09:13:37
| 2014-01-13T09:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from celery import Celery
from cosmo import includes
from celery.signals import after_setup_task_logger
import logging
__author__ = 'idanmo'
celery = Celery('cosmo.celery',
broker='amqp://',
backend='amqp://',
include=includes)
# Optional configuration, see the application user guide.
celery.conf.update(
CELERY_TASK_SERIALIZER="json",
CELERY_DEFAULT_QUEUE="cloudify.management"
)
@after_setup_task_logger.connect
def setup_logger(loglevel=None, **kwargs):
logger = logging.getLogger("cosmo")
if not logger.handlers:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('| %(message)s'))
logger.addHandler(handler)
logger.setLevel(loglevel)
logger.propagate = True
if __name__ == '__main__':
celery.start()
|
[
"idan@gigaspaces.com"
] |
idan@gigaspaces.com
|
62c86946bd35096e59efdd673f88673fc50b9f53
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp/sblp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=80/params.py
|
805a7c663cc5294f124978200a747eb82a7c714a
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.527714',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 80,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.