blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72d95d9d79cef5cbeca6ee583d2ebc7314606d6e
|
54d2887e3c910f68366bd0aab3c692d54245e22a
|
/arc/arc025/a.py
|
bbcdf5f5008d4ea5e2a27ebd6595406ba81f4c0e
|
[] |
no_license
|
Kevinrobot34/atcoder
|
7aec367fd2c6b589e9d583dae7b3c7520ce9fa12
|
482ea508f098f81e4f19522fe518dd22c781aca9
|
refs/heads/master
| 2022-07-10T23:44:45.290022
| 2022-06-29T11:30:26
| 2022-06-29T11:30:26
| 158,081,477
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
# AtCoder ARC025 A: two players' scores for the 7 days of a week are read
# from stdin; the answer is the total of the better score on each day.
d = list(map(int, input().split()))
j = list(map(int, input().split()))
ans = sum(max(d[i], j[i]) for i in range(7))
print(ans)
|
[
"kevinrobot34@yahoo.co.jp"
] |
kevinrobot34@yahoo.co.jp
|
eb5616cfb71f50d5ece42954779cc6d30ab2fd09
|
19ce218477449ece043c0e40fb47ba55c4bfd38e
|
/network/tagged_delegate.py
|
2d321c1ae60e772d8f2caa140fde192c45bd4de7
|
[] |
no_license
|
vickylunna/PyAuthServer
|
7aef8b86f3a748bdd1b50a8c10872cfa39d6cdef
|
9b6be1a805938f48292a231bad3f9006c667e06b
|
refs/heads/master
| 2021-01-18T04:50:15.790653
| 2014-12-01T18:27:02
| 2014-12-01T18:27:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
from .decorators import get_tag, has_tag
from .metaclasses.register import TypeRegister
from .world_info import WorldInfo
__all__ = ['DelegateByNetmode', 'DelegateByTag', 'FindByTag']
class FindByTag(metaclass=TypeRegister):
    """Provides an interface to select a subclass by a tag value.

    Registration is driven by the TypeRegister metaclass, which is expected
    to invoke the register_type()/update_cache() hooks -- confirm against
    metaclasses.register.TypeRegister.
    """
    @classmethod
    def register_type(cls):
        # Registration hook: start with an empty tag -> subclass mapping.
        cls._cache = {}
    @classmethod
    def update_cache(cls, from_cls=None):
        """Rebuild the tag cache from ``cls.subclasses`` and propagate upward.

        :param from_cls: subclass that triggered this update, or None when
            called directly on this class.
        :raises TypeError: when called directly on a class without a
            ``subclasses`` dictionary.
        """
        try:
            subclasses = cls.subclasses
        except AttributeError:
            # No subclass registry on this class.  A child-triggered update is
            # silently ignored; a direct call is a programming error.
            if from_cls is None:
                raise TypeError("Subclass dictionary was not implemented by {}".format(cls.type_name))
            else:
                return
        # Map each tagged subclass to its tag value (untagged ones are skipped).
        cls._cache.update({get_tag(c): c for c in subclasses.values() if has_tag(c)})
        # Find the nearest ancestor that owns a *different* subclass registry
        # and let it refresh too, so lookups on base classes stay current.
        try:
            parent = next(c for c in cls.__mro__[1:] if getattr(c, "subclasses", subclasses) is not subclasses)
        except StopIteration:
            pass
        else:
            parent.update_cache(from_cls=cls)
    @classmethod
    def find_subclass_for(cls, tag_value):
        """Find subclass with a tag value.

        :param tag_value: value of tag to isolate
        :raises TypeError: when the cache was never created or the tag is
            not registered.
        """
        try:
            cache = cls._cache
        except AttributeError:
            raise TypeError("Subclass dictionary was not implemented by {}".format(cls.type_name))
        try:
            return cache[tag_value]
        except KeyError:
            raise TypeError("Tag: {} is not supported by {}".format(tag_value, cls.type_name))
class DelegateByTag(FindByTag):
    """FindByTag variant that transparently instantiates the tagged subclass."""
    def __new__(cls, *args, **kwargs):
        # Resolve the concrete class for the current tag and construct that
        # instead of cls.
        tag = cls.get_current_tag()
        delegated_class = cls.find_subclass_for(tag)
        if delegated_class.is_delegate:
            # Target is itself a delegating base: recurse into its __new__ so
            # it can keep resolving by its own tag.
            return delegated_class.__new__(delegated_class, *args, **kwargs)
        return super().__new__(delegated_class)
    @classmethod
    def register_type(cls):
        super().register_type()
        # Delegating base classes are flagged so __new__ continues resolving.
        cls.is_delegate = True
    @classmethod
    def register_subtype(cls):
        # NOTE(review): super().register_subtype() is presumably supplied by
        # the TypeRegister metaclass machinery -- confirm.
        super().register_subtype()
        # Concrete registered subtypes terminate the delegation chain.
        cls.is_delegate = False
    @staticmethod
    def get_current_tag():
        # Subclasses must provide the tag used to pick the concrete class.
        raise NotImplementedError()
class DelegateByNetmode(DelegateByTag):
    """Delegate that selects its concrete subclass by the current netmode."""
    @staticmethod
    def get_current_tag():
        # Current network mode (client/server) published by WorldInfo.
        return WorldInfo.netmode
|
[
"goosey15@gmail.com"
] |
goosey15@gmail.com
|
1775764213fa055a85117b3e4513753453f10093
|
45fc3ca374b15e63fee49a78c54d1a7d5949ebd6
|
/vega/core/evaluator/gpu_evaluator.py
|
00caec4c26234cd3c8f389fc5f8fbe1b036e6a3b
|
[
"MIT"
] |
permissive
|
1ziyanW1/vega
|
d0849d09ba9dea85f1e135f515202e90533972aa
|
9640a32d3cbb22e39593e2786b825b946aa6b281
|
refs/heads/master
| 2022-11-25T20:30:50.517879
| 2020-08-04T08:17:28
| 2020-08-04T08:17:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""GpuEvaluator used to do evaluate process on gpu."""
import os
import time
import logging
import errno
import pickle
import torch
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.trainer.pytorch.trainer import Trainer
from vega.core.trainer.utils import WorkerTypes
from vega.core.common import FileOps, init_log
from vega.datasets.pytorch import Dataset
from vega.core.metrics.pytorch import Metrics
from vega.core.common.utils import update_dict
@ClassFactory.register(ClassType.GPU_EVALUATOR)
class GpuEvaluator(Trainer):
    """Evaluator that runs the validation pass of a model on GPU.

    :param worker_info: the dict worker info of workers that finished train.
    :type worker_info: dict or None.
    :param model: model to evaluate; built from config when None.
    :param hps: hyperparameters to apply before evaluation, or None.
    :param load_checkpoint: when True restore weights from a checkpoint,
        otherwise load a pretrained model file.
    """
    def __init__(self, worker_info=None, model=None, hps=None, load_checkpoint=False, **kwargs):
        """Init GpuEvaluator."""
        # NOTE(review): runs before super().__init__, so self.cfg must already
        # be available (presumably a class-level attribute) -- confirm.
        self._reference_trainer_settings()
        super(GpuEvaluator, self).__init__(self.cfg)
        self.worker_type = WorkerTypes.GPU_EVALUATOR
        self.worker_info = worker_info
        if worker_info is not None and "step_name" in worker_info and "worker_id" in worker_info:
            self.step_name = self.worker_info["step_name"]
            self.worker_id = self.worker_info["worker_id"]
        self._flag_load_checkpoint = load_checkpoint
        self.hps = hps
        self.model = model
        self.evaluate_result = None
    def _reference_trainer_settings(self):
        """Set reference Trainer.

        'ref' is a dotted path into ClassFactory.__configs__; the referenced
        sub-config is merged into this evaluator's config.
        """
        ref = self.cfg.get('ref')
        if ref:
            ref_dict = ClassFactory.__configs__
            for key in ref.split('.'):
                ref_dict = ref_dict.get(key)
            update_dict(ref_dict, self.cfg)
    def _init_all_settings(self):
        """Init all settings from config: cuda, hps, model, weights, data."""
        self._reference_trainer_settings()
        if self.cfg.cuda:
            self._init_cuda_setting()
        self._init_hps(self.hps)
        if self.model is None:
            self.model = self._init_model()
        if self.model is not None and self.cfg.cuda:
            self.model = self.model.cuda()
        # TODO
        if self._flag_load_checkpoint:
            self.load_checkpoint()
        else:
            self._load_pretrained_model()
        self._init_dataloader()
    def _init_dataloader(self):
        """Init dataloader for the test split."""
        valid_dataset = Dataset(mode='test')
        self.valid_loader = valid_dataset.dataloader
    def valid(self, valid_loader):
        """Validate one step of mode.

        :param valid_loader: valid data loader
        :return: list [metric] or [metric, latency] when cfg.evaluate_latency
            is set.
        """
        self.model.eval()
        metrics = Metrics(self.cfg.metric)
        data_num = 0
        latency_sum = 0.0
        with torch.no_grad():
            for step, (data, target) in enumerate(valid_loader):
                if self.cfg.cuda:
                    data, target = data.cuda(), target.cuda()
                    self.model = self.model.cuda()
                # Measure the forward-pass latency of each batch.
                time_start = time.time()
                logits = self.model(data)
                latency_sum += time.time() - time_start
                metrics(logits, target)
                n = data.size(0)
                data_num += n
                if self._first_rank and step % self.cfg.report_freq == 0:
                    logging.info("step [{}/{}], valid metric [{}]".format(
                        step + 1, len(valid_loader), str(metrics.results_dict)))
        # Average per-sample latency over the whole pass.
        latency = latency_sum / data_num
        pfms = metrics.results_dict
        # Only the first configured metric is reported as the performance value.
        performance = [pfms[list(pfms.keys())[0]]]
        if self.cfg.evaluate_latency:
            performance.append(latency)
        logging.info("valid performance: {}".format(performance))
        return performance
    def train_process(self):
        """Validate process for the model validate worker."""
        init_log(log_file="gpu_eva_{}.txt".format(self.worker_id))
        logging.info("start evaluate process")
        self._init_all_settings()
        performance = self.valid(self.valid_loader)
        self._save_performance(performance)
        logging.info("finished evaluate for id {}".format(self.worker_id))
        self.evaluate_result = performance
        return
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
0731c42c5febe59fc7aa48a0d10201bb653fca1c
|
42dce5a3a1ace43022968ec059df1b17f94776ba
|
/tat_aws_creator_auto_tag/tests/__init__.py
|
a989bb3d56e264743ade2655c449c9e32a1eec94
|
[
"MIT"
] |
permissive
|
techantllc/aws-creator-auto-tagger
|
647db1c5cbed275608c0cb1f28622ef3d11c92bb
|
413c9f6c91cfaa088bbc45bed0f6c9f09e02f48a
|
refs/heads/master
| 2020-08-22T13:40:05.495905
| 2019-10-30T18:27:52
| 2019-10-30T18:27:52
| 216,407,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
# -*- coding: utf-8 -*-

import boto3

# Shared AWS clients for the test suite, bound to a named local profile.
# NOTE(review): importing this module opens the session eagerly, so running
# the tests requires the "tat_sanhe" profile in local AWS credentials.
aws_profile = "tat_sanhe"
boto_ses = boto3.Session(profile_name=aws_profile)
iam_client = boto_ses.client("iam")
s3_client = boto_ses.client("s3")
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
a0a756527c4bea878ec4ec9a51f27f2aa7b0e709
|
0c9ec5d4bafca45505f77cbd3961f4aff5c10238
|
/openapi-python-client/openapi_client/models/multi_form_variable_binary_dto.py
|
5b444e02f29acbb121a3784c4daa78f23cced18b
|
[
"Apache-2.0"
] |
permissive
|
yanavasileva/camunda-bpm-examples
|
98cd2930f5c8df11a56bf04845a8ada5b3bb542d
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
refs/heads/master
| 2022-10-19T20:07:21.278160
| 2020-05-27T15:28:27
| 2020-05-27T15:28:27
| 267,320,400
| 0
| 0
|
Apache-2.0
| 2020-05-27T14:35:22
| 2020-05-27T13:00:01
| null |
UTF-8
|
Python
| false
| false
| 5,035
|
py
|
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class MultiFormVariableBinaryDto(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'file',
        'value_type': 'str'
    }

    attribute_map = {
        'data': 'data',
        'value_type': 'valueType'
    }

    def __init__(self, data=None, value_type=None, local_vars_configuration=None):  # noqa: E501
        """MultiFormVariableBinaryDto - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._data = None
        self._value_type = None
        self.discriminator = None

        self.data = data
        # value_type is optional in the API; assign (and validate) only when given.
        if value_type is not None:
            self.value_type = value_type

    @property
    def data(self):
        """Gets the data of this MultiFormVariableBinaryDto.  # noqa: E501

        The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory.  # noqa: E501

        :return: The data of this MultiFormVariableBinaryDto.  # noqa: E501
        :rtype: file
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this MultiFormVariableBinaryDto.

        The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory.  # noqa: E501

        :param data: The data of this MultiFormVariableBinaryDto.  # noqa: E501
        :type: file
        """
        self._data = data

    @property
    def value_type(self):
        """Gets the value_type of this MultiFormVariableBinaryDto.  # noqa: E501

        The name of the variable type. Either Bytes for a byte array variable or File for a file variable.  # noqa: E501

        :return: The value_type of this MultiFormVariableBinaryDto.  # noqa: E501
        :rtype: str
        """
        return self._value_type

    @value_type.setter
    def value_type(self, value_type):
        """Sets the value_type of this MultiFormVariableBinaryDto.

        The name of the variable type. Either Bytes for a byte array variable or File for a file variable.  # noqa: E501

        :param value_type: The value_type of this MultiFormVariableBinaryDto.  # noqa: E501
        :type: str
        """
        allowed_values = ["Bytes", "File"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and value_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `value_type` ({0}), must be one of {1}"  # noqa: E501
                .format(value_type, allowed_values)
            )

        self._value_type = value_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MultiFormVariableBinaryDto):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, MultiFormVariableBinaryDto):
            return True

        return self.to_dict() != other.to_dict()
|
[
"noreply@github.com"
] |
yanavasileva.noreply@github.com
|
66ec8c7f71b6759f16016e52cff49cb461b2a215
|
78518b65b6823ac42b20515ae7716ada4b59db3d
|
/手写代码/第2章 数据处理与可视化/Pex2_12.py
|
b0c9c2b22908f384e5fe922680d7a77078d6e2a7
|
[] |
no_license
|
YunHao-Von/Mathematical-Modeling
|
70d6ad8f2f543751883afdc85aa19b1c80a106a0
|
4fe153453cccb4b474166c104e08d13ed72bc5ac
|
refs/heads/master
| 2023-03-02T23:16:40.839865
| 2021-02-15T09:52:24
| 2021-02-15T09:52:24
| 339,029,227
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
import numpy as np

# Demonstrate vertical and horizontal stacking of two 2x2 arrays, and the
# np.r_/np.c_ index-trick equivalents of vstack/hstack.
a, b = (np.arange(start, start + 4).reshape(2, 2) for start in (0, 4))
c1 = np.vstack([a, b])  # stack along rows
c2 = np.r_[a, b]        # same result via the row-concatenation index trick
d1 = np.hstack([a, b])  # stack along columns
d2 = np.c_[a, b]        # same result via the column-concatenation index trick
print(c1)
print(c2)
print(d1)
|
[
"Alctrain@163.com"
] |
Alctrain@163.com
|
d6ae188b2c356f7e8c7b1e9f6b73323adeed445a
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-integrations/connectors/source-google-analytics-data-api/setup.py
|
d93d394a43545a29e58da42374364ee3ea361f13
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569
| 2023-08-25T13:13:11
| 2023-08-25T13:13:11
| 327,604,451
| 1
| 0
|
MIT
| 2021-01-07T12:24:20
| 2021-01-07T12:24:19
| null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#

from setuptools import find_packages, setup

# Runtime dependencies of the connector.
MAIN_REQUIREMENTS = ["airbyte-cdk", "PyJWT==2.4.0", "cryptography==37.0.4", "requests"]

# Additional dependencies installed via the "tests" extra.
TEST_REQUIREMENTS = [
    "freezegun",
    "pytest~=6.1",
    "pytest-mock~=3.6.1",
    "requests-mock",
]

setup(
    name="source_google_analytics_data_api",
    description="Source implementation for Google Analytics Data Api.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=MAIN_REQUIREMENTS,
    # Ship the JSON schema files alongside the package code.
    package_data={"": ["*.json", "schemas/*.json"]},
    extras_require={
        "tests": TEST_REQUIREMENTS,
    },
)
|
[
"noreply@github.com"
] |
thomas-vl.noreply@github.com
|
f5d44c1fcfada39187884766cab499aa2b45c3d8
|
1b36425f798f484eda964b10a5ad72b37b4da916
|
/ee/clickhouse/materialized_columns/test/test_query.py
|
c78623a831c3ed5c5ef9a4c39315d40ea239b639
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
dorucioclea/posthog
|
0408baa2a7ae98e5bea352c516f741ddc17c0a3e
|
8848981baf237117fb22d28af0770a0165881423
|
refs/heads/master
| 2023-01-23T11:01:57.942146
| 2023-01-13T09:03:00
| 2023-01-13T09:03:00
| 241,222,000
| 0
| 0
|
MIT
| 2020-02-17T22:34:37
| 2020-02-17T22:34:36
| null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
from posthog.test.base import APIBaseTest, ClickhouseTestMixin
class TestQuery(ClickhouseTestMixin, APIBaseTest):
    def test_get_queries_detects(self):
        """Captured SELECT queries must carry the discoverable comment prefix."""
        # some random
        with self.capture_select_queries() as queries:
            self.client.post(
                f"/api/projects/{self.team.id}/insights/funnel/",
                {
                    "events": [{"id": "step one", "type": "events", "order": 0}],
                    "funnel_window_days": 14,
                    "funnel_order_type": "unordered",
                    "insight": "funnels",
                },
            ).json()
        # The request must have issued at least one SELECT.
        self.assertTrue(len(queries))
        # make sure that the queries start with a discoverable prefix.
        # If this changes, also update ee/clickhouse/materialized_columns/analyze.py::_get_queries to
        # filter on the right queries
        for q in queries:
            self.assertTrue(q.startswith("/* user_id"))
|
[
"noreply@github.com"
] |
dorucioclea.noreply@github.com
|
a1bffc2fe20ca4257e795c2a592f63257efca427
|
76d27bfccbd24c86c8a528d634e8c53a884bd331
|
/blinking_image.py
|
bb94efc930b23102ec429ef8a70f687c4f2d0028
|
[] |
no_license
|
nickdelgrosso/SSVEP-CardGui
|
07bb2d539ffe0bd46e4a80959466422c7e79e63b
|
edbccaccbe93f088019a545111b2fbf0f2af54bd
|
refs/heads/master
| 2023-06-24T22:47:31.656748
| 2021-07-22T21:15:21
| 2021-07-22T21:15:21
| 388,243,533
| 0
| 1
| null | 2021-07-21T22:38:39
| 2021-07-21T20:55:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
from psychopy import visual, core
class BlinkingImage:
    """An ImageStim whose auto-draw state toggles at a fixed blink frequency."""
    def __init__(self, win, blink_frequency = 10., **kwargs):
        self.win = win
        self.image = visual.ImageStim(win=win, **kwargs)
        # NOTE(review): the timer starts with a full period (1/f) here, but
        # the blink_frequency setter below immediately resets it to a half
        # period (.5/f) -- confirm which interval is intended.
        self.clock = core.CountdownTimer(1. / blink_frequency)
        self.blink_frequency = blink_frequency
    @property
    def blink_frequency(self):
        # Blink rate in Hz.
        return self._blink_frequency
    @blink_frequency.setter
    def blink_frequency(self, value: float):
        self._blink_frequency = value
        # Half period: the image toggles on/off twice per blink cycle.
        self.clock.reset(.5 / value)
    def draw(self):
        # Toggle auto-draw when the next screen flip would land past the
        # countdown expiry.
        time_to_flip = -self.win.getFutureFlipTime(clock=self.clock)
        if time_to_flip <= 0:
            self.clock.reset()
            self.image.setAutoDraw(not self.image.autoDraw)
|
[
"delgrosso.nick@gmail.com"
] |
delgrosso.nick@gmail.com
|
6a47ec21f205e0e2961b22fcd6f376cc90061d1b
|
8c730ccb9ec23fd9cbcb5903abecda86bf50e6ab
|
/config/memory_network_adeb.py
|
a9d6fef7a06170bbbae130786c8052f74d71095d
|
[] |
no_license
|
JimStearns206/taxi
|
93278b49e485bbe2feaa08a3e11f0f79e884bd0d
|
b6566c010be7c871a5b6c199feaf1dfda0910ade
|
refs/heads/master
| 2021-01-21T07:07:59.274279
| 2015-07-16T22:59:50
| 2015-07-16T22:59:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
from blocks.initialization import IsotropicGaussian, Constant
from blocks.algorithms import AdaDelta, CompositeRule, GradientDescent, RemoveNotFinite, StepRule, Momentum
import data
from model.memory_network import Model, Stream
n_begin_end_pts = 5  # how many points we consider at the beginning and end of the known trajectory

# (feature name, vocabulary size, embedding dimension) per categorical input.
dim_embeddings = [
    ('origin_call', data.origin_call_train_size, 10),
    ('origin_stand', data.stands_size, 10),
    ('week_of_year', 52, 10),
    ('day_of_week', 7, 10),
    ('qhour_of_day', 24 * 4, 10),
    ('day_type', 3, 10),
]


class MLPConfig(object):
    """Plain settings holder for one MLP encoder."""
    __slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init')


def _make_encoder_config():
    """Build one encoder config; prefix and candidate encoders are identical.

    dim_input = (begin + end points) * 2 coordinates + total embedding width.
    NOTE: dim_output is deliberately not set here; presumably the model
    assigns it -- confirm against model.memory_network.
    """
    config = MLPConfig()
    config.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
    config.dim_hidden = [100, 100]
    config.weights_init = IsotropicGaussian(0.001)
    config.biases_init = Constant(0.0001)
    return config


prefix_encoder = _make_encoder_config()
candidate_encoder = _make_encoder_config()

embed_weights_init = IsotropicGaussian(0.001)
step_rule = Momentum(learning_rate=0.001, momentum=0.9)

# Training / evaluation settings.
batch_size = 32
valid_set = 'cuts/test_times_0'
max_splits = 1
num_cuts = 1000
train_candidate_size = 1000
valid_candidate_size = 10000
load_model = False
|
[
"adbrebs@gmail.com"
] |
adbrebs@gmail.com
|
053626d020f792d2a77cd4ec679ef747ea630e2d
|
f7139e3979fc32d96de0082d6402d94be64f0f2e
|
/Exercise-3/sensor_stick/scripts/segmentation.py
|
6c7fc70194ee0bd5fa128ec4b7da45cd26ff2b2e
|
[
"MIT"
] |
permissive
|
jaycode/RoboND-Perception-Exercises
|
9eb652b15ad5eccd171b4894035fbcbaa185b4a1
|
8918730423fa4e788dc01352872644c065d58b4c
|
refs/heads/master
| 2021-01-16T20:04:15.198564
| 2018-02-14T06:30:44
| 2018-02-14T06:30:44
| 100,196,660
| 0
| 1
| null | 2017-08-13T18:30:43
| 2017-08-13T18:30:43
| null |
UTF-8
|
Python
| false
| false
| 3,791
|
py
|
#!/usr/bin/env python
# Import modules
from pcl_helper import *
# TODO: Define functions as required
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
    """Segment a point cloud into a table plane and colored object clusters.

    :param pcl_msg: sensor_msgs/PointCloud2 message from the subscriber;
        the objects, table, and cluster clouds are published as ROS messages.
    """
    # TODO: Convert ROS msg to PCL data
    pcl_data = ros_to_pcl(pcl_msg)
    # TODO: Voxel Grid Downsampling
    vox = pcl_data.make_voxel_grid_filter()
    LEAF_SIZE = 0.01  # voxel edge length (meters, presumably) -- confirm units
    vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
    cloud_filtered = vox.filter()
    # TODO: PassThrough Filter
    # Keep only points in a z band around table height.
    passthrough = cloud_filtered.make_passthrough_filter()
    filter_axis = 'z'
    passthrough.set_filter_field_name(filter_axis)
    axis_min = 0.6
    axis_max = 1.1
    passthrough.set_filter_limits(axis_min, axis_max)
    cloud_filtered = passthrough.filter()
    # Extract outliers (statistical noise removal)
    outlier_filter = cloud_filtered.make_statistical_outlier_filter()
    outlier_filter.set_mean_k(50)
    x = 1.0  # std-dev multiplier threshold
    outlier_filter.set_std_dev_mul_thresh(x)
    cloud_filtered = outlier_filter.filter()
    # TODO: RANSAC Plane Segmentation
    seg = cloud_filtered.make_segmenter()
    seg.set_model_type(pcl.SACMODEL_PLANE)
    seg.set_method_type(pcl.SAC_RANSAC)
    max_distance = 0.01
    seg.set_distance_threshold(max_distance)
    inliers, coefficients = seg.segment()
    # TODO: Extract inliers and outliers
    # Plane inliers form the table; the rest are the objects on it.
    cloud_objects = cloud_filtered.extract(inliers, negative=True)
    cloud_table = cloud_filtered.extract(inliers, negative=False)
    # TODO: Euclidean Clustering
    white_cloud = XYZRGB_to_XYZ(cloud_objects)
    tree = white_cloud.make_kdtree()
    ec = white_cloud.make_EuclideanClusterExtraction()
    ec.set_ClusterTolerance(0.05)
    ec.set_MinClusterSize(10)
    ec.set_MaxClusterSize(1500)
    ec.set_SearchMethod(tree)
    cluster_indices = ec.Extract()
    # TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
    #Assign a color corresponding to each segmented object in scene
    cluster_color = get_color_list(len(cluster_indices))
    color_cluster_point_list = []
    for j, indices in enumerate(cluster_indices):
        for i, indice in enumerate(indices):
            color_cluster_point_list.append([white_cloud[indice][0],
                                            white_cloud[indice][1],
                                            white_cloud[indice][2],
                                            rgb_to_float(cluster_color[j])])
    #Create new cloud containing all clusters, each with unique color
    cluster_cloud = pcl.PointCloud_PointXYZRGB()
    cluster_cloud.from_list(color_cluster_point_list)
    # TODO: Convert PCL data to ROS messages
    ros_cloud_objects = pcl_to_ros(cloud_objects)
    ros_cloud_table = pcl_to_ros(cloud_table)
    ros_cluster_cloud = pcl_to_ros(cluster_cloud)
    # TODO: Publish ROS messages
    # Publishers are module-level names created in the __main__ block.
    pcl_objects_pub.publish(ros_cloud_objects)
    pcl_table_pub.publish(ros_cloud_table)
    pcl_cluster_pub.publish(ros_cluster_cloud)
if __name__ == '__main__':
    # TODO: ROS node initialization
    rospy.init_node('clustering', anonymous=True)
    # TODO: Create Subscribers
    pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud",
                               pc2.PointCloud2, pcl_callback,
                               queue_size=1)
    # TODO: Create Publishers
    pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2,
                                      queue_size=1)
    pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2,
                                    queue_size=1)
    pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2,
                                      queue_size=1)
    # Initialize color_list
    get_color_list.color_list = []
    # TODO: Spin while node is not shutdown
    # NOTE(review): rospy.spin() already blocks until shutdown, so the while
    # wrapper is redundant but harmless.
    while not rospy.is_shutdown():
        rospy.spin()
|
[
"teguhwpurwanto@gmail.com"
] |
teguhwpurwanto@gmail.com
|
9d38d3cb8de2e003c85e45d93eff2ac7cf72a2b1
|
9e780f17eb49171d1f234944563225ca22b3c286
|
/postgresqleu/membership/migrations/0008_membermail.py
|
1d9ae5505932c90b5884e9723a0702bec6d8cd49
|
[
"MIT"
] |
permissive
|
pgeu/pgeu-system
|
e5216d5e90eec6c72770b88a5af4b3fd565cda59
|
885cfdcdadd4a721f72b699a39f26c94d1f636e0
|
refs/heads/master
| 2023-08-06T13:03:55.606562
| 2023-08-03T12:47:37
| 2023-08-03T12:47:37
| 161,434,221
| 15
| 27
|
MIT
| 2023-05-30T11:21:24
| 2018-12-12T04:48:14
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
# Generated by Django 3.2.11 on 2022-02-07 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the MemberMail model recording mails sent to members."""

    dependencies = [
        ('membership', '0007_meeting_reminders'),
    ]

    operations = [
        migrations.CreateModel(
            name='MemberMail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Send timestamp, set automatically and indexed for lookups.
                ('sentat', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('sentfrom', models.CharField(max_length=100)),
                ('subject', models.CharField(max_length=100)),
                ('message', models.TextField(max_length=8000)),
                # Recipients of this mail.
                ('sentto', models.ManyToManyField(to='membership.Member')),
            ],
        ),
    ]
|
[
"magnus@hagander.net"
] |
magnus@hagander.net
|
53ea2685d2b68240d10424f11046e839be8682ef
|
b72d0900bec98fcee6c725cef035c02ca29bbf1b
|
/Python/VirtualEnvironment/portfolio/.history/portfolio/views_20201119102645.py
|
b701bf886f4d03f578254db395f0d10035583ba1
|
[
"MIT"
] |
permissive
|
sugamkarki/NAMI-Year-II-TERM-I-Group_Project
|
68b8808c8607858a313e8b4d601d8d12c6edda2b
|
f0a9a5f219ccbec024eb5316361db3fca46e171c
|
refs/heads/master
| 2023-06-28T19:07:19.330236
| 2021-07-24T03:05:42
| 2021-07-24T03:05:42
| 312,819,148
| 0
| 0
|
MIT
| 2021-07-24T12:45:06
| 2020-11-14T13:08:08
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Return the placeholder greeting for the polls index page."""
    greeting = "Hello, world. You're at the polls index."
    return HttpResponse(greeting)
def cat():
|
[
"sugamkarki7058@gmail.com"
] |
sugamkarki7058@gmail.com
|
49defc368cb5599babac9b0973cbb3f3da2485e8
|
acad69f0abe162eea0cb13cbe15bfd88f6da08b4
|
/down-stream-tasks/mmdetection/tests/test_models/test_dense_heads/test_tood_head.py
|
9d8c79c47fd1d441b368e1d7246d27775dd50d7e
|
[
"Apache-2.0"
] |
permissive
|
zhangzjn/EMO
|
69afcac53800d8b9a390f1214e178e2ca4da3b24
|
141afbdbce04683790f0699f256327ec420be442
|
refs/heads/main
| 2023-08-27T19:04:23.313676
| 2023-08-15T04:09:55
| 2023-08-15T04:09:55
| 584,987,542
| 139
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,070
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
    """Tests tood head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            initial_epoch=4,
            initial_assigner=dict(type='ATSSAssigner', topk=9),
            assigner=dict(type='TaskAlignedAssigner', topk=13),
            alpha=1,
            beta=6,
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    test_cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    # since Focal Loss is not supported on CPU
    self = TOODHead(
        num_classes=80,
        in_channels=1,
        stacked_convs=6,
        feat_channels=256,
        anchor_type='anchor_free',
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        initial_loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    self.init_weights()
    # One single-channel random feature map per stride level.
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [8, 16, 32, 64, 128]
    ]
    cls_scores, bbox_preds = self(feat)
    # test initial assigner and losses (epoch < initial_epoch)
    self.epoch = 0
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(empty_box_loss).item() == 0, (
        'there should be no box loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
    # test task alignment assigner and losses (epoch >= initial_epoch)
    self.epoch = 10
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                                img_metas, gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(empty_box_loss).item() == 0, (
        'there should be no box loss when there are no true boxes')
    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                              img_metas, gt_bboxes_ignore)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
    assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
|
[
"186368@zju.edu.cn"
] |
186368@zju.edu.cn
|
5b115168345a1652c56f759de440862388473ee0
|
5da988c176252fca1b558190eff74ef3b89afc9f
|
/instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py
|
fe33a2f2dd1cca06c37f6d6179aa05655d9378c6
|
[
"Apache-2.0"
] |
permissive
|
kinvolk/opentelemetry-python
|
3801376ee6bdb46d85d8876a97713e698e1241ce
|
47483865854c7adae7455f8441dab7f814f4ce2a
|
refs/heads/master
| 2023-05-25T19:36:05.130267
| 2020-11-02T17:29:59
| 2020-11-02T17:29:59
| 201,488,070
| 1
| 2
|
Apache-2.0
| 2023-05-16T18:48:46
| 2019-08-09T14:56:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,123
|
py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
from falcon import testing
from opentelemetry.instrumentation.falcon import FalconInstrumentor
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCode
from opentelemetry.util import ExcludeList
from .app import make_app
class TestFalconInstrumentation(TestBase):
    """Integration tests for the Falcon auto-instrumentation.

    Each test drives the instrumented app through falcon's test client and
    inspects the spans collected by the in-memory exporter provided by
    TestBase.
    """
    def setUp(self):
        # Instrument before creating the app so the tracing middleware is
        # injected into it.
        super().setUp()
        FalconInstrumentor().instrument()
        self.app = make_app()
    def client(self):
        # Fresh falcon test client bound to the instrumented app.
        return testing.TestClient(self.app)
    def tearDown(self):
        super().tearDown()
        # Silence the "not instrumented" warning emitted on uninstrument.
        with self.disable_logging():
            FalconInstrumentor().uninstrument()
    def test_get(self):
        self._test_method("GET")
    def test_post(self):
        self._test_method("POST")
    def test_patch(self):
        self._test_method("PATCH")
    def test_put(self):
        self._test_method("PUT")
    def test_delete(self):
        self._test_method("DELETE")
    def test_head(self):
        self._test_method("HEAD")
    def _test_method(self, method):
        # Shared body for every HTTP verb: one request must yield exactly
        # one span named after the resource handler, with the expected
        # HTTP attributes.
        self.client().simulate_request(method=method, path="/hello")
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 1)
        span = spans[0]
        self.assertEqual(
            span.name, "HelloWorldResource.on_{0}".format(method.lower())
        )
        self.assertEqual(span.status.status_code, StatusCode.UNSET)
        self.assert_span_has_attributes(
            span,
            {
                "component": "http",
                "http.method": method,
                "http.server_name": "falconframework.org",
                "http.scheme": "http",
                "host.port": 80,
                "http.host": "falconframework.org",
                "http.target": "/",
                "net.peer.ip": "127.0.0.1",
                "net.peer.port": "65133",
                "http.flavor": "1.1",
                "falcon.resource": "HelloWorldResource",
                "http.status_text": "Created",
                "http.status_code": 201,
            },
        )
        # Clear so the next verb's request starts from an empty exporter.
        self.memory_exporter.clear()
    def test_404(self):
        # Unrouted path: falcon never reaches a resource, so the span keeps
        # the generic "HTTP GET" name and an ERROR status.
        self.client().simulate_get("/does-not-exist")
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 1)
        span = spans[0]
        self.assertEqual(span.name, "HTTP GET")
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
        self.assert_span_has_attributes(
            span,
            {
                "component": "http",
                "http.method": "GET",
                "http.server_name": "falconframework.org",
                "http.scheme": "http",
                "host.port": 80,
                "http.host": "falconframework.org",
                "http.target": "/",
                "net.peer.ip": "127.0.0.1",
                "net.peer.port": "65133",
                "http.flavor": "1.1",
                "http.status_text": "Not Found",
                "http.status_code": 404,
            },
        )
    def test_500(self):
        # The /error resource raises NameError; the span must record the
        # exception description and a 500 status code.
        try:
            self.client().simulate_get("/error")
        except NameError:
            pass
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 1)
        span = spans[0]
        self.assertEqual(span.name, "ErrorResource.on_get")
        self.assertFalse(span.status.is_ok)
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
        self.assertEqual(
            span.status.description,
            "NameError: name 'non_existent_var' is not defined",
        )
        self.assert_span_has_attributes(
            span,
            {
                "component": "http",
                "http.method": "GET",
                "http.server_name": "falconframework.org",
                "http.scheme": "http",
                "host.port": 80,
                "http.host": "falconframework.org",
                "http.target": "/",
                "net.peer.ip": "127.0.0.1",
                "net.peer.port": "65133",
                "http.flavor": "1.1",
                "http.status_code": 500,
            },
        )
    def test_uninstrument(self):
        # After uninstrumenting, a freshly built app must produce no spans.
        self.client().simulate_get(path="/hello")
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 1)
        self.memory_exporter.clear()
        FalconInstrumentor().uninstrument()
        self.app = make_app()
        self.client().simulate_get(path="/hello")
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 0)
    @patch(
        "opentelemetry.instrumentation.falcon._excluded_urls",
        ExcludeList(["ping"]),
    )
    def test_exclude_lists(self):
        # URLs on the exclude list are not traced; others still are.
        self.client().simulate_get(path="/ping")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 0)
        self.client().simulate_get(path="/hello")
        span_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(span_list), 1)
    def test_traced_request_attributes(self):
        # query_string is only captured when listed in the middleware's
        # _traced_request_attrs.
        self.client().simulate_get(path="/hello?q=abc")
        span = self.memory_exporter.get_finished_spans()[0]
        self.assertNotIn("query_string", span.attributes)
        self.memory_exporter.clear()
        middleware = self.app._middleware[0][  # pylint:disable=W0212
            0
        ].__self__
        with patch.object(
            middleware, "_traced_request_attrs", ["query_string"]
        ):
            self.client().simulate_get(path="/hello?q=abc")
            span = self.memory_exporter.get_finished_spans()[0]
            self.assertIn("query_string", span.attributes)
            self.assertEqual(span.attributes["query_string"], "q=abc")
    def test_traced_not_recording(self):
        # With a non-recording span the middleware must skip attribute and
        # status updates entirely.
        mock_tracer = Mock()
        mock_span = Mock()
        mock_span.is_recording.return_value = False
        mock_tracer.start_span.return_value = mock_span
        mock_tracer.use_span.return_value.__enter__ = mock_span
        mock_tracer.use_span.return_value.__exit__ = mock_span
        with patch("opentelemetry.trace.get_tracer") as tracer:
            tracer.return_value = mock_tracer
            self.client().simulate_get(path="/hello?q=abc")
            self.assertFalse(mock_span.is_recording())
            self.assertTrue(mock_span.is_recording.called)
            self.assertFalse(mock_span.set_attribute.called)
            self.assertFalse(mock_span.set_status.called)
|
[
"noreply@github.com"
] |
kinvolk.noreply@github.com
|
c41b811032ac7e36dd9d4ab116a69cd029889b32
|
ec726dac5bcd01ea807a3b24c9e25f9951f6e910
|
/scripts/Lock.py
|
42e6bcceb8660339fc43fc88988d5086f2b9182d
|
[] |
no_license
|
cms-sw/int-build
|
06d7fd680b0fcc06cce58869b928f2547b4731b1
|
c0f2ca0ef96cdd46d9588ef908d11e9a9cab6618
|
refs/heads/master
| 2021-01-01T18:02:04.757560
| 2015-10-26T08:28:17
| 2015-10-26T08:28:17
| 11,394,116
| 0
| 3
| null | 2015-10-16T15:02:19
| 2013-07-13T21:00:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from os import getpid, makedirs, kill
from os.path import join, getmtime
from commands import getstatusoutput
from time import sleep, time
def isProcessRunning(pid):
    """Return True if a process with the given *pid* currently exists.

    Sends signal 0 via os.kill, which performs existence/permission checks
    without delivering a signal.  Any os.kill failure (ESRCH, EPERM, ...)
    counts as "not running", matching the original behaviour.

    Fixed: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit and hid type errors for non-integer pids;
    only OSError is caught now.
    """
    try:
        kill(pid, 0)
    except OSError:
        return False
    return True
class Lock(object):
    """Directory-based advisory lock shared between processes.

    The lock is a directory containing a ``pid`` file; the process whose
    pid survives several consecutive re-reads of that file holds the lock.
    NOTE(review): the acquire protocol (mkdir + re-read with sleeps) is
    timing based and inherently racy; documented as-is, not redesigned.
    """
    def __init__(
        self,
        dirname,
        checkStale=False,
        stableGap=600,
    ):
        # piddir: the lock directory; pidfile: file holding the owner pid.
        self.piddir = dirname
        self.pidfile = join(self.piddir, 'pid')
        self.pid = str(getpid())
        # mtime of the pid file when last read; 0 means "never read".
        self.locktime = 0
        self._hasLock = self._get()
        # Optionally steal a lock whose pid file has not changed for at
        # least stableGap seconds (owner presumed dead or hung).
        if not self._hasLock and self.locktime and checkStale \
            and time() - self.locktime >= stableGap:
            self._release(True)
            self._hasLock = self._get()
    def getLock(self, waitStep=2, maxTries=0):
        """Poll every *waitStep* seconds until the lock is acquired.

        maxTries > 0 bounds the number of attempts; 0 waits forever.
        """
        if waitStep <= 0:
            waitStep = 2
        while not self._hasLock:
            sleep(waitStep)
            self._hasLock = self._get()
            if maxTries > 0:
                maxTries -= 1
                if maxTries <= 0:
                    break
        return
    def __del__(self):
        # Best-effort release when the Lock object is garbage collected.
        self._release()
    def __nonzero__(self):
        # Python 2 truth protocol: bool(lock) == "do we hold the lock?".
        return self._hasLock
    def _release(self, force=False):
        # Remove the lock directory when we verifiably own the lock, or
        # unconditionally when force is set (used to steal stale locks).
        if not force and self._hasLock and self._get():
            force = True
        if force:
            getstatusoutput('rm -rf %s' % self.piddir)
            self.locktime = 0
        self._hasLock = False
    def _get(self, tries=3, success=3):
        # Try to acquire: True once our own pid has been read back from the
        # pid file `success + 1` times in a row; give up after `tries`
        # create attempts.  The 1-second sleep after _create gives any
        # competing process time to overwrite the file before we re-check.
        if tries <= 0:
            return False
        pid = self._readPid()
        if pid:
            if pid == self.pid:
                if success <= 0:
                    return True
                sleep(0.001)
                return self._get(tries, success - 1)
            if isProcessRunning(int(pid)):
                # Someone else holds the lock and is still alive.
                return False
        self._create()
        sleep(1)
        return self._get(tries - 1, success)
    def _readPid(self):
        # Return the pid string stored in the lock file (None if missing or
        # unreadable) and remember the file mtime for staleness checks.
        pid = None
        try:
            pid = open(self.pidfile).readlines()[0]
            self.locktime = getmtime(self.pidfile)
        except:
            pid = None
        return pid
    def _create(self):
        # Replace any existing lock directory with one that names us owner.
        # Failures (e.g. a concurrent mkdir winning the race) are ignored;
        # _get() re-reads the file and decides who actually won.
        self._release(True)
        try:
            makedirs(self.piddir)
            lock = open(self.pidfile, 'w')
            lock.write(self.pid)
            lock.close()
        except:
            pass
|
[
"giulio.eulisse@cern.ch"
] |
giulio.eulisse@cern.ch
|
6912fbc6262eef219acef4bb0f46fcdca0cde550
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1544+107/sdB_PG_1544+107_coadd.py
|
d34ff0b3147d3f6d2c4257e6a18c9c3868d100a4
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from gPhoton.gMap import gMap
def main():
    # Single-purpose driver: build a coadded NUV count map (plus a binned
    # "movie" count file) for sdB PG 1544+107 with gPhoton's gMap.
    # skypos is [RA, Dec] in degrees; skyrange spans 0.0333... deg on each
    # axis; stepsz=30. is the time-bin size for the movie file — units
    # presumably seconds, per gPhoton's convention (TODO confirm).
    gMap(band="NUV", skypos=[236.659375,10.503742], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_PG_1544+107/sdB_PG_1544+107_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_PG_1544+107/sdB_PG_1544+107_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
55e9e54940ecf7d642f3da6d4a45d111c723a725
|
0a091d537701f93cbab5c7f76c7bf8d117c93887
|
/alembic/versions/138007156428_fix_komm_fylke_mapping.py
|
d97348a0a5fe9f5cdfe87180d6ed2538ad1ffdc7
|
[
"MIT"
] |
permissive
|
atlefren/beerdatabase
|
61ecfc8b9de6e6b202c50b501ccded87387de289
|
d3acf2b02966e058d3840cd167c1c787c0cb88ce
|
refs/heads/master
| 2021-05-04T11:01:47.117527
| 2016-09-14T22:00:34
| 2016-09-14T22:00:34
| 45,804,354
| 10
| 3
| null | 2016-01-25T20:45:08
| 2015-11-08T23:38:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
"""fix komm fylke mapping
Revision ID: 138007156428
Revises: 13603cc8e9a7
Create Date: 2015-12-06 23:12:25.241712
"""
# revision identifiers, used by Alembic.
revision = '138007156428'
down_revision = '13603cc8e9a7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Rebuild komm_fylke so each kommune maps to its containing fylke."""
    # CASCADE also drops the dependent pol_shop_komm_fylke view, which is
    # recreated below.
    op.execute('DROP MATERIALIZED VIEW komm_fylke CASCADE;')
    # The bounding-box test (&&) lets PostGIS use a spatial index before
    # the exact st_contains containment check.
    op.execute('''
CREATE MATERIALIZED VIEW komm_fylke AS
SELECT
k.name as name,
k.kommnr as kommnr,
f.name as fylke_name,
f.fylkesnr as fylkesnr,
k.geom as geom
FROM
kommune k, fylke f
WHERE
k.geom && f.geom
AND
st_contains(f.geom, k.geom)
''')
    # Join shops to their kommune/fylke; geography column is cast to
    # geometry for the containment test.
    op.execute('''
CREATE VIEW pol_shop_komm_fylke as
SELECT
s.*,
k.name as komm_name,
k.kommnr as kommnr,
k.fylke_name as fylke_name,
k.fylkesnr as fylkesnr
FROM
pol_shop s,
komm_fylke k
WHERE
k.geom::geography && s.geog
AND
st_contains(k.geom, s.geog::geometry);
''')
def downgrade():
    # Intentionally irreversible: previous view definitions not restored.
    pass
|
[
"atle@frenviksveen.net"
] |
atle@frenviksveen.net
|
e9f551acfb12c47aa1548d24eee4b522ceb8073b
|
8cd15fba24b6dfa431f3764932101969f5fb524f
|
/JAMediaImagenes/gtk2/Interfaz/ToolbarPrincipal.py
|
01fb926646a3946f16ca0652c42039c7313e706a
|
[] |
no_license
|
srevinsaju/JAMediaSuite
|
c872b4781657bf1bcf63908f71abeca799b8c666
|
1813d1205cf31f89be3c4512eb495baed427494f
|
refs/heads/master
| 2020-12-04T12:14:53.794749
| 2019-01-05T12:52:13
| 2019-01-05T12:52:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,462
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import gtk
import gobject
import commands
class ToolbarPrincipal(gtk.Toolbar):
    """Main toolbar of the image viewer (Python 2 / GTK2).

    Every button click is re-emitted as the custom "accion" signal carrying
    a string action id; the owning window maps those ids to operations.
    """
    __gsignals__ = {
    "accion": (gobject.SIGNAL_RUN_LAST,
        gobject.TYPE_NONE, (gobject.TYPE_STRING,))}
    def __init__(self):
        gtk.Toolbar.__init__(self)
        # --- file actions ---
        abrir = gtk.ToolButton()
        abrir.set_stock_id(gtk.STOCK_OPEN)
        abrir.set_tooltip_text("Abrir")
        abrir.connect("clicked", self.__emit_senial, "open")
        self.insert(abrir, -1)
        self.__guardar = gtk.ToolButton()
        self.__guardar.set_stock_id(gtk.STOCK_SAVE)
        self.__guardar.set_tooltip_text("Guardar")
        self.__guardar.connect("clicked", self.__emit_senial, "save")
        self.insert(self.__guardar, -1)
        self.__guardar_como = gtk.ToolButton()
        self.__guardar_como.set_stock_id(gtk.STOCK_SAVE_AS)
        self.__guardar_como.set_tooltip_text("Guardar Como")
        self.__guardar_como.connect("clicked", self.__emit_senial, "save_as")
        self.insert(self.__guardar_como, -1)
        self.insert(gtk.SeparatorToolItem(), -1)
        # --- zoom actions ---
        self.__zoom_in = gtk.ToolButton()
        self.__zoom_in.set_stock_id(gtk.STOCK_ZOOM_IN)
        self.__zoom_in.set_tooltip_text("Acercar")
        self.__zoom_in.connect("clicked", self.__emit_senial, "zoom_in")
        self.insert(self.__zoom_in, -1)
        self.__zoom_out = gtk.ToolButton()
        self.__zoom_out.set_stock_id(gtk.STOCK_ZOOM_OUT)
        self.__zoom_out.set_tooltip_text("Alejar")
        self.__zoom_out.connect("clicked", self.__emit_senial, "zoom_out")
        self.insert(self.__zoom_out, -1)
        self.__zoom_100 = gtk.ToolButton()
        self.__zoom_100.set_stock_id(gtk.STOCK_ZOOM_100)
        self.__zoom_100.set_tooltip_text("Ver a tamaño original")
        self.__zoom_100.connect("clicked", self.__emit_senial, "zoom_100")
        self.insert(self.__zoom_100, -1)
        self.__zoom_fit = gtk.ToolButton()
        self.__zoom_fit.set_stock_id(gtk.STOCK_ZOOM_FIT)
        self.__zoom_fit.set_tooltip_text("Ocupar todo el espacio disponible")
        self.__zoom_fit.connect("clicked", self.__emit_senial, "zoom_fit")
        self.insert(self.__zoom_fit, -1)
        self.insert(gtk.SeparatorToolItem(), -1)
        # --- rotation actions ---
        self.__izquierda = gtk.ToolButton()
        self.__izquierda.set_stock_id(gtk.STOCK_UNDO)
        self.__izquierda.set_tooltip_text("Rotar a la izquierda")
        self.__izquierda.connect("clicked", self.__emit_senial, "izquierda")
        self.insert(self.__izquierda, -1)
        self.__derecha = gtk.ToolButton()
        self.__derecha.set_stock_id(gtk.STOCK_REDO)
        self.__derecha.set_tooltip_text("Rotar a la derecha")
        self.__derecha.connect("clicked", self.__emit_senial, "derecha")
        self.insert(self.__derecha, -1)
        self.insert(gtk.SeparatorToolItem(), -1)
        # --- navigation between images in the same directory ---
        self.__anterior = gtk.ToolButton()
        self.__anterior.set_stock_id(gtk.STOCK_GO_BACK)
        self.__anterior.set_tooltip_text("Ver imagen anterior")
        self.__anterior.connect("clicked", self.__emit_senial, "anterior")
        self.insert(self.__anterior, -1)
        self.__siguiente = gtk.ToolButton()
        self.__siguiente.set_stock_id(gtk.STOCK_GO_FORWARD)
        self.__siguiente.set_tooltip_text("Ver imagen siguiente")
        self.__siguiente.connect("clicked", self.__emit_senial, "siguiente")
        self.insert(self.__siguiente, -1)
        self.show_all()
    def __emit_senial(self, widget, senial):
        # Re-emit every button click as the "accion" signal.
        self.emit("accion", senial)
    def has_file(self, hasfile, acceso, dirpath=False):
        """Update button sensitivity for the current file.

        hasfile toggles all editing buttons; acceso additionally gates the
        save button (write access).  When dirpath is given, prev/next stay
        enabled only if the directory contains more than one image
        (detected via the `file -ik` MIME probe); the scan stops as soon
        as two images are found.
        """
        buttons = [self.__guardar, self.__guardar_como, self.__zoom_in,
            self.__zoom_out, self.__zoom_100, self.__zoom_fit,
            self.__izquierda, self.__derecha, self.__anterior,
            self.__siguiente]
        for button in buttons:
            button.set_sensitive(hasfile)
        self.__guardar.set_sensitive(acceso)
        paths = 0
        if dirpath:
            files = os.listdir(dirpath)
            for f in files:
                path = os.path.join(dirpath, f)
                datos = commands.getoutput(
                    'file -ik %s%s%s' % ("\"", path, "\""))
                if "image" in datos:
                    paths += 1
                if paths > 1:
                    break
        for button in [self.__anterior, self.__siguiente]:
            button.set_sensitive(bool(paths > 1))
|
[
"fdanesse@gmail.com"
] |
fdanesse@gmail.com
|
5cf5686aab6d2c0ad688cea8069de3ca8a68b512
|
2df47589ca457d16fbffd4e1bccf5133174a0b97
|
/highcharts/core/tests/test_product_json_view.py
|
b51a7fddda42ab847b32c19939890f918833a368
|
[] |
no_license
|
bguerbas/highcharts
|
a805419cb8d5a00bc3f82b5c4df285598f7685d8
|
571fba58465136c5040266b3d4ba2d65a5cc740c
|
refs/heads/master
| 2022-02-12T19:33:12.244474
| 2016-06-04T05:00:24
| 2016-06-04T05:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
from django.test import TestCase
from highcharts.core.models import Category, Product
class TestGet(TestCase):
    """GET /product_json/ returns one product with a category percentage."""
    def setUp(self):
        # One category with a single product, so that product accounts for
        # 100% of its category in the JSON payload.
        category = Category.objects.create(category='Papelaria')
        Product.objects.create(
            product='A4',
            price=4.2,
            category=category
        )
        self.resp = self.client.get('/product_json/')
    def test_get(self):
        self.assertEqual(200, self.resp.status_code)
    def test_mimetype(self):
        self.assertEqual('application/json', self.resp['Content-Type'])
    def test_contents(self):
        # Expected shape: {"products": [{"categoria": ..., "porcentagem": ...}]}
        data = self.resp.json()
        self.assertIn('products', data.keys())
        self.assertEqual(1, len(data['products']))
        self.assertEqual('Papelaria', data['products'][0]['categoria'])
        self.assertEqual(100.0, data['products'][0]['porcentagem'])
|
[
"cuducos@gmail.com"
] |
cuducos@gmail.com
|
92bca2bb4a0d14e571d6c8140331872c810d61c0
|
de066f2aaf7810a9377020a2d25d674a52547a15
|
/Cap05_GUI_Tkinter/extras/03_Barron_Stone/Chap05_Advanced_Widgets/06_configuring_widgets_styles.py
|
08de715b89e479ed091ff2a6fa3d1a6052b49cc0
|
[] |
no_license
|
frclasso/2nd_Step_Python_Fabio_Classo
|
91092b24c442cced4f7c5c145e1e9e8e5f7483d9
|
ad6eefaeb4e6283c461562e7fddcb0aa81f2d90e
|
refs/heads/master
| 2022-12-10T09:42:52.724283
| 2019-07-27T20:28:17
| 2019-07-27T20:28:17
| 146,759,779
| 1
| 0
| null | 2022-12-08T05:55:53
| 2018-08-30T14:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
#!/usr/bin/env python3
# Configuring ttk widget styles.
#
# A ttk "style" describes how a widget is drawn depending on its state
# (active, disabled, focus, pressed, selected, readonly, invalid, hover,
# ...); a "theme" is a named collection of styles.
from tkinter import *
from tkinter import ttk
root = Tk()
button1 = ttk.Button(root, text='Botão 1')
button2 = ttk.Button(root, text='Botão 2')
button1.pack()
button2.pack()
# A single Style object manages every ttk style/theme in the application.
style = ttk.Style()
# Themes available on this system (platform dependent: 'aqua' exists only
# on macOS, 'vista'/'winnative' only on Windows, etc.).
print(style.theme_names())
# Theme currently in use; remember it so it can be restored below.
initial_theme = style.theme_use()
print(initial_theme)
# Switch themes, then restore whatever theme was active at startup.
# Fixed: the original hard-coded style.theme_use('aqua') here, which
# raises TclError on any non-macOS platform.
style.theme_use('classic')
style.theme_use(initial_theme)
# By convention ttk style names prefix the widget class with "T"
# (TButton is the default style for Button); Treeview is the exception.
print(button1.winfo_class())  # TButton
# Configuring the base TButton style affects every button in the program.
style.configure('TButton', foreground='blue')
# Custom styles derive from an existing one via the 'Name.Base' syntax.
style.configure('Alarm.TButton', foreground='orange', font=('Arial', 24, 'bold'))
# Apply the derived style to a single widget.
button2.config(style='Alarm.TButton')
# style.map() picks option values based on the widget's state.
style.map('Alarm.TButton', foreground=[('pressed', 'purple'), ('disabled', 'grey')])
button2.state(['disabled'])
# Inspect a style's internal element layout.
print(style.layout('TButton'))
# NOTE(review): element names are theme dependent and usually spelled
# with a dot ('Button.label'); 'Button_label' may fail on some themes.
print(style.element_options('Button_label'))
# Current value of a single style property.
print(style.lookup('TButton', 'foreground'))  # style, property
# Fixed: the original did `root = mainloop()`, which only worked because
# of the wildcard import (calls the global mainloop()) and rebound root
# to None; call the method on the root window instead.
root.mainloop()
|
[
"frclasso@yahoo.com.br"
] |
frclasso@yahoo.com.br
|
88f772fda60dad89eacffbe396367d9c5fd3de8f
|
fd67592b2338105e0cd0b3503552d188b814ad95
|
/egoi_api/apis/paths/__init__.py
|
d0fcf2e17c920e939c549a52c875cecfa03fb1db
|
[] |
no_license
|
E-goi/sdk-python
|
175575fcd50bd5ad426b33c78bdeb08d979485b7
|
5cba50a46e1d288b5038d18be12af119211e5b9f
|
refs/heads/master
| 2023-04-29T20:36:02.314712
| 2023-04-18T07:42:46
| 2023-04-18T07:42:46
| 232,095,340
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# do not import all endpoints into this module because that uses a lot of memory and stack frames
# if you need the ability to import all endpoints from this module, import them with
# from egoi_api.apis.path_to_api import path_to_api
|
[
"integrations@e-goi.com"
] |
integrations@e-goi.com
|
1efc75fd3a62326bea8b5dfc1ee2e3d82520b1cc
|
18887a0808c0a06a69be3e66c6337295bfc7d99e
|
/menus/models.py
|
b9443c0e078dc25a3ecb36e23f2231136700b1d8
|
[] |
no_license
|
teedee22/tobyd
|
78adf69d7a02cce42dc5a94e0e58007b8e6be196
|
5c54a817608a3911dd44840be82d2bbea44f35c3
|
refs/heads/master
| 2022-12-14T08:29:12.952292
| 2019-09-25T20:13:31
| 2019-09-25T20:13:31
| 205,281,622
| 0
| 0
| null | 2022-12-08T06:35:44
| 2019-08-30T01:33:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.edit_handlers import (
MultiFieldPanel,
InlinePanel,
FieldPanel,
PageChooserPanel,
)
from wagtail.core.models import Orderable
from wagtail.snippets.models import register_snippet
class MenuItem(Orderable):
    """A single entry of a Menu (ordering provided by Orderable)."""
    # Either link_url (external) or link_page (internal) supplies the
    # target; link_title is the display text.
    link_title = models.CharField(max_length=100, blank=True, null=True)
    link_url = models.URLField(max_length=500, blank=True, null=True)
    link_page = models.ForeignKey(
        "wagtailcore.page",
        null=True,
        blank=True,
        related_name="+",
        on_delete=models.CASCADE,
    )
    open_in_new_tab = models.BooleanField(default=False, blank=True)
    # Parent Menu; reverse accessor is `menu_items` (used by InlinePanel).
    page = ParentalKey("Menu", related_name="menu_items")
    highlighted = models.BooleanField(default=False, blank=True)
    panels = [
        FieldPanel("link_title"),
        FieldPanel("link_url"),
        PageChooserPanel("link_page"),
        FieldPanel("open_in_new_tab"),
        FieldPanel("highlighted"),
    ]
    @property
    def link(self):
        # An explicit URL wins over a chosen page; fall back to a
        # placeholder string rather than raising when neither is set.
        if self.link_url:
            return self.link_url
        elif self.link_page:
            return self.link_page.url
        return "missing page url"
@register_snippet
class Menu(ClusterableModel):
    """A named, slugged menu editable as a Wagtail snippet."""
    title = models.CharField(max_length=100)
    # Slug auto-fills from the title but remains editable in the admin.
    slug = AutoSlugField(populate_from="title", editable=True)
    panels = [
        MultiFieldPanel(
            [FieldPanel("title"), FieldPanel("slug")], heading="Menu"
        ),
        InlinePanel("menu_items", label="Menu Items")
    ]
    def __str__(self):
        return self.title
|
[
"tdudleyoffice@gmail.com"
] |
tdudleyoffice@gmail.com
|
d21cd8904bb3eea88d7fd3e5c4a93fff99003a16
|
26dd0732426322eb7c411b7f53d72ec3dddd63fe
|
/ABC_169/B.py
|
42659b9159cc54e9ceb89999a6cf9a30bc723e12
|
[] |
no_license
|
Jinmin-Goh/AtCoder
|
417290531bf92f79e1285052eb82d9a8a3c7b138
|
68d3f7840748814123beebabf478c9316268f166
|
refs/heads/master
| 2022-12-10T21:31:41.411567
| 2020-08-29T15:02:18
| 2020-08-29T15:02:18
| 262,826,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
# Contest No.: ABC169
# Problem No.: B
# Solver: JEMINI
# Date: 20200531
import sys
import heapq
def main():
    """Read N and a list A from stdin; print prod(A), capped per ABC169 B.

    Output is 0 if any factor is zero (even when a partial product already
    exceeded the bound), -1 if the product exceeds 10**18, otherwise the
    exact product.
    """
    int(input())  # N is implied by the list itself and never used
    values = list(map(int, sys.stdin.readline().split()))
    if 0 in values:
        # A single zero factor forces the product to 0 regardless of size.
        print(0)
        return
    product = 1
    for value in values:
        product *= value
        if product > 10 ** 18:
            print(-1)
            return
    print(product)
    return
if __name__ == "__main__":
main()
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
ca05b371020a184e2d08e3162cf7c33c53f0a712
|
1fa2ad5ad57f08f805b9175ab2a961a24d621101
|
/src/test_net.py
|
6808c27a0db116df6527ec8343064fde04b06359
|
[] |
no_license
|
ZQPei/Lenet_cifar10_pytorch
|
e61626bdcadd1abec4389b5f1e40edd665db68fd
|
911fd66e41d4ad7c03f13b603df8bc7c187acb38
|
refs/heads/master
| 2020-03-16T17:36:24.723685
| 2018-05-10T02:55:00
| 2018-05-10T02:55:00
| 132,839,844
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
import torch
import torch.nn as nn
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from Net import Net
# load net
# Loads a previously trained LeNet checkpoint (whole-module pickle, not a
# state_dict) and prints its architecture.
mynet = torch.load('model/net.pth')
print(mynet)
#import ipdb; ipdb.set_trace()
# show me the weight
# Visualize the conv1 filters: min-max normalize into [0, 1] so imshow can
# render them, tile into one grid image, and move channels last
# (C,H,W -> H,W,C) for matplotlib.
weight_conv1 = list(mynet.parameters())[0]
weight_conv1 = (weight_conv1-weight_conv1.min())/(weight_conv1.max()-weight_conv1.min())
weight_conv1 = weight_conv1.cpu()
weight_conv1 = torchvision.utils.make_grid(weight_conv1)
weight_conv1_np = weight_conv1.detach().numpy()
weight_conv1_np = weight_conv1_np.transpose(1,2,0)
# conv2 has shape (16, 6, 5, 5); flatten the input-channel axis into the
# batch axis so make_grid can tile all 96 single-channel 5x5 kernels.
weight_conv2 = list(mynet.parameters())[2]
weight_conv2 = (weight_conv2-weight_conv2.min())/(weight_conv2.max()-weight_conv2.min())
weight_conv2 = weight_conv2.cpu()
weight_conv2 = weight_conv2.view(16*6,1,5,5)
print(weight_conv2.shape)
weight_conv2 = torchvision.utils.make_grid(weight_conv2)
weight_conv2_np = weight_conv2.detach().numpy()
print(weight_conv2_np.shape)
weight_conv2_np = weight_conv2_np.transpose(1,2,0)
#weight_conv2_np = weight_conv2_np.squeeze(1)
plt.figure()
plt.imshow(weight_conv1_np)
plt.figure()
plt.imshow(weight_conv2_np)
plt.show()
# test on my img
# Classify a single local image: HWC -> CHW, add a batch dim, rescale to
# [-1, 1] (same normalization as CIFAR-10 training), and run the net on GPU.
# NOTE(review): transpose(2,1,0) also swaps H and W, not just channels —
# presumably harmless only for square images; verify intent.
img = plt.imread("myimg/4.jpg")
print(img.shape)
img = img.transpose(2,1,0)
img = torch.unsqueeze(torch.from_numpy(img),0)
print(img.shape)
img = img.type(torch.float)
img = (img-img.min())/(img.max()-img.min())
img = (img-0.5)/0.5
img = img.cuda()
pred = mynet(img)
print(pred)
pred = pred.max(1)[1]
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print(classes[pred])
|
[
"dfzspzq@163.com"
] |
dfzspzq@163.com
|
7921cac556470b0a4ac9583697ede3ba8d7170d3
|
2d58c1351ab970eb55f4832b09582592e96468d5
|
/p74.py
|
7dbccc89adfe89703af596f378511832a3be92eb
|
[] |
no_license
|
0x0400/LeetCode
|
832bc971c2cae9eecb55f5b14e8c34eaec0d9e26
|
94bb9fedc908490cc52d87def317c057fadaeceb
|
refs/heads/master
| 2023-02-24T20:13:11.345873
| 2023-02-10T16:46:31
| 2023-02-10T16:46:31
| 84,653,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# https://leetcode.com/problems/search-a-2d-matrix/
from typing import List
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True if *target* occurs in *matrix*.

        Per the problem, each row is sorted ascending and the first element
        of a row is greater than the last element of the previous row, so
        the matrix reads as one sorted list of rows*cols values.  A single
        binary search over that virtual list runs in O(log(rows*cols))
        instead of the original row-by-row scan, and an explicit guard
        avoids the IndexError the original raised on an empty matrix.
        """
        if not matrix or not matrix[0]:
            return False
        rows, cols = len(matrix), len(matrix[0])
        lo, hi = 0, rows * cols - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            # Map the flat index back to a (row, col) cell.
            value = matrix[mid // cols][mid % cols]
            if value == target:
                return True
            if value < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
|
[
"0x0400@users.noreply.github.com"
] |
0x0400@users.noreply.github.com
|
f0890b75c3c25dfcbb281f0881f8e215f9a72c1e
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_23844.py
|
c65bb53c5f20a6b1517566f9d2447106889237be
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
# Python create tuples using function returning multiple values
# Extends each (name, value) pair with the tuple returned by
# extra_info(name) via tuple concatenation.
# NOTE(review): `extra_info` and `old_list` are not defined in this file —
# this is an illustrative snippet, not runnable code; assumes extra_info
# returns a tuple and old_list yields 2-tuples.
new_list = [(name, value) + extra_info(name) for name, value in old_list]
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
43082e96c8ebbd7f7616eece51e9a54b4937c2c7
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/clouds_20200703155604.py
|
15b31a2ea014762a7c9ddce1050ddf21dfc44820
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
def jumpingClouds(c):
    """Count the minimum jumps needed to cross the clouds.

    c is a list of 0s (safe clouds) and 1s (thunderclouds).  The player
    starts on index 0 and jumps 1 or 2 positions forward, landing only on
    0s; the input is assumed solvable (no two adjacent thunderclouds).
    Greedy is optimal: always take a 2-jump when the target cloud is safe.

    Fixed: the original incremented the jump count at every index where a
    1- or 2-jump was *possible* without ever advancing by the jump length,
    overcounting (e.g. [0, 0, 0, 0, 0] printed 3; the answer is 2).

    Prints the jump count (preserving the original behaviour) and also
    returns it so the function is usable programmatically.
    """
    i = 0
    jumps = 0
    while i < len(c) - 1:
        # Prefer the 2-jump whenever it lands inside the list on a 0.
        if i + 2 < len(c) and c[i + 2] == 0:
            i += 2
        else:
            i += 1
        jumps += 1
    print(jumps)
    return jumps
jumpingClouds([0,0,1,0,0,1,0])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
071413884073143eac16e6b991abd4fee106fc0e
|
9bcb5032d27ca321f489c035f7d46019ffdf4b85
|
/numericalFunctions/ptwXY/Python/Test/UnitTesting/thicken/thicken.py
|
e40b151f77d3bf9074457d4b03816df30afbfdc4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LLNL/gidiplus
|
128ef4d4acbcb264e31794a535cd95e8c77d8a96
|
e1c6f0e4de51bc4d7616c5c4676b9818c4b9817c
|
refs/heads/master
| 2023-08-31T06:21:14.519577
| 2023-02-13T18:35:20
| 2023-02-13T18:35:20
| 187,251,526
| 10
| 3
|
NOASSERTION
| 2021-12-23T00:28:07
| 2019-05-17T16:48:24
|
C++
|
UTF-8
|
Python
| false
| false
| 2,306
|
py
|
# <<BEGIN-copyright>>
# Copyright 2019, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# <<END-copyright>>
import os
from numericalFunctions import pointwiseXY_C
# Echo this file's name when the test harness runs with the -e option.
if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print( __file__ )
# Run the C-level 'thicken' test binary in verbose mode and capture its
# output in file 'v'; the Python results below are compared against it.
CPATH = '../../../../Test/UnitTesting/thicken'
os.system( 'cd %s; thicken -v > v' % CPATH )
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )
def getData( ls, hasLabel ) :
    """Parse one data section from the remaining lines *ls*.

    A section is: optional label line (when hasLabel is true), a
    '# length = N' header, then N lines of 'x y' pairs.  Returns
    (remaining_lines, label, curve) where curve is a pointwiseXY_C object,
    or (None, None, None) once ls is exhausted.
    """
    i = 0
    # Skip leading blank lines.
    for l in ls :
        if( l.strip( ) != '' ) : break
        i = i + 1
    ls = ls[i:]
    if( len( ls ) == 0 ) : return( None, None, None )
    label = None
    if( hasLabel ) : label, ls = ls[0].strip( ), ls[1:]
    length, ls = ls[0], ls[1:]
    if( '# length = ' != length[:11] ) : raise Exception( 'Line does not contain length info: "%s"' % ls[0].strip( ) )
    length = int( length.split( '=' )[1] )
    # Only the first two columns of each data line are used (x, y).
    data = [ list( map( float, ls[i].split( )[:2] ) ) for i in range( length ) ]
    return( ls[length:], label, pointwiseXY_C.pointwiseXY_C( data, initialSize = 10, overflowSize = 10 ) )
def compareValues( label, i, v1, v2 ) :
    """Raise an Exception unless v1 and v2 agree to 7 significant digits.

    The values are compared through their '%.7g' string forms; *i* and
    *label* appear only in the error message.
    """
    formatted = [ '%.7g' % value for value in ( v1, v2 ) ]
    if( formatted[0] == formatted[1] ) : return
    raise Exception( 'Values %s %s diff at %d for label = %s' % ( v1, v2, i, label ) )
def thicken( label, original, data ) :
    """Re-run thicken in Python with parameters parsed from *label* and
    compare the result point-by-point against the C code's *data*.

    label embeds 'sectionSubdivideMax = N ... dxMax = X ... fxMax = F'
    after a colon.  Length must always match; log-log sections are only
    length-checked because their interpolated values may differ.
    """
    values = label.split( ':' )[1].split( '=' )
    sectionSubdivideMax = int( values[1].split( )[0] )
    dxMax = float( values[2].split( )[0] )
    fxMax = float( values[3].split( )[0] )
    thick = original.thicken( sectionSubdivideMax = sectionSubdivideMax, dDomainMax = dxMax, fDomainMax = fxMax )
    if( len( data ) != len( thick ) ) : raise Exception( 'len( data ) = %d != len( thick ) = %d for label = "%s"' % \
        ( len( data ), len( thick ), label ) )
    if( 'log-log' in label ) : return
    for i, xy in enumerate( data ) :
        xc, yc = xy
        xp, yp = thick[i]
        compareValues( label, i, xc, xp )
        compareValues( label, i, yc, yp )
# Drive the comparison: the first section (no label) is the original
# curve; every following labelled section is a thickened result to verify.
hasLabel = False
while( 1 ) :
    ls, label, data = getData( ls, hasLabel )
    if( ls is None ) : break
    if( hasLabel ) :
        thicken( label, original, data )
    else :
        original = data
        hasLabel = True
|
[
"mattoon1@llnl.gov"
] |
mattoon1@llnl.gov
|
686f9dae605c6b38e4db127878cb0fbcd0360617
|
b665374878dd4a0b565afb3be8f41c97515b9d33
|
/elifecrossref/preprint.py
|
71cc91577a502fd31e407296fa7b1751fd1ebd91
|
[
"MIT"
] |
permissive
|
elifesciences/elife-crossref-xml-generation
|
e5ab62fec56653fee32ba7b9a2751df37b691b66
|
56440ebc20d4a652201011c9511ed64dfcc80c3d
|
refs/heads/develop
| 2023-08-21T16:34:24.019886
| 2023-08-12T00:50:17
| 2023-08-12T00:50:17
| 95,716,515
| 5
| 4
|
MIT
| 2023-09-07T19:45:58
| 2017-06-28T22:21:40
|
Python
|
UTF-8
|
Python
| false
| false
| 753
|
py
|
from xml.etree.ElementTree import Element, SubElement
from elifearticle import utils as eautils
from elifecrossref import related
def set_preprint(parent, preprint):
    """Add a rel:related_item tag linking the article to its preprint.

    The relation is an intra_work_relation of type hasPreprint (the
    preprint is an earlier version of the same work).  The preprint is
    identified by DOI when available, falling back to its URI.

    Fixed: when the preprint object has neither doi nor uri, the original
    raised NameError (identifier_type was never bound) after already
    appending an empty tag; now nothing is added and *parent* is left
    untouched.  The docstring also wrongly said "inter_work_relation".
    """
    if preprint.doi:
        identifier_type = "doi"
        related_item_text = preprint.doi
    elif preprint.uri:
        identifier_type = "uri"
        related_item_text = preprint.uri
    else:
        # No identifier to reference.
        return
    related_item_tag = SubElement(parent, "rel:related_item")
    related_item_type = "intra_work_relation"
    relationship_type = "hasPreprint"
    related.set_related_item_work_relation(
        related_item_tag,
        related_item_type,
        relationship_type,
        identifier_type,
        related_item_text,
    )
|
[
"gnott@starglobal.ca"
] |
gnott@starglobal.ca
|
38916d27a6cbfea28a51061f2baec16d7065fd62
|
3056736f013b25d1e70adb355082c578e5091314
|
/ml_quality/datasets/shit/snake_board_test.py
|
92a2badc6ac91075e56ab0ca18e1394e5e96ab35
|
[] |
no_license
|
flerchy/codestyle-core
|
dcad1385b76678e4473f0804d2ecfaa03866d124
|
a009bcd2f17dadd440ea5ff24bd3167e38948bff
|
refs/heads/master
| 2021-01-20T03:21:16.694322
| 2017-06-05T05:52:35
| 2017-06-05T05:52:35
| 89,524,181
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,825
|
py
|
import pygame, sys, random
from pygame.locals import *
from basics import snake,point,board, food
from basics.direction import Direction
def encrypt_snake(snake):
    """Serialize a Snake into a marker-delimited string for the network.

    Layout: %%body%%<x%%sep_xy%%y%%eop%% per point>%%body%%
            %%dir%%<direction>%%dir%%%%color%%<color>%%color%%
    """
    enc_data = "%%body%%"
    for point in snake.get_body_points():
        # Each body point becomes "x%%sep_xy%%y%%eop%%".
        enc_data += str(point.get_x()) + "%%sep_xy%%"
        enc_data += str(point.get_y())
        enc_data += "%%eop%%"
    enc_data += "%%body%%"
    enc_data += "%%dir%%"
    enc_data += snake.get_direction()
    enc_data += "%%dir%%"
    enc_data += "%%color%%"
    enc_data += snake.get_color()
    enc_data += "%%color%%"
    return enc_data
def get_snake_points(enc_data):
    """Decode the %%body%% section back into a list of Point objects.

    Note: despite the original docstring, this returns the body point
    list, not a Snake object.
    """
    body_list = []
    # split(...)[1] is the text between the two %%body%% markers; every
    # point chunk ends with %%eop%%, so the trailing empty split element
    # is dropped with [:-1].
    for points in enc_data.split("%%body%%")[1].split("%%eop%%")[:-1]:
        x_y = points.split("%%sep_xy%%")
        body_list.append(point.Point(int(x_y[0]), int(x_y[1])))
    return body_list
def get_snake_direction(enc_data):
    """Return the direction string stored between the %%dir%% markers."""
    pieces = enc_data.split("%%dir%%")
    return pieces[1]
def get_snake_color(enc_data):
    """Return the color string stored between the %%color%% markers."""
    pieces = enc_data.split("%%color%%")
    return pieces[1]
def get_food_location():
    """Returns random x and y coordinates for food."""
    # Draw x then y, matching the board's 21 x 16 cell grid.
    x = random.randint(0, 20)
    y = random.randint(0, 15)
    return (x, y)
##First Snake
# Initial three-segment snake starting in the top-left corner, heading right.
# NOTE(review): this file uses Python 2 syntax (print statement below).
point1 = point.Point(0,0)
point2 = point.Point(0,1)
point3 = point.Point(0,2)
snake1 = snake.Snake([point1, point2, point3], Direction.RIGHT)
snake_food = food.Food(20,15)
#PyGame Variables
# Board is 400x300 px rendered as a 20x15 grid of 20-px cells.
pygame.init()
FPS = 6
GAME_OVER = False
fpsClock = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Snakes')
myfont = pygame.font.SysFont("Comic Sans MS", 30)
game_over_text = myfont.render("Game Over!", 1, (0,0,0))
WHITE = (255, 255, 255)
snake_body = pygame.image.load('imgs/snake/'+snake1.get_color()+'/snake_body.png')
# Mouth sprites per color / facing direction; only 'yellow' is loaded here.
snake_mouth_icon = {}
snake_mouth_icon['yellow'] = {
    'right' : pygame.image.load('imgs/snake/yellow/snake_mouth_right.gif'),
    'left' : pygame.image.load('imgs/snake/yellow/snake_mouth_left.gif'),
    'up' : pygame.image.load('imgs/snake/yellow/snake_mouth_up.gif'),
    'down' : pygame.image.load('imgs/snake/yellow/snake_mouth_down.gif'),
}
snake_food_icon = pygame.image.load('imgs/frog.png')
#Networking Part
# Main game loop: draw, handle input, advance the snake once per frame.
# NOTE(review): indentation below reconstructed from control flow — verify
# against the original file.
while True:
    #snake_mouth = pygame.image.load('imgs/snake/'+snake1.get_color()+'/snake_mouth_'+snake1.get_direction()+'.gif')
    DISPLAYSURF.fill(WHITE)
    snake_body_points = snake1.get_body_points()
    # Last body point is drawn with the mouth sprite (the head).
    snake_mouth_point = snake_body_points[-1]
    enc_data = encrypt_snake(snake1)
    #print enc_data
    #print snake_body_points == get_snake_points(enc_data),
    #print get_snake_direction(enc_data), get_snake_color(enc_data)
    print snake_food
    for body_point in snake_body_points[:-1]:
        DISPLAYSURF.blit(snake_body, (20*body_point.get_x(), 20*body_point.get_y()))
    DISPLAYSURF.blit(snake_mouth_icon[snake1.get_color()][snake1.get_direction()],
                     (20*snake_mouth_point.get_x(), 20*snake_mouth_point.get_y()))
    DISPLAYSURF.blit(snake_food_icon, (20*snake_food.get_x(), 20*snake_food.get_y()))
    #direction = random.choice([0,1,3,4])
    #print direction
    key_pressed = False
    if snake1.has_eaten_food(snake_food):
        snake_food.update_position()
        snake1.grow_snake()
    if snake1.is_bitten_by_itself():
        GAME_OVER = True
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            # NOTE(review): K_UP maps to Direction.DOWN and vice versa —
            # presumably because screen y grows downward; confirm intended.
            if event.key == K_RIGHT:
                snake1.update_direction(Direction.RIGHT)
            elif event.key == K_LEFT:
                snake1.update_direction(Direction.LEFT)
            elif event.key == K_UP:
                snake1.update_direction(Direction.DOWN)
            elif event.key == K_DOWN:
                snake1.update_direction(Direction.UP)
            if not GAME_OVER:
                snake1.move_snake()
                key_pressed = True
    # Advance automatically when no key advanced the snake this frame.
    if not GAME_OVER and not key_pressed:
        snake1.move_snake()
    if GAME_OVER:
        DISPLAYSURF.blit(game_over_text, (100, 100))
        break
    pygame.display.update()
    fpsClock.tick(FPS)
|
[
"flerchy@gmail.com"
] |
flerchy@gmail.com
|
99e2f8abf29c8d3a63efa35b197921747f42b243
|
d3cdceb672f3ffa9d0f7cddfb03062d48a118427
|
/hoods/admin.py
|
cde1e05c3f36d0a160cd75f271913491702fc12e
|
[] |
no_license
|
sethmuriuki/Nights-Watch
|
e6e4020276ab87801f964e59da3f29f47b4e5c88
|
57a5bd3444b88442cb5ea988fc0ea5d64f44df2f
|
refs/heads/master
| 2021-05-02T02:28:23.335867
| 2018-03-22T05:49:46
| 2018-03-22T05:49:46
| 120,884,306
| 0
| 1
| null | 2018-06-19T09:30:41
| 2018-02-09T09:18:05
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django.contrib import admin
from . import models
# Register your models here.
class GroupMemberInline(admin.TabularInline):
    # Tabular inline for editing GroupMember rows from a parent admin page.
    # NOTE(review): defined but never attached to a ModelAdmin in this file —
    # confirm whether it should be listed in some admin's ``inlines``.
    model = models.GroupMember
admin.site.register(models.Group)
|
[
"sethkrm@gmail.com"
] |
sethkrm@gmail.com
|
35bb586667a937f6c2d36795e7b13b063fa3de4d
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/pageobjects/widgets/members_profile_website.py
|
7aae462a94a557e4bcab95a0af413cd84964ba13
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715
| 2015-09-29T16:11:18
| 2015-09-29T16:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.basepageelement import Text
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileWebsite(MembersProfileElement):
    """Page object for the 'website' section of a member profile page.

    Wraps the website text field and its access (privacy) dropdown and
    exposes ``value``/``update`` helpers for reading and editing them.
    """

    def __init__(self, owner, locatordict=None):
        # Fix the shared mutable-default-argument pitfall: the original
        # ``locatordict={}`` handed the same dict object to every instance.
        super(MembersProfileWebsite,self).__init__(owner, locatordict or {})

        # load hub's classes
        MembersProfileWebsite_Locators = self.load_class('MembersProfileWebsite_Locators')

        # update this object's locator
        self.locators.update(MembersProfileWebsite_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.website = Text(self,{'base':'website'})
        self.access = Select(self,{'base':'access'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def value(self):
        """return a dictionary with website and access values"""
        return {'website' : self.website.value(),
                'access' : self.access.value()}

    def update(self,website=None,access=None):
        """Update the website and/or access values, then save the section.

        Passing ``None`` leaves the corresponding field unchanged.
        """
        if website is not None:
            self.website.value = website
        if access is not None:
            self.access.value = access
        # 'save' is resolved dynamically through the locator machinery
        # (see the 'save' entry in the locators class).
        self.save.click()
class MembersProfileWebsite_Locators_Base(object):
    """locators for MembersProfileWebsite object"""

    # CSS locators keyed by logical component name; entries like 'save',
    # 'cancel', 'open' and 'close' are resolved dynamically by the
    # page-object machinery rather than bound as components in __init__.
    locators = {
        'base' : "css=.profile-web",
        'website' : "css=#profile-url",
        'access' : "css=.profile-web select[name='access[org]']",
        'sectionkey' : "css=.profile-web .key",
        'sectionvalue' : "css=.profile-web .value",
        'open' : "css=.profile-web .edit-profile-section",
        'close' : "css=.profile-web .edit-profile-section",
        'save' : "css=.profile-web .section-edit-submit",
        'cancel' : "css=.profile-web .section-edit-cancel",
    }
|
[
"telldsk@gmail.com"
] |
telldsk@gmail.com
|
f6aab59774f510e241f1046f6ec4b42798bd38b4
|
8633ec7985ffd7f849210b93bc20e632f8ae8707
|
/tree/CMSSW_4_2_8_patch7/src/Validation/RecoTrack/test/borisTests/MTV.py
|
4f90e6fb5980d818f54e52ed358645a99d9d285f
|
[] |
no_license
|
liis/el_track
|
2ed5b3b7a64d57473328df0e5faf28808bab6166
|
cd7978e5fa95d653bab5825b940911b465172c1a
|
refs/heads/master
| 2016-09-10T20:09:07.882261
| 2015-01-08T14:41:59
| 2015-01-08T14:41:59
| 14,494,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,494
|
py
|
# CMSSW configuration: run the MultiTrackValidator over a local AOD file.
import FWCore.ParameterSet.Config as cms
process = cms.Process("MULTITRACKVALIDATOR")
# message logger (limit -1 = unlimited messages)
process.MessageLogger = cms.Service("MessageLogger",
    default = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
# source
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles)
#source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( ['file:./aod.root'])
process.source = source
# -1 => process all events in the input file
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'START42_V11::All'
### standard includes
process.load('Configuration/StandardSequences/Services_cff')
process.load('Configuration.StandardSequences.GeometryPilot2_cff')
process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
### validation-specific includes
process.load("SimTracker.TrackAssociation.TrackAssociatorByHits_cfi")
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cfi")
process.load("Validation.RecoTrack.cuts_cff")
process.load("Validation.RecoTrack.MultiTrackValidator_cff")
process.load("DQMServices.Components.EDMtoMEConverter_cff")
process.load("Validation.Configuration.postValidation_cff")
process.load("Validation.RecoTrack.TrackValidation_cff")
# normalise sim-to-reco efficiency to reconstructed tracks
process.TrackAssociatorByHits.SimToRecoDenominator = cms.string('reco')
#--- To change the fraction of sHits that are required to be matched by the associator
# The default is 0.75
#process.TrackAssociatorByHits.Purity_SimToReco = cms.double(0.60)
#process.TrackAssociatorByHits.Cut_RecoToSim = cms.double(0.60)
#---
########### configuration MultiTrackValidator ########
process.multiTrackValidator.outputFile = 'multitrackvalidator.root'
process.multiTrackValidator.associators = ['TrackAssociatorByHits']
process.multiTrackValidator.skipHistoFit=cms.untracked.bool(False)
process.multiTrackValidator.runStandalone=cms.bool(True)
# Only the high-purity track selection is validated; the per-iteration
# collections below can be re-enabled individually.
#process.multiTrackValidator.label=cms.VInputTag(cms.InputTag("generalTracks"),
process.multiTrackValidator.label=cms.VInputTag(
    cms.InputTag("cutsRecoTracksHp"),
#    cms.InputTag("cutsRecoTracksZero"),
#    cms.InputTag("cutsRecoTracksZeroHp"),
#    cms.InputTag("cutsRecoTracksFirst"),
#    cms.InputTag("cutsRecoTracksFirstHp"),
#    cms.InputTag("cutsRecoTracksSecond"),
#    cms.InputTag("cutsRecoTracksSecondHp"),
#    cms.InputTag("cutsRecoTracksThird"),
#    cms.InputTag("cutsRecoTracksThirdHp"),
#    cms.InputTag("cutsRecoTracksFourth"),
#    cms.InputTag("cutsRecoTracksFourthHp"),
#    cms.InputTag("cutsRecoTracksFifth"),
#    cms.InputTag("cutsRecoTracksFifthHp")
)
# Logarithmic pT binning: 40 bins between 0.1 and 300 GeV.
process.multiTrackValidator.useLogPt=cms.untracked.bool(True)
process.multiTrackValidator.minPt = cms.double(0.1)
process.multiTrackValidator.maxPt = cms.double(300.0)
process.multiTrackValidator.nintPt = cms.int32(40)
#--- Filter on TrackingParticles
# pt in [0,2.8] when calculating the tracking Fake rate
# pt in [0,2.5] when calculating the efficiency vs eta
# pt in eta slice when calculating the efficiency vs pt for barrel/transition/endcap
process.multiTrackValidator.useLogPt=cms.untracked.bool(True)
process.multiTrackValidator.histoProducerAlgoBlock.minPt = cms.double(0.1)
process.multiTrackValidator.histoProducerAlgoBlock.maxPt = cms.double(300.0)
process.multiTrackValidator.histoProducerAlgoBlock.nintPt = cms.int32(40)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(0.9)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.9)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(1.4)
#process.multiTrackValidator.minAbsEtaTP = cms.double(1.4)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(2.5)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(2.8)
# Active TrackingParticle eta window: full tracker acceptance |eta| < 2.5.
process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
process.multiTrackValidator.maxAbsEtaTP = cms.double(2.5)
#---
#--- uncomment this part to run MTV on GsfTrack collection
#
#process.cutsRecoTracksHp = cms.EDFilter("RecoGsfTrackSelector",
#    src = cms.InputTag("electronGsfTracks"),
###    src = cms.InputTag("elGsfTracksWithQuality"),
#    beamSpot = cms.InputTag("offlineBeamSpot"),
#    algorithm = cms.vstring(),
#    maxChi2 = cms.double(10000.0),
###    #quality = cms.vstring('highPurity'), ## NEW
###    quality = cms.vstring('loose'), ## NEW
#    quality = cms.vstring(), ## NEW
#    minRapidity = cms.double(-5.0),
#    maxRapidity = cms.double(5.0),
#    tip = cms.double(120.0),
#    lip = cms.double(300.0),
#    ptMin = cms.double(0.1),
#    min3DHit = cms.int32(0),
#    minHit = cms.int32(3),
#    minAbsEta = cms.double(1.4),
#    maxAbsEta = cms.double(2.5)
#)
#process.multiTrackValidator.histoProducerAlgoBlock.useGsf = cms.bool(True)
#---
#--- Filter on track collection
# pt in [0,2.8] when calculating the tracking efficiency
# pt in eta slice when calculating the fake rate vs pt for barrel/transition/endcap
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(0.9)
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.9)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(1.4)
#process.cutsRecoTracksHp.minAbsEta = cms.double(1.4)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(2.5)
# Active reco-track eta window: |eta| < 2.8.
process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
process.cutsRecoTracksHp.maxAbsEta = cms.double(2.8)
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(2.5)
process.multiTrackValidator.UseAssociators = cms.bool(True)
# Selector sequence: only the high-purity cut filter is active.
process.ValidationSelectors = cms.Sequence( process.cutsRecoTracksHp
#    process.cutsRecoTracksZero*
#    process.cutsRecoTracksZeroHp*
#    process.cutsRecoTracksFirst*
#    process.cutsRecoTracksFirstHp*
#    process.cutsRecoTracksSecond*
#    process.cutsRecoTracksSecondHp*
#    process.cutsRecoTracksThird*
#    process.cutsRecoTracksThirdHp*
#    process.cutsRecoTracksFourth*
#    process.cutsRecoTracksFourthHp*
#    process.cutsRecoTracksFifth*
#    process.cutsRecoTracksFifthHp
)
process.validation = cms.Sequence(
    process.multiTrackValidator
)
# paths
process.p = cms.Path(
    process.ValidationSelectors *
    process.validation
)
process.schedule = cms.Schedule(
    process.p
)
#process.MTVHistoProducerAlgoForTrackerBlock.TpSelectorForEfficiencyVsEta.tip = cms.double(0.5)
|
[
"polaarrebane@gmail.com"
] |
polaarrebane@gmail.com
|
0c167adf308cbd963e674e936b502ec49de9948c
|
8010b4640a79c5c1fb58dd9b011723b744b3dd47
|
/src/unv/web/helpers.py
|
3399a083be504cc40f7d81610d2c57ed2b2bf153
|
[
"MIT"
] |
permissive
|
c137digital/unv_web
|
277578e2a128e193b12e88465fe7c93a4a2019f1
|
52bea090c630b4e2a393c70907d35c9558d259fa
|
refs/heads/master
| 2020-04-15T07:39:27.027454
| 2019-08-29T20:50:53
| 2019-08-29T20:50:53
| 164,498,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
import urllib
from aiohttp import web
from unv.utils.files import calc_crc32_for_file
from .deploy import SETTINGS as DEPLOY_SETTINGS
async def render_template(
        request, template_name, context=None, status=web.HTTPOk.status_code):
    """Render a Jinja2 template asynchronously and wrap it in an HTML response."""
    environment = request.app['jinja2']
    template = environment.get_template(template_name)
    body = await template.render_async(context or {})
    return web.Response(
        text=body,
        status=status,
        charset='utf-8',
        content_type='text/html'
    )
def url_for_static(path: str, with_hash: bool = False):
    """Build the public URL for a static asset.

    When *with_hash* is true a ``?hash=<crc32>`` query string computed from
    the file on disk is appended, so clients cache-bust on change.
    """
    url = DEPLOY_SETTINGS.static_url
    directory = DEPLOY_SETTINGS.static_dir
    real_path = directory / path.lstrip('/')
    hash_ = ''
    if with_hash:
        hash_ = '?hash={}'.format(calc_crc32_for_file(real_path))
    # NOTE(review): this strips one leading occurrence of the static_dir
    # prefix from *path* — it only has an effect when callers pass absolute
    # on-disk paths; confirm that is intended for plain relative paths.
    path = str(path).replace(str(directory), '', 1).lstrip('/')
    return f"{url}/{path}{hash_}"
def url_with_domain(path: str):
    """Prefix *path* with the configured scheme and deployment domain."""
    scheme = 'https' if DEPLOY_SETTINGS.use_https else 'http'
    return f"{scheme}://{DEPLOY_SETTINGS.domain}/{path.lstrip('/')}"
def make_url_for_func(app, with_domain=False):
    """Create a ``url_for(route, **parts)`` helper bound to *app*'s router.

    Route parameters are stringified before resolution; when *with_domain*
    is true the resolved URL is made absolute via ``url_with_domain``.
    """
    def url_for(route, **parts):
        str_parts = {key: str(value) for key, value in parts.items()}
        resolved = app.router[route].url_for(**str_parts)
        if with_domain:
            return url_with_domain(str(resolved))
        return resolved
    return url_for
def inline_static_from(path):
    """Read a static file and return its contents with all newlines removed."""
    static_path = DEPLOY_SETTINGS.static_dir / path
    with static_path.open('r') as static_file:
        contents = static_file.read()
    return contents.replace("\n", "")
|
[
"morty.space@gmail.com"
] |
morty.space@gmail.com
|
b945ff417f597ac2d5502e5f5d09f0d02334f7d4
|
d44d11bb5e8a3245a8f2db39f9e460eae2b50c82
|
/HKDataBase/tests.py
|
ec3068a14f0c6b395c56965e5930bdedf44af2db
|
[] |
no_license
|
banziha104/HKServerA1
|
636f50de907ed5a8764b678b434acbc37bb7ee65
|
14a20432fdf3bcb6574e249f95b8c3662a30ee26
|
refs/heads/master
| 2021-08-10T12:42:49.889987
| 2017-11-12T11:55:46
| 2017-11-12T11:55:46
| 110,443,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
from django.test import TestCase
import os
# Create your tests here.
from os.path import abspath, dirname, join
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Three dirname() calls climb from this file up to the repository root.
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
# Debug output: resolved sqlite path and the active settings module
# (setdefault only assigns when DJANGO_SETTINGS_MODULE is not already set).
print(os.path.join(BASE_DIR, 'db.sqlite3'))
print(os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings.common"))
|
[
"they6687@naver.com"
] |
they6687@naver.com
|
2f392a90b98f1dbc100c0fdd1c399ce2843eddef
|
700f9f9e319ebd26d2557d64ea3827808dfad2f5
|
/tests/fixtures/test_appendices_json/content_02_expected.py
|
23c6bae8a17b15b59da7860417ea52c7ae82ffa6
|
[
"MIT"
] |
permissive
|
elifesciences/elife-tools
|
1b44e660e916a82ef8ff64dd5a6ee5506e517359
|
bc16e7dd5d6245077e39f8561b99c9acd510ddf7
|
refs/heads/develop
| 2023-03-06T08:37:47.424282
| 2023-02-20T20:40:49
| 2023-02-20T20:40:49
| 30,274,058
| 13
| 11
|
MIT
| 2023-02-20T20:40:50
| 2015-02-04T01:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
from collections import OrderedDict
# Expected "appendices" structure produced for content fixture 02: one
# appendix whose content mixes paragraphs, a mathml block, and nested
# sections (some titled, some without titles).
expected = [
    OrderedDict(
        [
            ("id", u"app1"),
            (
                "title",
                u"Appendix 1: Details of the automated linear stability analysis",
            ),
            (
                "content",
                [
                    OrderedDict(
                        [
                            ("type", "paragraph"),
                            (
                                "text",
                                u"We consider a reaction-diffusion system of the form",
                            ),
                        ]
                    ),
                    OrderedDict(
                        [
                            ("type", "mathml"),
                            ("id", u"equ5"),
                            ("label", u"(1)"),
                            (
                                "mathml",
                                '<math><mrow><mi mathvariant="bold">c</mi></mrow></math>',
                            ),
                        ]
                    ),
                    OrderedDict([("type", "paragraph"), ("text", u"where etc.")]),
                    OrderedDict(
                        [
                            ("type", "section"),
                            ("id", u"s16"),
                            ("title", u"Step 1. Possible networks of size ..."),
                            (
                                "content",
                                [
                                    OrderedDict(
                                        [
                                            ("type", "paragraph"),
                                            (
                                                "text",
                                                u"We first generate a list of possible networks with ...",
                                            ),
                                        ]
                                    )
                                ],
                            ),
                        ]
                    ),
                    OrderedDict(
                        [
                            ("type", "paragraph"),
                            ("text", u"Test another section with no title"),
                        ]
                    ),
                    OrderedDict(
                        [
                            ("type", "section"),
                            ("id", u"test2"),
                            ("title", u"Section with title"),
                            (
                                "content",
                                [
                                    OrderedDict(
                                        [
                                            ("type", "paragraph"),
                                            ("text", u"Section content"),
                                        ]
                                    )
                                ],
                            ),
                        ]
                    ),
                    OrderedDict(
                        [
                            ("type", "paragraph"),
                            ("text", u"Second section with no title"),
                        ]
                    ),
                ],
            ),
        ]
    )
]
|
[
"gnott@starglobal.ca"
] |
gnott@starglobal.ca
|
702e48c2aa3d8d52708984ce1249139d83d40039
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/FJk4mJwRk2TYZhkeQ_11.py
|
f8eebc3f73f01afb69527a31188520efad9b25d1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
"""
Create a function that takes a string and returns a new string with each new
character accumulating by +1. Separate each set with a dash.
### Examples
accum("abcd") ➞ "A-Bb-Ccc-Dddd"
accum("RqaEzty") ➞ "R-Qq-Aaa-Eeee-Zzzzz-Tttttt-Yyyyyyy"
accum("cwAt") ➞ "C-Ww-Aaa-Tttt"
### Notes
* Capitalize the first letter of each set.
* All tests contain valid strings with alphabetic characters (a-z, A-Z).
"""
def accum(txt):
    """Return each character repeated (index + 1) times, capitalized,
    joined with dashes: accum("abcd") -> "A-Bb-Ccc-Dddd"."""
    groups = []
    for position, letter in enumerate(txt, start=1):
        groups.append((letter * position).capitalize())
    return "-".join(groups)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f5c64a21035dbbdf79fe9300cdf1ba736915177c
|
29afbde1e2d5497f5ab023a99ea37b0c59e6bb09
|
/password.py
|
d86fbaee471a50c76295435c6499e1e6078e8bdb
|
[
"MIT"
] |
permissive
|
kahenya-anita/Password-Manager
|
5358fc048ba90d0827e4ff8c7f54919107c5aa95
|
b3e7683e61e55574b846dfe49bf5d4016085261c
|
refs/heads/master
| 2022-12-15T08:04:08.350058
| 2020-09-08T19:42:48
| 2020-09-08T19:42:48
| 293,597,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
import string
import random
class Password:
    """
    Class that generates system given passwords.
    """
    # Candidate character pool: ASCII letters, digits and a few symbols
    # (67 characters total).
    password_letters=list(string.ascii_letters)
    password_nums=list(string.digits)
    password_symbols=["#","@","&","$","%"]
    password_chars=[]
    password_chars.extend(password_letters)
    password_chars.extend(password_nums)
    password_chars.extend(password_symbols)

    @classmethod
    def gen_password(cls):
        """
        Method to generate system given passwords.

        Prompts repeatedly until a valid integer length (>= 5) is entered,
        then builds a random password of that length.

        Returns:
            System generated password
        """
        pass_length=10
        num_valid=True
        while num_valid:
            try:
                pass_length=int(input("Enter password length (at least 5): "))
                if pass_length<5:
                    print("**Length should be at least 5. Try again.")
                else:
                    num_valid=False
            except ValueError:
                print("**Invalid input. Use numbers.")
        # Use choices (sampling WITH replacement): the original random.sample
        # raised ValueError for any length greater than the 67-character pool.
        # NOTE(review): random is not cryptographically secure — consider the
        # ``secrets`` module for real password generation.
        sys_password="".join(random.choices(cls.password_chars, k=pass_length))
        return sys_password
|
[
"anitakahenya1@gmail.com"
] |
anitakahenya1@gmail.com
|
ee28a8aa66f3d6fec24591cae31c80251e0b6f07
|
4bf5f83a8e5cd4c3ee700569e4a6f07a87dd209c
|
/students/11th/jiwonko/project/User/migrations/0003_auto_20200812_1307.py
|
88bb82d97b9338ddd0609dc2118a3630dbab1049
|
[] |
no_license
|
gledong12/westagram-backend
|
6e066f4c741aa19df13224ba530b0d9f43a405f7
|
1842f065c599885ad5dcb9ec5fb267eaf3295872
|
refs/heads/master
| 2023-03-11T20:32:47.055525
| 2021-03-04T01:04:31
| 2021-03-04T01:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
# Generated by Django 3.0.8 on 2020-08-12 13:07
import User.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.8): widen ``user.password`` to 156 chars
    # and attach the project's custom password validator.
    dependencies = [
        ('User', '0002_auto_20200805_1106'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='password',
            field=models.CharField(max_length=156, validators=[User.validators.validate_password]),
        ),
    ]
|
[
"lsheon93@gmail.com"
] |
lsheon93@gmail.com
|
598fa7f17dd1d125cc395d7e717deb6f7a89dab0
|
685fa2cb16ff8bce96b603dee8117ed3e9a1adcb
|
/dlib-find-min-global/parse_xml.py
|
6a073916c47463b9d891a91cdabd8ad99ff0a563
|
[] |
no_license
|
albertofernandezvillan/pyimagesearch
|
352ec1ec678cb628524c476fdcc86c22238a1a2f
|
8c87e6c5d218e42a8864778c032c0fd20261bcdd
|
refs/heads/master
| 2023-02-27T22:02:25.581660
| 2021-02-08T15:15:15
| 2021-02-08T15:15:15
| 338,087,397
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
# ------------------------
# USAGE
# ------------------------
# python parse_xml.py --input ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml
# --output ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train_eyes.xml
# python parse_xml.py --input ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test.xml
# --output ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test_eyes.xml
# ------------------------
# IMPORTS
# ------------------------
# Import the necessary packages
import argparse
import re
# Construct the argument parser and parse the arguments
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to iBug 300-W data split XML file")
ap.add_argument("-t", "--output", required=True, help="path output data split XML file")
args = vars(ap.parse_args())
# In the iBUG 300-W dataset, each (x, y)-coordinate maps to a specific facial feature (i.e., eye, mouth, nose, etc.)
# -- in order to train a dlib shape predictor on *just* the eyes, we must first
# define the integer indexes that belong to the eyes
LANDMARKS = set(list(range(36, 48)))
# To easily parse out the eye locations from the XML file we can utilize regular expressions
# to determine if there is a 'part' element on any given line
PART = re.compile("part name='[0-9]+'")
# Load the contents of the original XML file and open the output file for writing
# NOTE(review): neither file handle is explicitly closed — relies on
# interpreter exit; a with-block would be safer.
print("[INFO] parsing data split XML file...")
rows = open(args["input"]).read().strip().split("\n")
output = open(args["output"], "w")
# Loop over the rows of the data split file
for row in rows:
    # Check to see if the current line has the (x, y)-coordinates for the facial landmarks we are interested in
    parts = re.findall(PART, row)
    # If there is no information related to the (x, y)-coordinates of the facial landmarks,
    # we can write the current line out to disk with no further modifications
    if len(parts) == 0:
        output.write("{}\n".format(row))
    # Otherwise, there is annotation information that we must process
    else:
        # Parse out the name of the attribute from the row
        # (the integer between the quotes in part name='NN')
        attr = "name='"
        i = row.find(attr)
        j = row.find("'", i + len(attr) + 1)
        name = int(row[i + len(attr):j])
        # If the facial landmark name exists within the range of the indexes, write it to our output file
        if name in LANDMARKS:
            output.write("{}\n".format(row))
|
[
"silva.mfpedro@gmail.com"
] |
silva.mfpedro@gmail.com
|
9a24020666e786dd930310c84812123cc468ef3a
|
c5758c1f4c880f4530df1a5ffb4c30ee2da445ee
|
/pytracking/vot_ep/sk3x3_meanmax_adaptive/vot_wrapper_sk3x3_meanmax_adaptive_ep0020.py
|
68408effc2c17172004adef9bfbecef3d34e37ef
|
[] |
no_license
|
bfjei2825401/d3s
|
6d662fc301181a0e3ad831b0db6111e3cf8f4097
|
32140a3c67252f0e98cbfbf6ad6d2a79267c221b
|
refs/heads/master
| 2023-02-27T09:57:25.692878
| 2021-01-27T14:20:57
| 2021-01-27T14:20:57
| 297,217,521
| 0
| 0
| null | 2020-09-21T03:23:09
| 2020-09-21T03:23:09
| null |
UTF-8
|
Python
| false
| false
| 2,523
|
py
|
import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3_meanmax_adaptive import SegmSK3x3MeanMaxAdaptive
from pytracking.parameter.segm_sk3x3_meanmax_adaptive import default_params_ep as vot_params
def rect_to_poly(rect):
    """Convert an axis-aligned rectangle [x, y, w, h] into a flat polygon.

    The result lists the four corners clockwise from the top-left:
    [x0, y0, x1, y1, x2, y2, x3, y3].
    """
    left = rect[0]
    top = rect[1]
    right = rect[0] + rect[2]
    bottom = rect[1] + rect[3]
    return [left, top, right, top, right, bottom, left, bottom]
def parse_sequence_name(image_path):
    """Return (sequence_name, idx): the path component immediately before
    '/color/' and the index where '/color/' starts in *image_path*."""
    idx = image_path.find('/color/')
    prefix = image_path[:idx]
    sep = prefix.rfind('/')
    # mirror the original reversed-find arithmetic, including the
    # no-separator edge case (which yields an empty name)
    start = sep + 1 if sep != -1 else idx + 1
    return image_path[start:idx], idx
def parse_frame_name(image_path, idx):
    """Return the frame filename (without extension) that follows the
    '/color/' directory starting at *idx* in *image_path*."""
    file_part = image_path[idx + len('/color/'):]
    return file_part[:file_part.find('.')]
# MAIN
# VOT toolkit handshake: receive the initial polygon region and first frame.
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
    sys.exit(0)
# Tracker parameters for epoch 20 (see module name: ep0020).
params = vot_params.parameters(20)
# Flatten the 4-corner selection into [x0, y0, ..., x3, y3], rounded to 2 dp.
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
           round(selection.points[1].x, 2), round(selection.points[1].y, 2),
           round(selection.points[2].x, 2), round(selection.points[2].y, 2),
           round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
# OpenCV loads BGR; the tracker expects RGB.
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3MeanMaxAdaptive(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
# Per-frame loop: track each frame supplied by the toolkit and report a polygon.
while True:
    imagefile = handle.frame()
    if not imagefile:
        break
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
    # tell the frame name to the tracker (to save segmentation masks to the disk)
    frame_name = parse_frame_name(imagefile, idx_)
    tracker.frame_name = frame_name
    prediction = tracker.track(image)
    # A 4-element prediction is an axis-aligned rect; expand it to a polygon.
    if len(prediction) == 4:
        prediction = rect_to_poly(prediction)
    pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
                             vot.Point(prediction[2], prediction[3]),
                             vot.Point(prediction[4], prediction[5]),
                             vot.Point(prediction[6], prediction[7])])
    handle.report(pred_poly)
|
[
"752958525@qq.com"
] |
752958525@qq.com
|
c2e007ef13501c87c7fc8e93c7490f29a01b9e1d
|
8d06729522dbdf7c6391ffcd608d1f1bba3f3ae0
|
/bricks/utils/__init__.py
|
2a8d9c7b0e42ccc4c9d85c6758134e58f7b0da08
|
[] |
no_license
|
seler/django-bricks
|
14d9b1de356b698cd0c17574a8f0304e682febb0
|
fe1713971d0c881c0f0352217a69b196553f11aa
|
refs/heads/master
| 2021-04-05T04:26:47.311914
| 2013-01-21T05:45:52
| 2013-01-21T05:45:52
| 248,519,671
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
def inheritors(klass):
    """
    Returns all inheritors of `klass`.

    Walks the subclass tree and collects every direct and indirect
    subclass into a set.

    source: `http://stackoverflow.com/a/5883218/708764`
    """
    seen = set()
    pending = [klass]
    while pending:
        current = pending.pop()
        for subclass in current.__subclasses__():
            if subclass in seen:
                continue
            seen.add(subclass)
            pending.append(subclass)
    return seen
|
[
"rafal@selewonko.com"
] |
rafal@selewonko.com
|
1890a6e4eb9ea623fd1b3048b6bdd2df43af873f
|
a7058080e41af37eb77c146fc09a5e4db57f7ec6
|
/Solved/10817/10817.py
|
b367bcb33c195c11704251209f5f52883256727a
|
[] |
no_license
|
Jinmin-Goh/BOJ_PS
|
bec0922c01fbf6e440589cc684d0cd736e775066
|
09a285bd1369bd0d73f86386b343d271dc08a67d
|
refs/heads/master
| 2022-09-24T02:24:50.823834
| 2022-09-21T02:16:22
| 2022-09-21T02:16:22
| 223,768,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# Problem No.: 10817
# Solver: Jinmin Goh
# Date: 191120
# URL: https://www.acmicpc.net/problem/10817
# Read three space-separated integers and print the median (middle) value.
a = input()
a = a.split()
for i in range(3):
    a[i] = int(a[i])
a.sort()
print(a[1])
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
9cdaa82fda89ad9951a6b8d5c94f1ebd93c88177
|
2f963d7989749037a3ec27aaa39b31416b33cbb2
|
/ib_action/views/user_action_counts/tests/test_case_01.py
|
afa4eddc49ff53e95a58568a880e84464e0d4139
|
[] |
no_license
|
migsantos121/phd3-backend
|
3cd014908856c995de3c4473d82059bc9c1b5794
|
9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e
|
refs/heads/master
| 2022-12-12T17:25:59.334509
| 2020-03-09T09:24:08
| 2020-03-09T09:24:08
| 245,991,086
| 0
| 0
| null | 2022-06-28T14:45:50
| 2020-03-09T09:17:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
from django_swagger_utils.drf_server.utils.server_gen.custom_api_test_case import CustomAPITestCase
request_body = """
{
"action_types": [
"RATE",
"RATE"
],
"source": "string",
"action_values": [
"string",
"string"
],
"entity_types": [
"string",
"string"
]
}
"""
response_body = """
[{"action_value": "string", "entity_count": 1, "action_type": "RATE", "entity_type": "string"}, {"action_value": "string", "entity_count": 1, "action_type": "RATE", "entity_type": "string"}]
"""
test_case = {
"request": {
"path_params": {},
"query_params": {},
"header_params": {},
"securities": {"oauth": {"scopes": ["read", "write"], "tokenUrl": "http://auth.ibtspl.com/oauth2/", "flow": "password", "type": "oauth2"}},
"body": request_body,
},
"response": {
"status": 200,
"body": response_body,
"header_params": {}
}
}
class TestCase01UserActionCountsAPITestCase(CustomAPITestCase):
    # Generated API test: replays the request/response pair declared in
    # ``test_case`` through the framework's CustomAPITestCase driver.
    def __init__(self, *args, **kwargs):
        super(TestCase01UserActionCountsAPITestCase, self).__init__(APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX, test_case,
                                                                    *args, **kwargs)
    def test_case(self):
        response = super(TestCase01UserActionCountsAPITestCase, self).test_case()
        # your extended implementation of test case
        self.assertEqual(response.status_code, 200)
|
[
"migsantos121@outlook.com"
] |
migsantos121@outlook.com
|
69975169bb1b1827b925e00ea4eae20831cca216
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/problems/0342.0_Power_of_Four.py
|
a0a5a32e27c28c00543bc71d410f6523723b5a68
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
'''
Success
Details
Runtime: 16 ms, faster than 83.07% of Python online submissions for Power of Four.
Memory Usage: 11.9 MB, less than 11.11% of Python online submissions for Power of Four.
'''
class Solution(object):
    def isPowerOfFour(self, num):
        """
        Return True iff ``num`` is an exact power of four (4**k, k >= 0).

        :type num: int
        :rtype: bool
        """
        if num <= 0:
            return False
        while num > 1:
            if num % 4 != 0:
                return False
            # Floor division keeps the arithmetic in exact integers.
            # The original ``num = num / 4`` is float true-division in
            # Python 3: it loses exactness for large values and raises
            # OverflowError once num exceeds float range (e.g. 4**600).
            num //= 4
        return True
if __name__ == "__main__":
    # Quick smoke tests for the solution above.
    num = 16
    assert Solution().isPowerOfFour(num)
    num = 5
    assert not Solution().isPowerOfFour(num)
    num = 0
    assert not Solution().isPowerOfFour(num)
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
005ac079befb88d6eec83627239178a0d305530e
|
fd513d0ef02231526fc8225dfdb1bf84752b3541
|
/jaraco/packaging/depends.py
|
ebeddfe0fef449ba636d347cc3a54377cd87d3f4
|
[
"MIT"
] |
permissive
|
artynet/jaraco.packaging
|
810bfb83238a9075de6001deec9eaaca56db64a9
|
d68128b9c8c12334364128aec05b038102515159
|
refs/heads/master
| 2022-11-15T10:50:18.224592
| 2020-02-16T15:56:37
| 2020-02-16T15:56:37
| 277,494,378
| 0
| 0
| null | 2020-07-06T09:08:05
| 2020-07-06T09:08:04
| null |
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
"""
This module should only import modules from stdlib and setuptools
"""
from __future__ import print_function, unicode_literals
import os
import re
import argparse
import subprocess
import setuptools
import pkg_resources
# ``unicode`` on Python 2, falling back to ``str`` on Python 3.
text_type = getattr(__builtins__, 'unicode', str)
# Shared --help strings for both the argparse CLI and the setuptools command.
req_help = "A setuptools requirement spec (e.g. 'eggmonster' or " "'eggmonster==0.1')"
python_help = "Use a remote environment rather than the local one."
def tree_cmd():
    """Command-line entry point: print the dependency tree for a requirement.

    With ``--python`` the check is delegated to a remote interpreter via
    ``check_dependencies_remote``; otherwise it runs in this environment.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('requirement', help=req_help)
    parser.add_argument('--python', help=python_help)
    args = parser.parse_args()
    if args.python:
        return check_dependencies_remote(args)
    check_dependencies(args.requirement)
def print_package(requirement, indent):
    """Print *requirement* indented by *indent* spaces, with the installed
    distribution it resolves to in brackets."""
    r = requirement
    print(' ' * indent + str(r), '[{0}]'.format(pkg_resources.get_distribution(r)))
def parse_extras(req):
    """Return the extras named in *req* (e.g. 'pkg[a,b]' -> ['a', 'b']), or []."""
    match = re.search(r'\[(.*)\]', text_type(req))
    if not match:
        return []
    return match.group(1).split(',')
def check_dependencies(req, indent=1, history=None):
    """
    Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
    'gryphon'), print a tree of dependencies as they resolve in this
    environment.
    """
    # keep a history to avoid infinite loops
    if history is None:
        history = set()
    if req in history:
        return
    history.add(req)
    d = pkg_resources.get_distribution(req)
    extras = parse_extras(req)
    if indent == 1:
        # Only the root call prints the top-level requirement itself.
        print_package(req, 0)
    for r in d.requires(extras=extras):
        print_package(r, indent)
        # Recurse, sharing `history` so diamond dependencies print only once.
        check_dependencies(r, indent + 1, history)
def load_dependencies(req, history=None):
    """
    Load the dependency tree as a Python object tree,
    suitable for JSON serialization.

    >>> deps = load_dependencies('jaraco.packaging')
    >>> import json
    >>> doc = json.dumps(deps)
    """
    if history is None:
        history = set()
    dist = pkg_resources.get_distribution(req)
    spec = dict(requirement=str(req), resolved=str(dist))
    if req not in history:
        # traverse into children
        history.add(req)
        extras = parse_extras(req)
        depends = [
            load_dependencies(dep, history=history)
            for dep in dist.requires(extras=extras)
        ]
        if depends:
            # Only emit the key when there are children, keeping the
            # serialized form compact.
            spec.update(depends=depends)
    return spec
class DependencyTree(setuptools.Command):
    """Setuptools command that prints the resolved dependency tree."""

    description = "Report a tree of resolved dependencies"
    user_options = [
        (str('requirement='), str('r'), req_help),
        (str('python='), str('p'), python_help),
    ]

    def finalize_options(self):
        # Nothing to validate; options are consumed as-is in run().
        pass

    def initialize_options(self):
        # Default to inspecting this distribution's own name.
        self.requirement = self.distribution.get_name()
        self.python = None

    def run(self):
        if self.python:
            return check_dependencies_remote(self)
        check_dependencies(self.requirement)
def check_dependencies_remote(args):
    """
    Invoke this command on a remote Python.

    Runs ``<python> -m depends <requirement>`` with PYTHONPATH pointing at
    this package so the remote interpreter can import the module.
    """
    cmd = [args.python, '-m', 'depends', args.requirement]
    # Inherit the current environment instead of replacing it wholesale:
    # passing only PYTHONPATH stripped PATH, HOME, etc. from the child,
    # which can break interpreter startup on some platforms.
    env = dict(os.environ, PYTHONPATH=os.path.dirname(__file__))
    return subprocess.check_call(cmd, env=env)
if __name__ == '__main__':
    # Support direct execution (used by check_dependencies_remote).
    tree_cmd()
|
[
"jaraco@jaraco.com"
] |
jaraco@jaraco.com
|
1a44630bcc49abc465f08b79db51861073294d16
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/284/284.peeking-iterator.233519966.Accepted.leetcode.py
|
363695e26d3e5481086fc95101ac8197fd2fcef5
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
class PeekingIterator(object):
    """Wrap an iterator exposing next()/hasNext() and add peek().

    One element is read ahead and cached in ``self.front``.  A separate
    boolean flag tracks whether the cache holds a value, so falsy elements
    (0, '', None) are handled correctly -- the original ``bool(self.front)``
    check reported exhaustion as soon as a falsy value was cached.
    """

    def __init__(self, iterator):
        self.it = iterator
        self.front = None
        self._has_front = False
        if self.it.hasNext():
            self.front = self.it.next()
            self._has_front = True

    def peek(self):
        """Return the next element without consuming it (None when exhausted)."""
        return self.front

    def next(self):
        """Return the next element and refill the look-ahead cache."""
        temp = self.front
        self.front = None
        self._has_front = False
        if self.it.hasNext():
            self.front = self.it.next()
            self._has_front = True
        return temp

    def hasNext(self):
        """True while a look-ahead element is cached."""
        return self._has_front
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
13ad90a72e1503b0419dd453068d2ddfbf4c2ed3
|
71e5ce7abbf5f9e8887c16e7a89219a98a5827cf
|
/Python-for-everyone/01_For_Everyone/05_Loop_Iterations/06_count.py
|
1882c6ed7cd3efc4bb7006b59f6552575a63ad71
|
[] |
no_license
|
Python-Repository-Hub/Learn-Online-Learning
|
f62c9965b34c050abcc4b2ef9a4d09600a0e52ec
|
2845a0d66b1f7fa416182b99cef25cda871b3360
|
refs/heads/master
| 2023-07-27T17:00:44.498987
| 2021-09-14T06:10:37
| 2021-09-14T06:10:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
zork = 0
print('Before', zork)
# Unlike the lecture, `numbers` is declared up front as a list of ints.
numbers = [9, 41, 12, 3, 74, 15]
for thing in numbers:
    zork += 1
    print(zork, thing)
print('After', zork)
# Expected output:
# Before 0
# 1 9 ... 6 15  (one numbered line per element)
# After 6
|
[
"danghyeona0113@gmail.com"
] |
danghyeona0113@gmail.com
|
d7e643eac7eefa815456f66073d2a633d5e65bc2
|
8fc2707bc30c8e56a607e0fd97122d3509ce6dbd
|
/Pithon/pithon-game/core/Player.py
|
89d02f6bf8ba37de6cc6a14df1ee70ebc16438f7
|
[] |
no_license
|
pithon/pithon
|
8c0239f527866ce05b327b436350dcc0e7fab4cb
|
9a183be17464a810c0c047fbc29b52451d39f641
|
refs/heads/master
| 2021-01-18T14:10:18.606353
| 2013-02-23T04:41:38
| 2013-02-23T04:41:38
| 7,985,688
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
import pygame, constants
from Tail import Tail


class Player(object):
    """A snake-style player: an ordered list of Tail segments, head first."""

    def __init__(self, color):
        self.tail=[]
        # The head segment is white; later body segments use `color`.
        head=Tail(pygame.color.Color(255,255,255), self, 116,100, constants.RIGHT)
        self.tail.append(head)
        self.color=color
        self.direction=constants.RIGHT
        # When True, a new segment is appended on the next update_tail() call.
        self.extend=True

    def add_tail(self):
        """Request one segment of growth on the next update."""
        self.extend=True

    def update_tail(self, direction):
        """Advance every segment one step.

        Each segment takes the direction its predecessor had last frame,
        so a turn made at the head ripples down the body one segment per
        update.  The swap below is order-sensitive: `save_direction` holds
        the segment's old direction for the *next* iteration.
        """
        save_direction=direction
        for i in self.tail:
            save_direction=i.direction
            i.direction=direction
            direction=save_direction
            i.update()
        if self.extend:
            # Grow by duplicating the last segment's position and direction.
            last_part = self.tail[-1]
            new_part = Tail(self.color, self, last_part.rect.x, last_part.rect.y, last_part.direction)
            self.tail.append(new_part)
            self.extend=False

    def render_tail(self, screen):
        """Blit every segment's sprite onto `screen`."""
        for i in self.tail:
            screen.blit(i.image, i.rect)
|
[
"louis@goessling.com"
] |
louis@goessling.com
|
8229bbf974f9c0d3d5cfb6455f6d0e95c0bc8258
|
d6617514df849a77b491159c4cc8a3bfc6599d83
|
/hw2/DecryptDES.py
|
7f2088a9af6d0c0cc5ae44fca4c41c088699bebb
|
[] |
no_license
|
LJP-TW/Information_Security_Class
|
409cc852480c611d89b60139705bc9de4374670d
|
d195b99c62e653ec551ce50077c6d29b23668b8c
|
refs/heads/master
| 2020-08-01T19:26:54.886014
| 2019-12-22T14:10:04
| 2019-12-22T14:10:04
| 211,090,532
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,145
|
py
|
#!/usr/bin/python3
import sys
import numpy as np

# Parse the 64-bit key and 64-bit ciphertext block from hex command-line
# arguments into zero-padded 64-character bit strings.
key = bin(int(sys.argv[1], 16))[2:]
text = bin(int(sys.argv[2], 16))[2:]
key = '0' * (64 - len(key)) + key
text = '0' * (64 - len(text)) + text
##### table
# Standard DES tables, converted to 0-based indices.
# IP / IP_1: initial permutation and its inverse (final permutation).
IP = [int(x)-1 for x in ('58 50 42 34 26 18 10 2 60 52 44 36 28 20 12 4 62 54 46 38 30 22 14 6 64 56 48 40 32 24 16 8 57 49 41 33 25 17 9 1 59 51 43 35 27 19 11 3 61 53 45 37 29 21 13 5 63 55 47 39 31 23 15 7'.split())]
IP_1 = [int(x)-1 for x in ('40 8 48 16 56 24 64 32 39 7 47 15 55 23 63 31 38 6 46 14 54 22 62 30 37 5 45 13 53 21 61 29 36 4 44 12 52 20 60 28 35 3 43 11 51 19 59 27 34 2 42 10 50 18 58 26 33 1 41 9 49 17 57 25'.split())]
# E: expansion table (32 -> 48 bits) used by the round function.
E = [int(x)-1 for x in ('32 1 2 3 4 5 4 5 6 7 8 9 8 9 10 11 12 13 12 13 14 15 16 17 16 17 18 19 20 21 20 21 22 23 24 25 24 25 26 27 28 29 28 29 30 31 32 1'.split())]
# PC_1 / PC_2: key-schedule permutations (64 -> 56 bits, then 56 -> 48 bits).
PC_1 = [int(x)-1 for x in ('57 49 41 33 25 17 9 1 58 50 42 34 26 18 10 2 59 51 43 35 27 19 11 3 60 52 44 36 63 55 47 39 31 23 15 7 62 54 46 38 30 22 14 6 61 53 45 37 29 21 13 5 28 20 12 4'.split())]
PC_2 = [int(x)-1 for x in ('14 17 11 24 1 5 3 28 15 6 21 10 23 19 12 4 26 8 16 7 27 20 13 2 41 52 31 37 47 55 30 40 51 45 33 48 44 49 39 56 34 53 46 42 50 36 29 32'.split())]
# P: 32-bit permutation applied after the S-boxes.
P = [int(x)-1 for x in ('16 7 20 21 29 12 28 17 1 15 23 26 5 18 31 10 2 8 24 14 32 27 3 9 19 13 30 6 22 11 4 25'.split())]
# The eight DES S-boxes, each a 4x16 lookup table.
Sbox = []
Sbox.append(np.array([int(x) for x in ('14 4 13 1 2 15 11 8 3 10 6 12 5 9 0 7 0 15 7 4 14 2 13 1 10 6 12 11 9 5 3 8 4 1 14 8 13 6 2 11 15 12 9 7 3 10 5 0 15 12 8 2 4 9 1 7 5 11 3 14 10 0 6 13'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('15 1 8 14 6 11 3 4 9 7 2 13 12 0 5 10 3 13 4 7 15 2 8 14 12 0 1 10 6 9 11 5 0 14 7 11 10 4 13 1 5 8 12 6 9 3 2 15 13 8 10 1 3 15 4 2 11 6 7 12 0 5 14 9'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('10 0 9 14 6 3 15 5 1 13 12 7 11 4 2 8 13 7 0 9 3 4 6 10 2 8 5 14 12 11 15 1 13 6 4 9 8 15 3 0 11 1 2 12 5 10 14 7 1 10 13 0 6 9 8 7 4 15 14 3 11 5 2 12'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('7 13 14 3 0 6 9 10 1 2 8 5 11 12 4 15 13 8 11 5 6 15 0 3 4 7 2 12 1 10 14 9 10 6 9 0 12 11 7 13 15 1 3 14 5 2 8 4 3 15 0 6 10 1 13 8 9 4 5 11 12 7 2 14'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('2 12 4 1 7 10 11 6 8 5 3 15 13 0 14 9 14 11 2 12 4 7 13 1 5 0 15 10 3 9 8 6 4 2 1 11 10 13 7 8 15 9 12 5 6 3 0 14 11 8 12 7 1 14 2 13 6 15 0 9 10 4 5 3'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('12 1 10 15 9 2 6 8 0 13 3 4 14 7 5 11 10 15 4 2 7 12 9 5 6 1 13 14 0 11 3 8 9 14 15 5 2 8 12 3 7 0 4 10 1 13 11 6 4 3 2 12 9 5 15 10 11 14 1 7 6 0 8 13'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('4 11 2 14 15 0 8 13 3 12 9 7 5 10 6 1 13 0 11 7 4 9 1 10 14 3 5 12 2 15 8 6 1 4 11 13 12 3 7 14 10 15 6 8 0 5 9 2 6 11 13 8 1 4 10 7 9 5 0 15 14 2 3 12'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('13 2 8 4 6 15 11 1 10 9 3 14 5 0 12 7 1 15 13 8 10 3 7 4 12 5 6 11 0 14 9 2 7 11 4 1 9 12 14 2 0 6 10 13 15 3 5 8 2 1 14 7 4 10 8 13 15 12 9 0 3 5 6 11'.split())]).reshape(4,16))
# Cumulative left-shift totals for the key schedule (28 == 0 mod 28),
# reversed -- presumably so round keys come out in decryption order; confirm
# against the encryption counterpart.
LS = [1, 2, 4, 6, 8, 10, 12, 14, 15, 17, 19, 21, 23, 25, 27, 0][::-1]
#### end
def convert(data, table):
    """Permute bit-string `data` according to `table` (0-based source indices)."""
    return ''.join(data[index] for index in table)
def xor(s1, s2):
    """Bitwise XOR of two equal-length bit strings, returned as a bit string."""
    bits = ['0' if s1[i] == s2[i] else '1' for i in range(len(s1))]
    return ''.join(bits)
def SubKey(K, i):
    """Derive the 48-bit round key for round `i` from the 56-bit key `K`."""
    C = K[0:28]
    D = K[28:]
    # Rotate each 28-bit half left by the cumulative shift for this round.
    C = C[LS[i]:] + C[:LS[i]]
    D = D[LS[i]:] + D[:LS[i]]
    return convert(C + D, PC_2)
def foo(R, K):
    """The DES round function f(R, K): expand R, XOR with the round key,
    substitute through the eight S-boxes, then permute with P."""
    B = xor(convert(R, E), K)
    z = ''
    for i in range(8):
        b = B[i*6: i*6 + 6]
        # S-box row is the outer bit pair, column the middle four bits.
        y = int((b[0]+b[5]), 2)
        x = int(b[1:5], 2)
        out = bin(Sbox[i][y][x])[2:]
        # Zero-pad each S-box output to exactly 4 bits.
        out = '0' * (4 - len(out)) + out
        z += out
    return convert(z, P)
# Drop parity bits from the key and apply the initial permutation to the text.
key = convert(key, PC_1)
text = convert(text, IP)
# NOTE(review): halves are taken with L as the *right* 32 bits here, the
# reverse of the usual convention -- combined with the reversed LS schedule
# this presumably runs the 16 Feistel rounds in decryption order; verify
# against the matching encryption script.
L = text[32:]
R = text[0:32]
LN = ''
RN = ''
for i in range(16):
    # One Feistel round: new L = R xor f(L, K_i), new R = old L.
    LN = xor(R, foo(L, SubKey(key, i)))
    RN = L
    L = LN
    R = RN
# Final (inverse) permutation, then format the result as lowercase hex.
ans = hex(int(convert(L + R, IP_1), 2))[2:].lower()
ans = '0x' + ans
print(ans)
|
[
"accr94238@gmail.com"
] |
accr94238@gmail.com
|
d493b4b047bfc276bb5effed4193ae083b59c65b
|
e44c83395d2ddd1e1b7c1e521d360f2ef8d585d0
|
/gitlab-new/tt/bus8684/items.py
|
4c8f025a4aa785fb80e448fde15776b7fd3226b0
|
[] |
no_license
|
zhoujx4/Crawls
|
63ebcac5b4c0bbccdde56e6a2f5efbc4091d03e0
|
94b3ac88d7e49cb4a03e7b211a9437709d1c371c
|
refs/heads/master
| 2020-12-23T15:25:48.041965
| 2020-01-30T10:35:19
| 2020-01-30T10:35:19
| 237,189,197
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class Bus8684Item(scrapy.Item):
    """Scrapy item produced by the bus8684 spider."""
    # define the fields for your item here like:
    content = Field()
    page_type = Field()
    # Housekeeping fields
    url = Field()
    project = Field()
    spider = Field()
    server = Field()
    date = Field()
|
[
"673760239@qq.com"
] |
673760239@qq.com
|
499d2b9e316af87f449192a3c3de07b8cf55b4f0
|
03de685efae7d8f6de0e98c3008cb89f87825fb4
|
/test/compare/compare.py
|
c11c4504dcda17b5e254ee24b4672cc0d22cea78
|
[] |
no_license
|
gedeschaines/robotics-toolbox-python
|
161f7af8be91c51e1902021ba9f9dc3f6fc5b766
|
22eb2394172e60b1dbca03d4be9bb0ecaf49b183
|
refs/heads/master
| 2021-06-14T00:42:24.468518
| 2021-02-17T22:20:36
| 2021-02-17T22:20:36
| 140,235,483
| 13
| 3
| null | 2019-01-06T13:30:23
| 2018-07-09T05:29:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
"""
Compare the RNE implementations of Matlab and Python.
We take the Matlab results as the gold standard since they were cross-checked
against a Maple and other implementations a long time ago.
The process:
1. genpath.m creates random q, qd and qdd data and saves to path.dat. Random values
in the q, qd and qdd statespace are used with significant velocity and acceleration
so that errors in velocity and acceleration specific parts of the RNE algorithms will
be shown up. There are 60 rows:
rows 1-20, qd=qdd=0, gravity and friction torques only
rows 21-40 qdd=0, gravity, friction and centripetal/Coriolis forces
rows 41-60 all forces.
2. genpath.m creates tau for the Puma560 (DH) and saves to puma560.dat
3. genpath.m creates tau for the Puma560 (MDH) and saves to puma560m.dat
4. compare.py loads path.dat, computes the torques for DH and MDH cases and find the
difference from the Matlab versions
"""
from robot import *;

print "Compare Python and Matlab RNE implementations"

# load the (q,qd,qdd) data
path = loadtxt('path.dat');

# load the Matlab computed torques
matlab_dh = loadtxt('puma560.dat');

# Standard Denavit-Hartenberg Puma 560 model.
from robot.puma560 import *
tau = rne(p560, path);
diff = matlab_dh - tau;
#print diff
# Frobenius norm of the difference; near zero means the implementations agree.
print "RNE DH, error norm =", linalg.norm(diff, 'fro')

#############

# load the Matlab computed torques
matlab_mdh = loadtxt('puma560m.dat');
# Modified Denavit-Hartenberg Puma 560 model.
from robot.puma560akb import *
tau = rne(p560m, path);
diff = matlab_mdh - tau;
#print diff
print "RNE MDH, error norm =", linalg.norm(diff, 'fro')
|
[
"peter.i.corke@gmail.com"
] |
peter.i.corke@gmail.com
|
a330d906dd0cdf05fc80449ad1547096442e3b8f
|
c5be188cf1231d62f7ad69c98ee71b4bc181f6f2
|
/image_resize.py
|
b94f3f35d8941b74d58a8b394552b06f0b5a1b6d
|
[] |
no_license
|
ashish1sasmal/OPENCV-Python
|
e2d81e4472413ba79c5cdd4fcaad534225a5d394
|
589accc3c16eb4bf515ba85ee46ae6fdc2347bc6
|
refs/heads/master
| 2021-07-17T17:56:30.438576
| 2020-10-16T17:12:09
| 2020-10-16T17:12:09
| 217,500,976
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
import cv2
l = []  # clicked points accumulated as [x, y] pairs


def click_event(event, x, y, flags, param):
    """Mouse callback: after three left-clicks, copy the rectangle spanned by
    the first two clicks so its top-left corner lands on the third click.
    """
    global l
    if event == cv2.EVENT_LBUTTONDOWN:
        l.append([x, y])
        if len(l) == 3:
            print(l, l[2][0]+l[1][0]-l[0][0], l[2][1]+l[1][1]-l[0][1])
            # Source columns must span l[0][0]:l[1][0]; the original used
            # l[1][0]:l[1][0] -- an empty slice whose shape cannot match the
            # destination region, so the assignment failed.
            img[l[2][1]:l[2][1]+l[1][1]-l[0][1], l[2][0]:l[2][0]+l[1][0]-l[0][0]] = \
                img[l[0][1]:l[1][1], l[0][0]:l[1][0]]
            cv2.imshow('Frame', img)
            l = []
# Load the image, show it, and route mouse events on the window to the
# callback; block until a key is pressed.
img = cv2.imread("app.jpg")
cv2.imshow('Frame',img)
cv2.setMouseCallback('Frame', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"ashishsasmal1@gmail.com"
] |
ashishsasmal1@gmail.com
|
fa69246e9b983af79f51aa0df615d294d267d396
|
0ba2c3776618b5b8b76f4a23f21e9c6ad3f6e2e1
|
/part2/04.py
|
278a036d3564d1aaba49d2e49151f5a96d565b99
|
[] |
no_license
|
WangDongDong1234/python_code
|
6dc5ce8210b1dcad7d57320c9e1946fd4b3fe302
|
6a785306a92d328a0d1427446ca773a9803d4cc0
|
refs/heads/master
| 2020-04-15T12:35:03.427589
| 2019-09-16T15:38:25
| 2019-09-16T15:38:25
| 164,681,323
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
class GameRole:
    """A combat character with a name, attack damage (ad) and hit points (hp)."""

    def __init__(self, name, ad, hp):
        self.name = name
        self.ad = ad
        self.hp = hp

    def attack(self, p):
        """Deal this role's damage to `p` and report the result."""
        p.hp -= self.ad
        print("%s 攻击了%s,%s掉了%s,还剩%s" % (self.name, p.name, p.name, self.ad, p.hp))

    def equip_weapon(self, wea):
        """Equip `wea`, adding its damage to this role's attack."""
        self.wea = wea
        self.ad += wea.ad
# Two sample roles: (name, attack damage, hit points).
p1=GameRole("亚瑟",20,500)
p2=GameRole("剑豪",50,300)
#p1.attack(p2)
class Weapon:
    """A weapon with its own name and damage that can strike on a role's behalf."""

    def __init__(self, name, ad):
        self.name = name
        self.ad = ad

    def fight(self, p1, p2):
        """Have `p1` hit `p2` with this weapon, reducing p2's hp by the weapon's ad."""
        p2.hp -= self.ad
        print("%s用%s打%s,%s掉了%s血,还剩%s学" % (p1.name, self.name, p2.name, p2.name, p1.ad, p2.hp))
# Equip p1 with the axe (ad 20 -> 22), then attack p2 with the boosted damage.
axe=Weapon("三板斧",2)
p1.equip_weapon(axe)
p1.attack(p2)
print(p1.wea.name)
print(p1.wea.ad)
|
[
"827495316@qq.com"
] |
827495316@qq.com
|
1bdffd60464eb5d1d0a4aa03bd1f51e6778e9f08
|
47eccd2a6b844bce32012017e2ad3eb62221763c
|
/producers/models/line.py
|
dc48f215d8734108a64ee764d036d040425954c1
|
[] |
no_license
|
zxy-zxy/udacity_data_streaming_traffic_optimization
|
953b95536e62a18e9bdd9fd8244fbef2966789fb
|
3bb8d0e6f5e4b78b0c0a9e0b3e85a3f8788f3c65
|
refs/heads/master
| 2023-03-07T07:42:20.397271
| 2021-02-20T07:57:41
| 2021-02-20T07:57:41
| 340,464,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,097
|
py
|
"""Defines functionality relating to train lines"""
from enum import IntEnum
import logging
from producers.models.station import Station
from producers.models.train import Train
logger = logging.getLogger(__name__)
class Line:
    """Contains Chicago Transit Authority (CTA) Elevated Loop Train ("L") Station Data"""

    # Line identifiers; IntEnum so colors compare/serialize as small ints.
    colors = IntEnum("colors", "blue green red", start=0)
    # Trains run in two directions along the line ("a" and "b").
    num_directions = 2

    def __init__(self, color, station_data, num_trains=10):
        self.color = color
        self.num_trains = num_trains
        self.stations = self._build_line_data(station_data)
        # We must always discount the terminal station at the end of each direction
        self.num_stations = len(self.stations) - 1
        self.trains = self._build_trains()

    def _build_line_data(self, station_df):
        """Constructs all stations on the line"""
        stations = station_df["station_name"].unique()
        station_data = station_df[station_df["station_name"] == stations[0]]
        line = [Station(station_data["station_id"].unique()[0], stations[0], self.color)]
        prev_station = line[0]
        # Chain the remaining stations, linking each to its predecessor.
        for station in stations[1:]:
            station_data = station_df[station_df["station_name"] == station]
            new_station = Station(
                station_data["station_id"].unique()[0],
                station,
                self.color,
                prev_station,
            )
            prev_station.dir_b = new_station
            prev_station = new_station
            line.append(new_station)
        return line

    def _build_trains(self):
        """Constructs and assigns train objects to stations"""
        trains = []
        curr_loc = 0
        b_dir = True
        # Spread trains evenly along the line; _get_next_idx flips the
        # direction when a placement wraps around a terminal.
        for train_id in range(self.num_trains):
            tid = str(train_id).zfill(3)
            train = Train(f"{self.color.name[0].upper()}L{tid}", Train.status.in_service)
            trains.append(train)
            if b_dir:
                self.stations[curr_loc].arrive_b(train, None, None)
            else:
                self.stations[curr_loc].arrive_a(train, None, None)
            curr_loc, b_dir = self._get_next_idx(curr_loc, b_dir)
        return trains

    def run(self, timestamp, time_step):
        """Advances trains between stations in the simulation. Runs turnstiles."""
        self._advance_turnstiles(timestamp, time_step)
        self._advance_trains()

    def close(self):
        """Called to stop the simulation"""
        _ = [station.close() for station in self.stations]

    def _advance_turnstiles(self, timestamp, time_step):
        """Advances the turnstiles in the simulation"""
        _ = [station.turnstile.run(timestamp, time_step) for station in self.stations]

    def _advance_trains(self):
        """Advances trains between stations in the simulation"""
        # Find the first b train
        curr_train, curr_index, b_direction = self._next_train()
        self.stations[curr_index].b_train = None
        trains_advanced = 0
        while trains_advanced < self.num_trains - 1:
            # The train departs the current station
            if b_direction is True:
                self.stations[curr_index].b_train = None
            else:
                self.stations[curr_index].a_train = None
            prev_station = self.stations[curr_index].station_id
            prev_dir = "b" if b_direction else "a"
            # Advance this train to the next station
            curr_index, b_direction = self._get_next_idx(curr_index, b_direction, step_size=1)
            if b_direction is True:
                self.stations[curr_index].arrive_b(curr_train, prev_station, prev_dir)
            else:
                self.stations[curr_index].arrive_a(curr_train, prev_station, prev_dir)
            # Find the next train to advance
            move = 1 if b_direction else -1
            next_train, curr_index, b_direction = self._next_train(curr_index + move, b_direction)
            if b_direction is True:
                curr_train = self.stations[curr_index].b_train
            else:
                curr_train = self.stations[curr_index].a_train
            # NOTE(review): the two station lookups just above are immediately
            # overwritten here, so they appear redundant -- confirm before
            # removing.
            curr_train = next_train
            trains_advanced += 1
        # The last train departs the current station
        if b_direction is True:
            self.stations[curr_index].b_train = None
        else:
            self.stations[curr_index].a_train = None
        # Advance last train to the next station
        prev_station = self.stations[curr_index].station_id
        prev_dir = "b" if b_direction else "a"
        curr_index, b_direction = self._get_next_idx(curr_index, b_direction, step_size=1)
        if b_direction is True:
            self.stations[curr_index].arrive_b(curr_train, prev_station, prev_dir)
        else:
            self.stations[curr_index].arrive_a(curr_train, prev_station, prev_dir)

    def _next_train(self, start_index=0, b_direction=True, step_size=1):
        """Given a starting index, finds the next train in either direction"""
        if b_direction is True:
            curr_index = self._next_train_b(start_index, step_size)
            # No b train ahead: wrap to scanning the a direction from the end.
            if curr_index == -1:
                curr_index = self._next_train_a(len(self.stations) - 1, step_size)
                b_direction = False
        else:
            curr_index = self._next_train_a(start_index, step_size)
            if curr_index == -1:
                curr_index = self._next_train_b(0, step_size)
                b_direction = True
        if b_direction is True:
            return self.stations[curr_index].b_train, curr_index, True
        return self.stations[curr_index].a_train, curr_index, False

    def _next_train_b(self, start_index, step_size):
        """Finds the next train in the b direction, if any"""
        for i in range(start_index, len(self.stations), step_size):
            if self.stations[i].b_train is not None:
                return i
        return -1

    def _next_train_a(self, start_index, step_size):
        """Finds the next train in the a direction, if any"""
        for i in range(start_index, 0, -step_size):
            if self.stations[i].a_train is not None:
                return i
        return -1

    def _get_next_idx(self, curr_index, b_direction, step_size=None):
        """Calculates the next station index. Returns next index and if it is b direction"""
        if step_size is None:
            # Default stride spreads num_trains evenly over both directions.
            step_size = int((self.num_stations * Line.num_directions) / self.num_trains)
        if b_direction is True:
            next_index = curr_index + step_size
            if next_index < self.num_stations:
                return next_index, True
            else:
                # Past the terminal: reflect back in the a direction.
                return self.num_stations - (next_index % self.num_stations), False
            
        else:
            next_index = curr_index - step_size
            if next_index > 0:
                return next_index, False
            else:
                # Past the origin: reflect back in the b direction.
                return abs(next_index), True

    def __str__(self):
        return "\n".join(str(station) for station in self.stations)

    def __repr__(self):
        return str(self)
|
[
"sinitsinvanya@gmail.com"
] |
sinitsinvanya@gmail.com
|
1321ca6e8b06bce640ef7a93d92acd4a9d7814d7
|
9f0f5816b9d810c9ce01c56588024e1c804809fe
|
/study/day9/9-1.py
|
1061d19b08630454e68f6d7886f13dbd06d09571
|
[] |
no_license
|
parkwisdom/Python-Study-step1
|
bf8cc8c5f89bfb9ccbb395a3827e23d4f0d6ae9a
|
bae2f5653c5a0d1eac1d4b89476ece7e0802d33b
|
refs/heads/master
| 2020-04-03T13:49:58.990930
| 2018-10-30T00:37:29
| 2018-10-30T00:37:29
| 155,300,210
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# Earlier commented-out experiments (dict built via zip, nested dicts,
# sys.maxsize) removed as dead code; see version-control history if needed.

# Category -> weight mapping ('광고' = advertisement, '중요' = important).
category_dict = {'광고': 5, '중요': 5}
print(category_dict.values())
|
[
"43980901+parkwisdom@users.noreply.github.com"
] |
43980901+parkwisdom@users.noreply.github.com
|
d4f12ad333b598e5780635248dd3309ec65023be
|
8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f
|
/0001_0599/238.py
|
15a9a2eef432e94fcbe8a8a74aab146a918dbe24
|
[] |
no_license
|
renjieliu/leetcode
|
e1caf13c18a8107ed9252588b339fb76bcb1b246
|
4668b64fcb9320b6c316d8608fc61911ce43b6c7
|
refs/heads/master
| 2023-03-18T18:16:06.187741
| 2023-03-14T20:31:59
| 2023-03-14T20:31:59
| 128,823,819
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
class Solution:
    def productExceptSelf(self, nums: 'List[int]') -> 'List[int]':
        """For each index i, return the product of every element except nums[i].

        One pass counts zeros and accumulates the product of the non-zero
        elements; the answer is then filled by cases (no division by zero).
        """
        zero_at = -1   # index of the single zero seen so far, if any
        product = 1    # running product of all non-zero elements
        for idx, value in enumerate(nums):
            if value == 0:
                if zero_at != -1:
                    # Two or more zeros: every position's product is zero.
                    return [0] * len(nums)
                zero_at = idx
            else:
                product *= value
        if zero_at != -1:
            # Exactly one zero: only that slot gets the non-zero product.
            result = [0] * len(nums)
            result[zero_at] = product
            return result
        # No zeros: divide the total product by each element (exact division).
        return [product // value for value in nums]
# previous approach
# def productExceptSelf(nums: 'List[int]'):
# #from left to right, make the first element as 1.
# ini =1
# output =[]
# for i in range(len(nums)):
# output.append(ini)
# ini*=nums[i]
# #from right to left
# ini = 1
# for i in range(len(nums) - 1, -1, -1):
# output[i] = output[i] * ini
# ini *= nums[i]
# return output
# print(productExceptSelf([1,2,3,4]))
|
[
"anlrj@qq.com"
] |
anlrj@qq.com
|
7d3d9f6ca55f346b1a58b8b0bed7311a7c5743e0
|
98590747113ca3022c67c8bc6332b2bf48d7073e
|
/maximum_depth_of_binary_tree.py
|
7ce50d022a3ba784b2b41b94dbb425e71483882f
|
[] |
no_license
|
buxizhizhoum/leetcode
|
a54291519a23fe82e9f9620e5a2266833696f005
|
cf4235170db3629b65790fd0855a8a72ac5886f7
|
refs/heads/master
| 2022-06-04T02:54:26.381077
| 2022-04-01T06:58:19
| 2022-04-01T06:58:19
| 116,791,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Note: A leaf is a node with no children.
Example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its depth = 3.
"""
# Definition for a binary tree node.
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
class Solution(object):
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        # height() counts edges, so the node count is height + 1.
        return self.height(root) + 1

    @classmethod
    def height(cls, node):
        """Height in edges of the subtree at `node` (-1 for an empty tree)."""
        if node is None:
            return -1
        # Unlike min-depth, a missing subtree can never win max(), so no
        # special-casing of one-child nodes is needed here: the absent side
        # simply contributes -1 and loses.
        return max(cls.height(node.left), cls.height(node.right)) + 1
if __name__ == "__main__":
    # Build a small test tree: 6 -> (5, 9); 9 -> (7, 11); 11 -> (None, 13).
    r = TreeNode(6)
    r.left = TreeNode(5)
    r.right = TreeNode(9)
    # r.left.left = TreeNode(3)
    # r.left.left.left = TreeNode(3)
    # r.left.right = TreeNode(4)
    r.right.left = TreeNode(7)
    r.right.right = TreeNode(11)
    r.right.right.right = TreeNode(13)
    s = Solution()
    res = s.maxDepth(r)
    print(res)  # expected: 4 (path 6 -> 9 -> 11 -> 13)
|
[
"mapeaks@126.com"
] |
mapeaks@126.com
|
461d2c40bb4f8a3b5ab23a90655470e60eda5a32
|
f663f7baa042c6d5e71ceb22ce3d304d5bcbfd15
|
/archived_lectures/Fall_2019/old_lectures/2018/Week_5/test_util.py
|
cbd0d779dd67ed6a702944cc723c2e001dbf44e6
|
[
"MIT"
] |
permissive
|
ModelEngineering/advancing-biomedical-models
|
2dcf0fb35629a4cce2b30b5c18490c9837875b64
|
7469576d484d894febb161c3cb48b723dfbcaf1b
|
refs/heads/master
| 2022-11-12T10:24:23.407006
| 2022-10-19T08:33:29
| 2022-10-19T08:33:29
| 132,825,741
| 2
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
"""
Tests for simulation utility functions.
To run: python test_util.py
"""
import util
import lmfit
import numpy as np
import unittest
# Module-level constants used to populate the shared test parameters.
ka = 0.4
v0 = 10
kb = 0.32
kc = 0.4
PARAMETERS = lmfit.Parameters()
NAMES = ["ka", "v0", "kb", "kc"]
for name in NAMES:
    # Names starting with "v" get the larger bound (20), others 2.
    if name[0] == "v":
        maxval = 20
    else:
        maxval = 2
    # eval() looks up the module constant of the same name; acceptable here
    # only because NAMES is a fixed literal list, never user input.
    PARAMETERS.add(name, value=eval(name), min=0, max=maxval)
# NOTE(review): ten references to the *same* Parameters object, not copies --
# fine for these read-only tests, but mutation would affect all "copies".
PARAMETERS_COLLECTION = [PARAMETERS for _ in range(10)]
# NOTE(review): IGNORE_TEST is never read in this file -- confirm it is
# intentional before removing.
IGNORE_TEST = True
class TestFunctions(unittest.TestCase):
    """Unit tests for the helper functions in util."""

    def testFoldGenerator(self):
        # The generator should yield exactly NUM_FOLDS splits.
        NUM_FOLDS = 4
        generator = util.foldGenerator(10, NUM_FOLDS)
        size = len([g for g in generator])
        self.assertEqual(size, NUM_FOLDS)

    def testAggregateParameters(self):
        # Aggregating identical parameter sets must reproduce the originals.
        parameters = util.aggregateParameters(PARAMETERS_COLLECTION)
        self.assertTrue(isinstance(parameters, lmfit.Parameters))
        for name in parameters:
            self.assertTrue(np.isclose(
                PARAMETERS.get(name), parameters.get(name)
            ))

    def testMakeParametersStatistics(self):
        # Each entry is (mean, spread); the mean matches the aggregate value.
        parameters = util.aggregateParameters(PARAMETERS_COLLECTION)
        result = util.makeParametersStatistics(PARAMETERS_COLLECTION)
        for name in result.keys():
            self.assertEqual(len(result[name]), 2)
            self.assertTrue(np.isclose(result[name][0], parameters.get(name)))

    def testPlotFit(self):
        # Smoke test
        util.plotFit(range(10), range(10), is_plot=False)

    def testGenerateBootstrapData(self):
        # Bootstrapped data keeps the length and roughly the noise level.
        NUM = 1000
        STD = 1.0
        y_fit = np.array(range(NUM))
        y_obs = y_fit + np.random.normal(0, STD, NUM)
        for _ in range(10):
            y_new = util.generateBootstrapData(y_obs, y_fit)
            self.assertEqual(len(y_new), len(y_fit))
            self.assertTrue(np.std(y_new - y_fit), STD)

    def testGetParameterData(self):
        # All names present; identical inputs give zero spread per parameter.
        result = util.getParameterData(PARAMETERS_COLLECTION)
        for name in NAMES:
            self.assertTrue(name in result)
            self.assertTrue(np.isclose(np.std(result[name]), 0.0))


if __name__ == "__main__":
    unittest.main()
|
[
"jlheller@uw.edu"
] |
jlheller@uw.edu
|
0856a12681340f456cc6cc7d768fd51a3f9de6b1
|
0d76ba0da5446f20e500b7e31f53821b14cb49d8
|
/Rosalind/python/revc.py
|
5f9a8f42871395c24b14702d4eb8085fa05e2fe5
|
[] |
no_license
|
filwaitman/playground
|
948aa687be06d456c86b65ee3ab5fb9792149459
|
dfdfab9002bff3a04f37e0c161363a864cd30f3e
|
refs/heads/master
| 2021-01-12T12:59:49.057832
| 2020-01-26T18:51:02
| 2020-01-26T18:51:02
| 68,865,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding: utf-8 -*-
# http://rosalind.info/problems/revc/
import sys
def reverse_complement(string):
    """Return the reverse complement of DNA string `string` (A<->T, C<->G).

    Uses a direct base-pair mapping in one pass.  The original chained
    .replace() calls with an 'X' placeholder, which would corrupt any input
    that already contains 'X' and scanned the string six times.
    Characters outside ACGT are passed through unchanged, as before.
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs.get(base, base) for base in reversed(string))
if __name__ == '__main__':
    string = sys.argv[1]
    # The original computed the result and discarded it; print it so the
    # script actually emits the answer on stdout.
    print(reverse_complement(string))
|
[
"filwaitman@gmail.com"
] |
filwaitman@gmail.com
|
acacce0291355a9534ff0563af6514333051fb77
|
b6aa9768dbac327943e0220df1c56ce38adc6de1
|
/91_decode-ways.py
|
039d1992172f104ffa65150443dd06340ea20d3c
|
[] |
no_license
|
Khrystynka/LeetCodeProblems
|
f86e4c1e46f70f874924de137ec5efb2f2518766
|
917bd000c2a055dfa2633440a61ca4ae2b665fe3
|
refs/heads/master
| 2021-03-17T00:51:10.102494
| 2020-09-28T06:31:03
| 2020-09-28T06:31:03
| 246,954,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
# Problem Title: Decode Ways
class Solution(object):
    def numDecodings(self, s):
        """Count decodings of digit string `s` where 'A'=1 .. 'Z'=26.

        Rolling dynamic programming: `res` is the count up to the current
        digit; `p_1` / `p_2` are the counts one and two digits back.

        :type s: str
        :rtype: int
        """
        # list(...) is required on Python 3, where map() returns a lazy
        # iterator and the len()/indexing below would raise TypeError.
        lst = list(map(int, s))
        p_1 = 1
        res = 1
        for i in range(len(lst)):
            p_2 = p_1
            p_1 = res
            if lst[i] == 0:
                # A zero decodes only as the tail of 10 or 20.
                if i == 0 or lst[i-1] > 2 or lst[i-1] == 0:
                    return 0
                else:
                    res = p_2
            else:
                res = p_1
            # A valid two-digit code (10-26) adds the count from two back.
            if i != 0 and lst[i] > 0 and (lst[i-1] == 1 or (lst[i-1] == 2 and lst[i] <= 6)):
                res += p_2
        return res
|
[
"khrystyna@Khrystynas-MacBook-Pro.local"
] |
khrystyna@Khrystynas-MacBook-Pro.local
|
523230368fdadb0448796ee631c05daf2fe86381
|
8d0be6bdb480caa895b6cea1884b40c51cf25156
|
/experiment/python/convert-json-and-yaml.py
|
ed765819326d33d274bdfd85e90245538e9aa335
|
[
"MIT"
] |
permissive
|
atb00ker/scripts-lab
|
8642138574ac3403278c004b2c67f0200b29a83d
|
71a5cc9c7f301c274798686db4a227e84b65926a
|
refs/heads/master
| 2021-09-09T22:15:36.133142
| 2021-09-04T18:30:31
| 2021-09-04T18:30:54
| 163,108,084
| 2
| 0
|
MIT
| 2019-10-08T19:44:00
| 2018-12-25T19:54:54
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
import json
import yaml

# JSON -> YAML: read input.json and dump it as block-style YAML.
with open('input.json') as js:
    data = json.load(js)
with open('output.yaml', 'w') as yml:
    yaml.dump(data, yml, default_flow_style=False, allow_unicode=True)

# YAML -> JSON: read input.yml and serialize it back to JSON.
with open('input.yml') as yml:
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from untrusted input -- prefer yaml.safe_load.
    data = yaml.load(yml)
with open('output.json', 'w') as js:
    js.write(json.dumps(data))
|
[
"ajay39in@gmail.com"
] |
ajay39in@gmail.com
|
d33a22fa1084f81223eba1e0e845c919e416ab5f
|
713f9168a7ba68740bb9b4ea6994e853a56d2d5c
|
/2022-05-30-python/day1/save_wikipedia.py
|
3e83f5e8c22cdae5d35f0f32144e67b89203c3cc
|
[] |
no_license
|
marko-knoebl/courses-code
|
ba7723c9a61861b037422670b98276fed41060e2
|
faeaa31c9a156a02e4e9169bc16f229cdaee085d
|
refs/heads/master
| 2022-12-29T02:13:12.653745
| 2022-12-16T09:21:18
| 2022-12-16T09:21:18
| 142,756,698
| 16
| 10
| null | 2022-03-08T22:30:11
| 2018-07-29T11:51:04
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
from urllib.request import urlopen

# make a HTTP request
req = urlopen("https://en.wikipedia.org")
# read content as utf-8 string
content = req.read().decode("utf-8")
# save to file -- the context manager guarantees the handle is flushed and
# closed even if write() raises (the original left the file open on error).
with open("wikipedia.html", "w", encoding="utf-8") as file:
    file.write(content)
|
[
"marko.kn@gmail.com"
] |
marko.kn@gmail.com
|
17e90b8c30eaba457e03faf76fca9908aaf1b8b0
|
32cf9c3099c36a46804e393dd1491a8954f50263
|
/2019.04.11 - 프로필, 인스타 생성/PROJECT05_10/PROJECT05/boards/migrations/0001_initial.py
|
871a328c5df4bde811a7395a602cfaa923694ed8
|
[] |
no_license
|
ash92kr/s_code
|
ce3bda6a403600892750e181dca5ed8c4caebcb1
|
92eace551d132b91ee91db6c0afd38b93f9b647b
|
refs/heads/master
| 2020-04-12T00:27:07.043091
| 2019-05-21T08:17:39
| 2019-05-21T08:17:39
| 162,200,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
# Generated by Django 2.1.7 on 2019-03-21 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=10)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"ash92kr@gmail.com"
] |
ash92kr@gmail.com
|
9150360bc6801d8550fa7251315721013196303e
|
34d8cbc0471be7e051466fbd14a0fc7730bff133
|
/peer_recieve.py
|
e8884c30547531ab674cc7f82cbb8b65bbe95427
|
[] |
no_license
|
jk-programs/basiccoin
|
59e35c1fb547add8d2fe1156fbc7fc348a1d8f0f
|
6fd285525febd82733b7e7ef604428c3a8fe3ff4
|
refs/heads/master
| 2021-01-18T18:45:19.900004
| 2014-09-04T21:01:31
| 2014-09-04T21:01:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
"""When a peer talks to us, this is how we generate a response. This is the external API.
"""
import networking, custom, tools, blockchain, time
def security_check(dic):
if 'version' not in dic or dic['version'] != custom.version:
return {'bool': False, 'error': 'version'}
else:
#we could add security features here.
return {'bool': True, 'newdic': dic}
def blockCount(dic, DB):
length = DB['length']
if length >= 0:
return {'length': length,
'diffLength': DB['diffLength']}
else:
return {'length': -1, 'diffLength': '0'}
def rangeRequest(dic, DB):
ran = dic['range']
out = []
counter = 0
while (len(tools.package(out)) < custom.max_download
and ran[0] + counter <= ran[1]):
block = tools.db_get(ran[0] + counter, DB)
if 'length' in block:
out.append(block)
counter += 1
return out
def txs(dic, DB):
return DB['txs']
def pushtx(dic, DB):
DB['suggested_txs'].put(dic['tx'])
return 'success'
def pushblock(dic, DB):
if 'blocks' in dic:
for block in dic['blocks']:
DB['suggested_blocks'].put([block, dic['peer']])
else:
DB['suggested_blocks'].put([dic['block'], dic['peer']])
return 'success'
def main(dic, DB):
funcs = {'blockCount': blockCount, 'rangeRequest': rangeRequest,
'txs': txs, 'pushtx': pushtx, 'pushblock': pushblock}
if 'type' not in dic:
return 'oops: ' +str(dic)
if dic['type'] not in funcs:
return ' '.join([dic['type'], 'is not in the api'])
check = security_check(dic)
if not check['bool']:
return check
try:
return funcs[dic['type']](check['newdic'], DB)
except:
pass
|
[
"zack.bitcoin@gmail.com"
] |
zack.bitcoin@gmail.com
|
7dfa2ddf781a50a0947412c4c7840743bc9f8eff
|
e3c6087ce7178c0608b1501e14c8f905fd14c3d2
|
/src/app/push/user/user_manager.py
|
d23e38bdf3b2d4b20ea7c0717b71d2bff3d2b60d
|
[] |
no_license
|
campanulamediuml/iot_server
|
314fcef9b6bec2f1354390ecad2d0acb18dcf06e
|
6657ad9895445fb668f027affc6346eeeda58e67
|
refs/heads/master
| 2022-12-14T02:53:05.007754
| 2020-02-01T08:12:02
| 2020-02-01T08:12:02
| 237,582,696
| 1
| 0
| null | 2022-12-08T07:03:48
| 2020-02-01T08:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,820
|
py
|
from app.push.user.user import User
from app.push.relay import Relay
from data.server import Data
import time
class UserManager(object):
def __init__(self):
self._user_dict = {}
self._user_handler_dict = {}
self._admin_dict = {}
def update(self):
# print('kill')
del_list = list()
for user_id, user in self._user_dict.items():
if user.has_heartbeat() is True:
continue
del_list.append(user_id)
for user_id in del_list:
Relay.user_exit(user_id)
# 更新用户管理器
exit_admin = []
for admin_sid,last_connect_time in self._admin_dict.items():
if int(time.time()) -last_connect_time > 10:
exit_admin.append(admin_sid)
for i in exit_admin:
self._admin_dict.pop(i)
print('管理员',i,'长时间无心跳,已被杀死')
return
def update_heart_beat(self,sid):
if sid in self._admin_dict:
self._admin_dict[sid] = int(time.time())
return
user = self.get_user_by_sid(sid)
if user is None:
Relay.send_disconnect(sid)
else:
user.update_heart_beat()
return
# print(self._admin_dict)
def get_user(self,user_id):
if user_id in self._user_dict:
return self._user_dict[user_id]
return None
def add_admin(self,sid):
self._admin_dict[sid] = int(time.time())
print(sid,'链接上推送服务器了!')
return
def is_admin(self,sid):
if sid in self._admin_dict:
return True
return False
# 后台链接
def kill_admin(self,sid):
if sid in self._admin_dict:
self._admin_dict.pop(sid)
return
# 清除admin
def get_all_admin(self):
return self._admin_dict
# =============管理后台链接=============
def login(self,user_id):
user = self.create_user(user_id)
# 注册用户信息
self._user_dict[user_id] = user
self._user_handler_dict[user._sid] = user
return
def create_user(self,user_id,sid):
user = User(user_id)
user.init_from_data(sid)
return user
# 创建用户
def get_user_by_sid(self,sid):
if sid in self._user_handler_dict:
return self._user_handler_dict[sid]
else:
return None
# 通过sid获取用户
def user_exit(self,user_id):
# 玩家退出
user = self._user_dict[user_id]
if user_id in self._user_dict:
self._user_dict.pop(user_id)
if user.get_sid() in self._user_handler_dict:
self._user_handler_dict.pop(user.get_sid())
return
|
[
"campanulamediuml@gmail.com"
] |
campanulamediuml@gmail.com
|
64114d8d6de771147d00d1ce7e7e5513c4b883ea
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_photostats.py
|
b655b753c702e900bf321ce1a12133ccbdddb3c3
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
#calss header
class _PHOTOSTATS():
def __init__(self,):
self.name = "PHOTOSTATS"
self.definitions = photostat
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['photostat']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0199c7123b0f48afe8371149ef949d0f1f1808d2
|
f662aa3ce7896ca0283cae38df8ef824c1b80c9a
|
/library/tests/test_setup.py
|
9579313b7b5b97b052f09122ff03f5decae81e69
|
[
"MIT"
] |
permissive
|
pimoroni/plasma
|
bd7ddebbc60ae7cc9c2561408b52fc46bf810672
|
7857c44255285aac061a9064dd033fd63bbbda29
|
refs/heads/master
| 2023-02-10T13:27:17.565867
| 2023-01-30T17:27:28
| 2023-01-30T17:27:28
| 155,544,928
| 12
| 9
|
MIT
| 2021-11-06T04:14:19
| 2018-10-31T11:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 369
|
py
|
"""Test Plasma basic initialisation."""
import mock
def test_legacy_setup(GPIO):
"""Test init succeeds and GPIO pins are setup."""
from plasma import legacy as plasma
plasma.show()
GPIO.setmode.assert_called_once_with(GPIO.BCM)
GPIO.setup.assert_has_calls([
mock.call(plasma.DAT, GPIO.OUT),
mock.call(plasma.CLK, GPIO.OUT)
])
|
[
"phil@gadgetoid.com"
] |
phil@gadgetoid.com
|
ce67b4c7a1213011dedbd40bb03a735ddee6c245
|
93a7db386dfa0ac0dc369cc7f4b974224c801d8d
|
/deploy/ngram-all/scripts/ngram-30.py
|
4575b6d9056180b9f17d03dcdbc9ea32e3fb42f8
|
[] |
no_license
|
lingxiao/good-great-combo
|
e051f20c89b7317a14ca5cee357bda7b095ce174
|
4d2691866bc21e2c542354ad3aae6f369eb86c87
|
refs/heads/master
| 2021-01-19T19:30:43.391759
| 2017-04-09T12:35:15
| 2017-04-09T12:35:15
| 83,699,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import re
import networkx as nx
from utils import *
from scripts import *
from app.config import PATH
############################################################
'''
paths
'''
_root = os.path.join(PATH['directories']['deploy'], 'ngram-all')
_word_dir = os.path.join(_root, 'words')
_word_pair_dir = os.path.join(_root, 'pairs')
_output_dir = os.path.join(_root, 'outputs')
_script_dir = os.path.join(_root ,'scripts')
'''
@Use: collect ngram counts
'''
batch = 30
word_path = os.path.join(_word_dir , 'batch-' + str(batch) + '.txt')
word_pair_path = os.path.join(_word_pair_dir , 'batch-' + str(batch) + '.txt')
pattern_path = PATH['assets']['patterns']
ngram_dir = PATH['ngrams']['full']
out_dir = _output_dir
log_dir = PATH['directories']['log']
# ngram_by_words( word_path
# , ngram_dir
# , os.path.join(out_dir,'batch-' + str(batch) + '.txt')
# , log_dir
# , debug = False)
collect_ngram_patterns( word_pair_path
, pattern_path
, ngram_dir
, out_dir
, log_dir
, debug = False)
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
7418fe42514906ca3b54fe3292c0e73322f20b47
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/referer_rsp.py
|
5e464eeb471ed9c49f7aa9e96766d4e22f3f2f69
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,440
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RefererRsp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'referer_type': 'int',
'referer_list': 'str',
'include_empty': 'bool'
}
attribute_map = {
'referer_type': 'referer_type',
'referer_list': 'referer_list',
'include_empty': 'include_empty'
}
def __init__(self, referer_type=None, referer_list=None, include_empty=None):
"""RefererRsp - a model defined in huaweicloud sdk"""
self._referer_type = None
self._referer_list = None
self._include_empty = None
self.discriminator = None
if referer_type is not None:
self.referer_type = referer_type
if referer_list is not None:
self.referer_list = referer_list
if include_empty is not None:
self.include_empty = include_empty
@property
def referer_type(self):
"""Gets the referer_type of this RefererRsp.
Referer类型。取值:0代表不设置Referer过滤;1代表黑名单;2代表白名单。默认取值为0。
:return: The referer_type of this RefererRsp.
:rtype: int
"""
return self._referer_type
@referer_type.setter
def referer_type(self, referer_type):
"""Sets the referer_type of this RefererRsp.
Referer类型。取值:0代表不设置Referer过滤;1代表黑名单;2代表白名单。默认取值为0。
:param referer_type: The referer_type of this RefererRsp.
:type: int
"""
self._referer_type = referer_type
@property
def referer_list(self):
"""Gets the referer_list of this RefererRsp.
请输入域名或IP地址,以“;”进行分割,域名、IP地址可以混合输入,支持泛域名添加。输入的域名、IP地址总数不超过100个。当设置防盗链时,此项必填。
:return: The referer_list of this RefererRsp.
:rtype: str
"""
return self._referer_list
@referer_list.setter
def referer_list(self, referer_list):
"""Sets the referer_list of this RefererRsp.
请输入域名或IP地址,以“;”进行分割,域名、IP地址可以混合输入,支持泛域名添加。输入的域名、IP地址总数不超过100个。当设置防盗链时,此项必填。
:param referer_list: The referer_list of this RefererRsp.
:type: str
"""
self._referer_list = referer_list
@property
def include_empty(self):
"""Gets the include_empty of this RefererRsp.
是否包含空Referer。如果是黑名单并开启该选项,则表示无referer不允许访问。如果是白名单并开启该选项,则表示无referer允许访问。默认不包含。
:return: The include_empty of this RefererRsp.
:rtype: bool
"""
return self._include_empty
@include_empty.setter
def include_empty(self, include_empty):
"""Sets the include_empty of this RefererRsp.
是否包含空Referer。如果是黑名单并开启该选项,则表示无referer不允许访问。如果是白名单并开启该选项,则表示无referer允许访问。默认不包含。
:param include_empty: The include_empty of this RefererRsp.
:type: bool
"""
self._include_empty = include_empty
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RefererRsp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
53cfa943c71da187b76615b9e210acd591c177fb
|
e616ea35ead674ebb4e67cae54768aaaeb7d89c9
|
/project/alma/groups/migrations/0001_initial.py
|
ea300a72ba26028fa8b8e7cb76ef846668025ebd
|
[] |
no_license
|
VWApplications/VWAlmaAPI
|
12bb1888533cf987739b0e069737afa6337141e1
|
3a8009b17518384c269dfee3c8fe44cbe2567cc0
|
refs/heads/master
| 2022-04-02T10:26:49.832202
| 2020-02-12T04:46:31
| 2020-02-12T04:46:31
| 161,098,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
# Generated by Django 2.1.4 on 2019-10-20 03:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('disciplines', '0007_remove_discipline_was_group_provided'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Data na qual o objeto foi criado.', verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Data na qual o objeto foi atualizado.', verbose_name='Atualizado em')),
('title', models.CharField(help_text='Título do grupo', max_length=50, verbose_name='Título')),
('students_limit', models.PositiveIntegerField(default=0, help_text='Limite de estudantes do grupo', verbose_name='Limite de estudantes')),
('discipline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='groups', to='disciplines.Discipline', verbose_name='Discipline')),
('students', models.ManyToManyField(blank=True, related_name='student_groups', to=settings.AUTH_USER_MODEL, verbose_name='Students')),
],
options={
'db_table': 'groups',
'ordering': ['title', 'created_at'],
},
),
]
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
14aeae04e3c6f6a5a94f8f36cffad0f269855994
|
68f757e7be32235c73e316888ee65a41c48ecd4e
|
/백준_python/2000/2500_2599/2501.py
|
c49d55d4f683ab54758e774c9f7bb12d31669f5c
|
[] |
no_license
|
leejongcheal/algorithm_python
|
b346fcdbe9b1fdee33f689477f983a63cf1557dc
|
f5d9bc468cab8de07b9853c97c3db983e6965d8f
|
refs/heads/master
| 2022-03-05T20:16:21.437936
| 2022-03-03T01:28:36
| 2022-03-03T01:28:36
| 246,039,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
def f(N):
for i in range(1, N//2+1):
if N % i == 0:
yield i
yield N
N, K = map(int, input().split())
L = list(f(N))
try:
print(L[K - 1])
except:
print(0)
|
[
"aksndk123@naver.com"
] |
aksndk123@naver.com
|
bb3964e9c470d43dd2e31774435b4096a7129340
|
37ba62db61fc4ec62634638763a984cbfbe40fe3
|
/day09/02 作业讲解.py
|
5caee326901bbae6f5a9b2598f304b118ebe05ef
|
[] |
no_license
|
lt910702lt/python
|
ca2768aee91882c893a9bc6c1bdd1b455ebd511f
|
c6f13a1a9461b18df17205fccdc28f89854f316c
|
refs/heads/master
| 2020-05-09T22:54:22.587206
| 2019-09-17T09:02:53
| 2019-09-17T09:02:53
| 181,485,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
###第一题: 有如下文件,里面的内容如下,分别完成以下功能:
'''
老男孩是一个培训机构
为学生服务,
为学生未来,
都是骗人的,哈哈
'''
## 1.1. 将原文件全部读出来并打印
# f = open("oldboy", mode="r", encoding="utf-8")
# s = f.read()
# f.flush()
# f.close()
# print(s)
# 1.2. 在原文件后面追加一行内容:信不信由你,反正我信了
# f = open("oldboy", mode="a", encoding="utf-8")
# f.write("\n信不信由你.反正我信了")
# f.flush()
# f.close()
# 1.3. 将原文件全部读出来,并在后面添加一行内容:信不信由你,反正我信了
# f = open("oldboy", mode="r+", encoding="utf-8")
# f.read()
# f.write("\n信不信由你.反正我信了")
# f.flush()
# f.close()
# 1.4. 将源文件全部清空,换成下面内容
# f = open("oldboy", mode="w+", encoding="utf-8")
# f.write('''每天坚持一点,
# 每天努力一点,
# 每天多思考一点,
# 慢慢你会发现,
# 你的进步越来越大。
# ''')
# f.flush()
# f.close()
# 1.5. 将源文件内容全部读出来,并在"都是骗人的,哈哈"这一行前面加入"你们九信吧",将更改之后的新内容,写入到一个新的文件a1.txt
# import os
#
# with open("oldboy", mode="r", encoding="utf-8") as f1, open("oldboy_new", mode="w", encoding="utf-8") as f2:
# s = f1.read()
# ss = s.replace("都是骗人的,哈哈", "你们就信吧!\n都是骗人的,哈哈")
# f2.write(ss)
# os.remove("oldboy")
# os.rename("oldboy_new", "oldboy")
###第二题: 有文件内容如下,通过代码,将其构建成这种数据类型
# 序号 部门 人数 平均年龄 备注
# 1 Python 30 26 单身狗
# 2 Linux 26 30 没对象
# 3 运营部 20 24 女生多
# [{'序号': '1', '部门': 'Python', '人数': '30', "平均年龄": '26', '备注': '单身狗'}]
# ......
f = open("a6", mode="r", encoding='utf-8')
line = f.readline()
lst = line.split() # 第一行切割完成,基础数据就完成了
result = [] # 定义一个结果列表
for lin in f:
ll = lin.split() # 每一行的数据
dic = {} # 将每一行切割后的结果放入到不同的字典里
for i in range(len(ll)):
dic[lst[i]] = ll[i]
result.append(dic) # 将字典添加到结果列表里
print(result)
|
[
"1103631738@qq.com"
] |
1103631738@qq.com
|
8552b00e5269659b7760cfaf3cb055f996f35ec2
|
ff853d7b3773db8de783fd26a76bd92742f85384
|
/0x05-python-exceptions/4-list_division.py
|
ed4e25621fd48459d5909c213142dd2a0c614a32
|
[] |
no_license
|
stuartses/holbertonschool-higher_level_programming
|
1b3315f624f9c2dc0c63ee3481021c5ed093a81d
|
40497b632bf71c3b877cb61fce79b9d82b4519da
|
refs/heads/master
| 2020-09-29T00:51:57.791491
| 2020-05-14T16:51:44
| 2020-05-14T16:51:44
| 226,905,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
#!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
div = 0
div_list = []
for i in range(list_length):
try:
div = my_list_1[i] / my_list_2[i]
except ZeroDivisionError:
print("division by 0")
div = 0
except TypeError:
print("wrong type")
div = 0
except IndexError:
print("out of range")
div = 0
finally:
div_list.append(div)
return div_list
|
[
"stuart.ses@hotmail.com"
] |
stuart.ses@hotmail.com
|
2dae655d7f22efba5c7f9cf43a1aba9ceec09a15
|
48eb84be45129c5904447e36a66170e739a8f8a0
|
/Gconnect/housename/migrations/0005_auto_20180925_1333.py
|
adbd8659975c5a028fa555fbcdeceab9df929f0d
|
[] |
no_license
|
anjuz1/project
|
efd806e8fff976168d70711eda36fcf42e3e9dbc
|
295ed8efcfff622a64e9072bd2607fe8c147a957
|
refs/heads/master
| 2020-04-07T00:15:41.259953
| 2018-11-16T16:52:37
| 2018-11-16T16:52:37
| 157,896,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
# Generated by Django 2.0.7 on 2018-09-25 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('housename', '0004_housename_house_mail'),
]
operations = [
migrations.AlterField(
model_name='housename',
name='house_adhar',
field=models.CharField(default='', max_length=38),
),
migrations.AlterField(
model_name='housename',
name='house_district',
field=models.CharField(default='Ernakulam', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_mail',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_name',
field=models.CharField(default='', max_length=45),
),
migrations.AlterField(
model_name='housename',
name='house_po',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_street',
field=models.CharField(default='', max_length=50),
),
]
|
[
"45093924+anjuz1@users.noreply.github.com"
] |
45093924+anjuz1@users.noreply.github.com
|
6f9382fca59ef7eb65bcb7683c81b7757f35b90d
|
f65163f0670b04ed3d68632b2d020186947cf2d7
|
/bundology/urls.py
|
69855c319f387c9c03135ffd03a1dd629ac94df2
|
[
"MIT"
] |
permissive
|
ImmaculateObsession/bundology
|
92fb23527b642c70393e710ed042c3e661d014b7
|
cbf3859fc51f4fcf7f0da608af37aeb55671aed4
|
refs/heads/master
| 2021-01-01T06:50:43.114328
| 2013-09-29T18:43:32
| 2013-09-29T18:43:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from bundles.views import HomeView
urlpatterns = patterns('',
# Examples:
url(r'^$', HomeView.as_view(), name='home'),
# url(r'^bundology/', include('bundology.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
[
"pjj@philipjohnjames.com"
] |
pjj@philipjohnjames.com
|
2384c8e026b668140c8d5cad60d9811e0246ad3a
|
e6f144ff524a5e28bb9a3effe55f0e8b3d3f9d72
|
/less14/chat_v2/chat/chatApp/validators.py
|
321bd980636d9847dab114791bd09061c52d777d
|
[] |
no_license
|
an4p/python_oop
|
614382989914513b60b87d10e78c21a34debe162
|
5cff48072f30460df6e7300fa85038e18e986d4b
|
refs/heads/master
| 2021-01-23T03:27:09.496370
| 2017-06-08T12:14:24
| 2017-06-08T12:14:24
| 86,077,237
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from django.core.exceptions import ValidationError
import re
def not_null(data):
if data == None:
raise ValidationError("This field cannot be null")
def email_validation(data):
pattern = re.compile(r'\w{1,}')
isemail = pattern.match(data)
#print("isemail"+str(isemail.group(0)))
if isemail==None:
raise ValidationError("Your login is not email")
|
[
"anna@3g.ua"
] |
anna@3g.ua
|
9acbf3882c7fd6e2cdd0af5022f6f471245c0b6e
|
c0032b63a0220bfd0fe8592b8f6d8382b808417c
|
/0x06-python-classes/3-square.py
|
e6a1ab3e3e526c71a4ae6b5ce0d8dd39fac0f71f
|
[] |
no_license
|
Arkadington/alx-higher_level_programming
|
2104d200aa3b8ff5026476d975fc7dfabe9db660
|
36d4aa2f25416b94cf2fca5598717bcec98d6211
|
refs/heads/master
| 2023-05-23T11:24:42.750209
| 2021-05-26T07:19:26
| 2021-05-26T07:19:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#!/usr/bin/python3
"""defining a square based on 2-square.py"""
class Square:
"""defines a square by size"""
def __init__(self, size=0):
"""initializing size argument"""
self.__size = size
if not isinstance(size, int):
raise TypeError('size must be an integer')
if size < 0:
raise ValueError('size must be >= 0')
def area(self):
"""returns the area of a square"""
area = self.__size * self.__size
return area
|
[
"atienofaith12@gmail.com"
] |
atienofaith12@gmail.com
|
61c5a2c0cbafcf79a81a70720540685f9d224f30
|
82573b51d1188f653b673e1261e02fc9e4e12e66
|
/etc/beam_search.py
|
6feed3d5cdd861462535b44d844fc11b60a17694
|
[] |
no_license
|
JeeYz/git_from_the_hell
|
94a55b1c922c993383927d5aaa7cad066d645ddb
|
0826ab6b1fd760c145ac5dcab6a8aaa9e9af9e02
|
refs/heads/master
| 2022-09-25T02:17:55.636721
| 2022-09-17T18:03:26
| 2022-09-17T18:03:26
| 152,916,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
import random
import copy
input_data = list()
for i in range(10):
temp_val = random.random()
input_data.append(temp_val)
print(input_data)
beam_search_size = 3
def gen_beam_list(input_data):
arrange_list = copy.deepcopy(input_data)
global beam_search_size
idx_num = 0
flag_num = 0
while True:
target_idx = idx_num+1
if arrange_list[idx_num] < arrange_list[target_idx]:
curr_num = arrange_list[idx_num]
next_num = arrange_list[target_idx]
arrange_list[idx_num] = next_num
arrange_list[target_idx] = curr_num
flag_num += 1
idx_num += 1
if idx_num == (len(arrange_list)-1):
idx_num = 0
if flag_num == 0:
break
else:
flag_num = 0
return arrange_list[:beam_search_size]
first_beam_list = gen_beam_list(input_data)
print(first_beam_list)
idx_list = list()
for one_val in first_beam_list:
temp_idx = input_data.index(one_val)
idx_list.append(temp_idx)
print(idx_list)
sequence_lenth = 5
result_list = list()
for i in range(beam_search_size):
temp_dict = dict()
temp_dict['index_list'] = list()
temp_dict['probability'] = 0.
temp_dict['index_list'].append(idx_list[i])
temp_dict['probability'] = input_data[idx_list[i]]
result_list.append(temp_dict)
def gen_random_list():
return_list = list()
for i in range(10):
return_list.append(random.random())
return return_list
def gen_index_list_for_beam(input_val_list, origin_data):
idx_list = list()
for one_val in input_val_list:
temp_idx = origin_data.index(one_val)
idx_list.append(temp_idx)
return idx_list
for i in range(sequence_lenth-1):
print("{}th sequence....".format(i))
limit_loop_num = len(result_list)
print(limit_loop_num)
for n in range(limit_loop_num):
one_dict = result_list[n]
origin_dict = copy.deepcopy(one_dict)
input_list = gen_random_list()
temp_val_list = gen_beam_list(input_list)
target_index_list = gen_index_list_for_beam(temp_val_list, input_list)
for j, one_idx in enumerate(target_index_list):
if j == 0:
one_dict['index_list'].append(one_idx)
one_dict['probability'] *= input_list[one_idx]
else:
temp_dict = copy.deepcopy(origin_dict)
temp_dict['index_list'].append(one_idx)
temp_dict['probability'] *= input_list[one_idx]
result_list.append(temp_dict)
def decide_result(input_full):
max_val = 0.
max_index = 0
for i, one_dict in enumerate(input_full):
if max_val < one_dict['probability']:
max_val = one_dict['probability']
max_index = i
return max_val, input_full[max_index]
result_val, result_seq = decide_result(result_list)
print("final result : {pro}, {sequence}".format(pro=result_val, sequence=result_seq['index_list']))
|
[
"jkdspiegel@gmail.com"
] |
jkdspiegel@gmail.com
|
d66e5f3155ea380201b01e010d93e3f7459d1d9b
|
0de4549263e75a7614e7c430dac68822b18b0876
|
/med1_med12_example/before_redefine/group.py
|
8753e406ce81b7829d66cb354ff2fe1a00e8d5ae
|
[] |
no_license
|
oaxiom/redefine_peaks
|
7213630f021c69de5520300fe2121074159aa788
|
c62abfc2702bca5172f58563ff7c478035c697f6
|
refs/heads/master
| 2020-07-30T14:10:11.671975
| 2019-09-26T02:17:22
| 2019-09-26T02:17:22
| 210,259,020
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
import glob
from glbase3 import *
config.draw_mode = ["png", 'pdf']
filenames = [os.path.split(f)[1] for f in glob.glob("../clus/*.bed")]
trks = [
flat_track(filename='../flats/esc_med1.flat'),
flat_track(filename='../flats/esc_med12.flat'),
]
peaks = [
genelist(filename="../peaks/esc_med1.rp1_summits.bed.gz", format=format.minimal_bed, gzip=True),
genelist(filename="../peaks/esc_med12.rp1_summits.bed.gz", format=format.minimal_bed, gzip=True),
]
gl = glglob()
ret = gl.chip_seq_cluster_heatmap(peaks, trks, "heatmap.png",
cache_data="data.bin", bracket=[9,14],
imshow=True, log=2,
pileup_distance=2000,
bins=50, read_extend=0)
gl.chip_seq_cluster_pileup(filename="clus/clusters.png")
for cid in ret:
print("cid:", cid, "len:", len(ret[cid]["genelist"]))
ret[cid]["genelist"].saveBED(filename="clus/cid_%s.bed" % cid, uniqueID=True)
|
[
"oaxiom@gmail.com"
] |
oaxiom@gmail.com
|
e7c60a0ec63f5654ae032c35925d7ffd99117de3
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/tests/system/providers/apache/druid/example_druid_dag.py
|
0552e10588950e78a6fc57488119b0e9bdf65845
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940
| 2022-07-14T12:07:11
| 2022-07-14T12:07:11
| 209,801,072
| 1
| 0
|
Apache-2.0
| 2019-09-20T13:47:26
| 2019-09-20T13:47:26
| null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG to submit Apache Druid json index file using `DruidOperator`
"""
import os
from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.druid.operators.druid import DruidOperator
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_druid_operator"
with DAG(
dag_id=DAG_ID,
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example'],
) as dag:
# [START howto_operator_druid_submit]
submit_job = DruidOperator(task_id='spark_submit_job', json_index_file='json_index.json')
# Example content of json_index.json:
JSON_INDEX_STR = """
{
"type": "index_hadoop",
"datasource": "datasource_prd",
"spec": {
"dataSchema": {
"granularitySpec": {
"intervals": ["2021-09-01/2021-09-02"]
}
}
}
}
"""
# [END howto_operator_druid_submit]
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
[
"noreply@github.com"
] |
ishiis.noreply@github.com
|
046821a28fcd0f8c07500a8e1002b4d2c26a518c
|
c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5
|
/Competition/vision1/0203_8_private3.py
|
5739dd3f67dbb144f198fea38d30aec58e244ef2
|
[] |
no_license
|
89Mansions/AI_STUDY
|
d9f8bdf206f14ba41845a082e731ea844d3d9007
|
d87c93355c949c462f96e85e8d0e186b0ce49c76
|
refs/heads/master
| 2023-07-21T19:11:23.539693
| 2021-08-30T08:18:59
| 2021-08-30T08:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,056
|
py
|
# private 3등 코드
# train / test / validation (0.95) 분리
# batch_size = 16
# loss 줄었음, score 동일
import numpy as np
import numpy as np  # bug fix: np.mean() is used below but numpy was never imported
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from numpy import expand_dims
from sklearn.model_selection import StratifiedKFold, cross_validate, train_test_split
from keras import Sequential
from keras.layers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam

######################################################
# Load data
train = pd.read_csv('../data/DACON_vision1/train.csv')
print(train.shape) # (2048, 787)
sub = pd.read_csv('../data/DACON_vision1/submission.csv')
print(sub.shape) # (20480, 2)
test = pd.read_csv('../data/DACON_vision1/test.csv')
print(test.shape) # (20480, 786)
######################################################
#1. DATA
# print(train, test, sub)
# print(train['digit'].value_counts()) # digits 0 through 9

# Drop id/label columns so only the 784 pixel columns remain.
train2 = train.drop(['id', 'digit', 'letter'], 1)
test2 = test.drop(['id', 'letter'], 1)  # >> x_pred

train2 = train2.values  # >>> x
test2 = test2.values    # >>> x_pred

# plt.imshow(train2[100].reshape(28,28))
# plt.show()

train2 = train2.reshape(-1, 28, 28, 1)
test2 = test2.reshape(-1, 28, 28, 1)

# preprocess: scale pixel values into [0, 1]
train2 = train2/255.0
test2 = test2/255.0

# ImageDataGenerator >> data augmentation: enlarging the training set helps
# against overfitting on this small (2048-sample) dataset.
idg = ImageDataGenerator(height_shift_range=(-1,1),width_shift_range=(-1,1))
# width_shift_range : shifts the image left/right
# height_shift_range : shifts the image up/down
idg2 = ImageDataGenerator()  # identity generator for validation/test/prediction
'''
sample_data = train2[100].copy()
sample = expand_dims(sample_data,0)
# expand_dims : 차원을 확장시킨다.
sample_datagen = ImageDataGenerator(height_shift_range=(-1,1),width_shift_range=(-1,1))
sample_generator = sample_datagen.flow(sample, batch_size=1) # flow : ImageDataGenerator 디버깅
plt.figure(figsize=(16,10))
for i in range(9) :
plt.subplot(3, 3, i+1)
sample_batch = sample_generator.next()
sample_image = sample_batch[0]
plt.imshow(sample_image.reshape(28, 28))
plt.show()
'''
# cross validation
skf = StratifiedKFold(n_splits=40, random_state=42, shuffle=True)

#2. Modeling
# %%time
reLR = ReduceLROnPlateau(patience=100, verbose=1, factor=0.5)
es = EarlyStopping(patience=120, verbose=1)

val_loss_min = []
val_acc_max = []
result = 0          # accumulated soft-vote over the 40 folds
nth = 0

# NOTE(review): leading indentation was lost when this file was extracted;
# the fold-loop body below is reconstructed from the obvious control flow —
# confirm against the original notebook/script.
for train_index, test_index in skf.split(train2, train['digit']):  # >>> x, y
    path = '../data/DACON_vision1/cp/0203_4_cp.hdf5'
    mc = ModelCheckpoint(path, save_best_only=True, verbose=1)

    x_train = train2[train_index]
    x_test = train2[test_index]
    y_train = train['digit'][train_index]
    y_test = train['digit'][test_index]

    # Carve a small validation split off the fold's training portion.
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, train_size=0.95, shuffle=True, random_state=47)

    train_generator = idg.flow(x_train, y_train, batch_size=16)
    test_generator = idg2.flow(x_test, y_test, batch_size=16)
    valid_generator = idg2.flow(x_valid, y_valid)
    pred_generator = idg2.flow(test2, shuffle=False)

    print(x_train.shape, x_test.shape, x_valid.shape) # (1896, 28, 28, 1) (52, 28, 28, 1) (100, 28, 28, 1)
    print(y_train.shape, y_test.shape, y_valid.shape) # (1896,) (52,) (100,)

    #2. Modeling
    model = Sequential()
    model.add(Conv2D(16, (3,3), activation='relu', input_shape=(28, 28,1), padding='same'))
    model.add(BatchNormalization())
    # BatchNormalization >> normalises layer inputs with the mean/variance the
    # model estimates during training.
    model.add(Dropout(0.3))
    model.add(Conv2D(32, (3,3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(3,3))
    model.add(Dropout(0.3))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (5, 5), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(3,3))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(64, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(10, activation='softmax'))

    #3. Compile, Train
    model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=0.002, epsilon=None), metrics=['acc'])
    # epsilon : avoids division by zero inside the Adam update
    learning_hist = model.fit_generator(train_generator, epochs=1000, validation_data=valid_generator, callbacks=[es, mc, reLR] )
    model.load_weights('../data/DACON_vision1/cp/0203_4_cp.hdf5')

    #4. Evaluate, Predict
    loss, acc = model.evaluate(test_generator)
    print("loss : ", loss)
    print("acc : ", acc)

    # Soft-vote: average the per-fold probability predictions over 40 folds.
    result += model.predict_generator(pred_generator, verbose=True)/40

    # save val_loss
    hist = pd.DataFrame(learning_hist.history)
    val_loss_min.append(hist['val_loss'].min())
    val_acc_max.append(hist['val_acc'].max())

    nth += 1
    print(nth, "번째 학습을 완료했습니다.")

# Aggregate report over all folds (requires the numpy import added above).
print("val_loss_min :", np.mean(val_loss_min)) # val_loss_mean : 0.1835539501160383
print("val_acc_max :", np.mean(val_acc_max)) # val_acc_max : 0.9512500002980232

model.summary()

sub['digit'] = result.argmax(1)
print(sub)
sub.to_csv('../data/DACON_vision1/0203_4_private3.csv', index=False)

# xian submission 0203_4_pca
# score 0.9509803922
|
[
"hwangkei0212@gmail.com"
] |
hwangkei0212@gmail.com
|
ac7aa470914c9dcd181660f70288c52307f34e56
|
17889c693624186593a64bb2220035760316980b
|
/setup.py
|
bbc213031ca0fa6fb214a151fa3d269b11f6787b
|
[] |
no_license
|
vahtras/pdpack
|
f0b50931ce7c987c8f6a151b8529f2175f6de990
|
5fe9c377ebadaff82e5db0650db60709a8065720
|
refs/heads/master
| 2021-01-10T02:08:53.424347
| 2016-04-08T08:54:34
| 2016-04-08T08:54:34
| 55,702,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
"""Build script for the `linextra` Fortran extension.

numpy.distutils is needed to compile the Fortran sources, so numpy is
installed on the fly when it is missing.
"""
try:
    import numpy
except ImportError:
    import subprocess
    subprocess.call("pip install numpy", shell=True)

from numpy.distutils.core import setup, Extension

linextra_ext = Extension(
    name='linextra',
    sources=['pdpack/linextra.F', 'pdpack/linpack.F'],
    include_dirs=['pdpack/include'],
    libraries=['blas'],
)

setup(name='linextra', ext_modules=[linextra_ext])
|
[
"vahtras@kth.se"
] |
vahtras@kth.se
|
69be95888037623078a6dcbdfe8f36e773a65944
|
d3e31f6b8da5c1a7310b543bbf2adc76091b5571
|
/Day24/vd1/app.py
|
563c66af2f3efd4d2c19b03b41765d7162a81322
|
[] |
no_license
|
pytutorial/py2103
|
224a5a7133dbe03fc4f798408694bf664be10613
|
adbd9eb5a32eb1d28b747dcfbe90ab8a3470e5de
|
refs/heads/main
| 2023-07-14T06:31:18.918778
| 2021-08-12T14:29:16
| 2021-08-12T14:29:16
| 355,163,185
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
#pip install flask
"""Minimal Flask application that serves a single index page."""
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def index():
    """Render the home page template."""
    return render_template('index.html')  # 'Hello'


app.run(debug=True)
|
[
"duongthanhtungvn01@gmail.com"
] |
duongthanhtungvn01@gmail.com
|
d7ef042d180c534b37eea101e0c194f1a9ff0e20
|
5d2bc0efb0e457cfd55a90d9754d5ced9c009cae
|
/venv/lib/python2.7/site-packages/tests/test_024_ForeignKeys.py
|
d1e1d2981a54530173a85a1af5462ba6c56c0d9f
|
[] |
no_license
|
michaelp1212/paxton
|
dafe08eca55557d036189d5242e47e89ec15bf2d
|
0bd1da471c3a594c0765a4bc5cd1288404791caf
|
refs/heads/master
| 2021-03-25T07:17:06.523340
| 2020-03-19T01:38:24
| 2020-03-19T01:38:24
| 247,598,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,878
|
py
|
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
# NOTE: IDS requires that you pass the schema name (cannot pass None)
from __future__ import print_function
import sys
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
# NOTE(review): leading indentation appears to have been lost when this file
# was extracted; the code below is kept byte-for-byte, only comments added.
class IbmDbTestCase(unittest.TestCase):
# Exercises ibm_db.foreign_keys() against three chained tables and compares
# stdout with the per-server expected blocks at the bottom of the file.
def test_024_ForeignKeys(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_024)
def run_test_024(self):
conn = ibm_db.connect(config.database, config.user, config.password)
server = ibm_db.server_info( conn )
if conn != 0:
# Drop leftovers from earlier runs; failures (table absent) are expected.
drop = 'DROP TABLE test_primary_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
drop = 'DROP TABLE test_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
drop = 'DROP TABLE test_foreign_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
# Create the FK chain: test_foreign_keys -> test_keys -> test_primary_keys.
statement = 'CREATE TABLE test_primary_keys (id INTEGER NOT NULL, PRIMARY KEY(id))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_primary_keys VALUES (1)"
result = ibm_db.exec_immediate(conn, statement)
statement = 'CREATE TABLE test_keys (name VARCHAR(30) NOT NULL, idf INTEGER NOT NULL, FOREIGN KEY(idf) REFERENCES test_primary_keys(id), \
PRIMARY KEY(name))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_keys VALUES ('vince', 1)"
result = ibm_db.exec_immediate(conn, statement)
statement = 'CREATE TABLE test_foreign_keys (namef VARCHAR(30) NOT NULL, id INTEGER NOT NULL, FOREIGN KEY(namef) REFERENCES test_keys(name))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_foreign_keys VALUES ('vince', 1)"
result = ibm_db.exec_immediate(conn, statement)
# IDS (Informix) requires an explicit lower-case schema; other servers
# take NULL schema and upper-case table names.
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_primary_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_PRIMARY_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
# Lookup by foreign-key-side table only.
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, None, None, None, config.user, 'test_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, None, None, None, 'TEST_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
# Lookup by primary-key-side table only.
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, None, None)
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, None, None)
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
# Lookup with both sides specified.
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, config.user, 'test_foreign_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, None, 'TEST_FOREIGN_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
# All-None arguments are invalid and should raise / set an error message.
try:
stmt = ibm_db.foreign_keys(conn, None, None, None, None, None, None)
row = ibm_db.fetch_tuple(stmt)
except:
if (not stmt):
print(ibm_db.stmt_errormsg())
# A non-existent schema should yield an empty result set.
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, 'dummy_schema')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, 'dummy_schema')
row = ibm_db.fetch_tuple(stmt)
if(not row):
print("No Data Found")
else:
print(row)
ibm_db.close(conn)
else:
print(ibm_db.conn_errormsg())
print("Connection failed\n")
#__END__
#__LUW_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
#__ZOS_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
#__SYSTEMI_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#__IDS_EXPECTED__
#test_primary_keys
#id
#test_keys
#idf
#test_primary_keys
#id
#test_keys
#idf
#test_keys
#name
#test_foreign_keys
#namef
#test_keys
#name
#test_foreign_keys
#namef
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
|
[
"smartwebdev2017@gmail.com"
] |
smartwebdev2017@gmail.com
|
eb4b3921a93a20d061cc44081c98fc02a0a9321e
|
3bc4b502fdb5ffecdbecc9239a0c25746dc31022
|
/Ch03/p69.py
|
c164e5529ca7a074b8d57aedb66553a6e4e56930
|
[] |
no_license
|
pkc-3/python
|
68da873bbe7ad9a3e0db4e22ddaa412a9377720f
|
d8410d897c3784c6017f7edc215ce8763e557518
|
refs/heads/master
| 2023-05-31T06:40:30.279748
| 2021-06-10T09:00:09
| 2021-06-10T09:00:09
| 361,634,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
# break/continue example: prints "1 2 4 5 " and leaves i == 6.
for i in range(1, 11):
    if i == 3:
        continue  # skip 3
    if i == 6:
        break     # stop before printing 6
    print(i, end=' ')
|
[
"pkc_3@naver.com"
] |
pkc_3@naver.com
|
712fdaba5b3b039c11c0a907327177ff5bd9909d
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/1219692/snippet.py
|
72feef967aeb0d54cf1d96dad510906647484037
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
#!/usr/bin/env python
"""
This script looks up how many followers two different Twitter accounts do have in common.
Usage:
twitter_follower_intersect.py username username
You'll need Python and the Python Twitter Tools to get this running.
http://pypi.python.org/pypi/twitter/
Also you will have to create an app at https://dev.twitter.com/apps/
and enter your credentials below:
"""
auth_token = '...'
auth_token_secret = '...'
consumer_key = '...'
consumer_secret = '...'
from twitter import Twitter, OAuth
import sys, os
if len(sys.argv) != 3:
print 'Usage:\n\n'+os.path.basename(sys.argv[0])+' screenname1 screenname2';
t = Twitter(auth=OAuth(auth_token, auth_token_secret, consumer_key, consumer_secret))
user_a = sys.argv[1]
user_b = sys.argv[2]
a = t.followers.ids(user=user_a)
b = t.followers.ids(user=user_b)
c = []
for id in a:
try:
b.index(id)
c.append(id)
except:
True
print '\n'+user_a, 'has', len(a), 'follower'
print user_b, 'has', len(b), 'follower'
print user_a, 'and', user_b, 'have', len(c), 'followers in common'
if len(c) > 100:
c = c[:100]
print '\nfirst 100 common followers are:'
elif len(c) > 0:
print '\nthese are the common followers:'
if len(c) > 0:
common_info = t.users.lookup(user_id=','.join(map(str, c)))
common = []
for u in common_info:
common.append(u['screen_name'])
print ', '.join(common)
print
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
529b9a96885880408cf9b4499a0d6e5912c55327
|
87e88e72991cc83eff200fb87cc3985d383e3292
|
/FPV_ANN/utils/resBlock.py
|
d64ec3238350a96644f8811f0ab578ff8330b10e
|
[
"MIT"
] |
permissive
|
ppbreda/combustionML
|
c04f3418d891a91af07c4522c507fef186743f1e
|
b441907276c39f127b8914b78656581b0bcde359
|
refs/heads/master
| 2020-04-29T18:05:47.425570
| 2019-03-15T08:40:36
| 2019-03-15T08:40:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
from keras.models import Model
from keras.layers import Dense, Activation, Input, BatchNormalization, Dropout, concatenate
from keras import layers
from keras.callbacks import ModelCheckpoint
def res_block_org(input_tensor, n_neuron, stage, block, bn=False):
    """Original residual block: two Dense layers plus an identity skip connection.

    Layer names are derived from `stage` and `block` so every block in the
    network is uniquely named.
    """
    conv_name = 'res' + str(stage) + block + '_branch'
    bn_name = 'bn' + str(stage) + block + '_branch'

    h = Dense(n_neuron, name=conv_name + '2a')(input_tensor)
    if bn:
        h = BatchNormalization(axis=-1, name=bn_name + '2a')(h)
    h = Activation('relu')(h)
    h = Dropout(0.)(h)

    h = Dense(n_neuron, name=conv_name + '2b')(h)
    if bn:
        h = BatchNormalization(axis=-1, name=bn_name + '2b')(h)

    # Identity shortcut, then the final non-linearity.
    h = layers.add([h, input_tensor])
    h = Activation('relu')(h)
    h = Dropout(0.)(h)
    return h
def res_branch(bi, conv_name_base, bn_name_base, scale, input_tensor, n_neuron, stage, block, dp1, bn=False):
    """One parallel branch of a multi-branch residual block.

    `bi` is the branch index, used only to keep layer names unique.
    """
    branch = Dense(scale * n_neuron, name=conv_name_base + '2a_' + str(bi))(input_tensor)
    if bn:
        branch = BatchNormalization(axis=-1, name=bn_name_base + '2a_' + str(bi))(branch)
    branch = Activation('relu')(branch)
    if dp1 > 0:
        branch = Dropout(dp1)(branch)
    return branch
# new resnet block implementation with bottle neck
def res_block(input_tensor, scale, n_neuron, stage, block, bn=False, branches=0):
    """Bottleneck residual block with optional parallel branches.

    Widens to scale*n_neuron, optionally runs `branches`-many parallel
    sub-branches whose outputs are concatenated, projects back to n_neuron,
    and adds the identity shortcut.
    """
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # scale = 2

    x = Dense(scale * n_neuron, name=conv_name_base + '2a')(input_tensor)
    if bn:
        x = BatchNormalization(axis=-1, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    dp1 = 0  # dropout disabled; kept for easy experimentation
    if dp1 > 0:
        x = Dropout(dp1)(x)

    branch_list = [x]
    for branch_idx in range(branches - 1):
        branch_list.append(
            res_branch(branch_idx, conv_name_base, bn_name_base, scale,
                       input_tensor, n_neuron, stage, block, dp1, bn))

    if branches - 1 > 0:
        # Merge all branches before projecting back down to n_neuron.
        x = Dense(n_neuron, name=conv_name_base + '2b')(concatenate(branch_list, axis=-1))
        # x = Dense(n_neuron, name=conv_name_base + '2b')(layers.add(branch_list))
    else:
        x = Dense(n_neuron, name=conv_name_base + '2b')(x)
    if bn:
        x = BatchNormalization(axis=-1, name=bn_name_base + '2b')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    if dp1 > 0:
        x = Dropout(dp1)(x)
    return x
|
[
"maximilian.hansinger@unibw.de"
] |
maximilian.hansinger@unibw.de
|
90e6ad0aa33ddfa5468bf5f1af143002cab456a2
|
cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be
|
/python3/euc_dist.py
|
e6536e74111caf0ec86e96ef6f4469cb1453d3bf
|
[] |
no_license
|
ericosur/ericosur-snippet
|
dda2200546b13fb9b84632d115a0f4ca5e3d5c47
|
0309eeb614612f9a35843e2f45f4080ae03eaa81
|
refs/heads/main
| 2023-08-08T04:54:05.907435
| 2023-07-25T06:04:01
| 2023-07-25T06:04:01
| 23,057,196
| 2
| 1
| null | 2022-08-31T09:55:19
| 2014-08-18T03:18:52
|
Perl
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
#!/usr/bin/python3
# coding: utf-8
''' euclidean distance '''
import numpy as np
def main():
    """Compute and print the Euclidean norm of the vector (3, 4)."""
    vec = np.array([3, 4])
    norm = np.linalg.norm(vec)  # sqrt(3^2 + 4^2) == 5.0
    print(norm)


if __name__ == '__main__':
    main()
|
[
"ericosur@gmail.com"
] |
ericosur@gmail.com
|
b714c4900618c7eaaade3b45b921035c9bc2929a
|
222dbb2f43dccbd4538ef76798a26457edffe07c
|
/MFVI/distributions.py
|
805669cb27a08efcecf3874559184aaabb9b9751
|
[] |
no_license
|
MJHutchinson/PytorchBayes
|
9699351822416deeb61e95a34653580fdfbbb5ae
|
e95a9bd308c595b9603bdfb799288a0ed50cc7c6
|
refs/heads/master
| 2020-04-09T18:39:57.643468
| 2019-01-15T16:06:05
| 2019-01-15T16:06:05
| 160,519,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
import torch
import numpy as np
log_2pi = np.log(2.0 * np.pi)  # constant term of the Gaussian log-density


class Distribution(object):
    """Minimal distribution interface; subclasses override what they support."""

    def pdf(self, x):
        raise NotImplementedError

    def logpdf(self, x):
        raise NotImplementedError

    def cdf(self, x):
        raise NotImplementedError

    def logcdf(self, x):
        raise NotImplementedError

    def sample(self):
        raise NotImplementedError

    def forward(self, x):
        raise NotImplementedError


class Normal(Distribution):
    """Factorised Gaussian parameterised by mean `mu` and log-variance `logvar`."""

    def __init__(self, mu, logvar):
        self.mu = mu
        self.logvar = logvar
        self.shape = mu.size()

    def logpdf(self, x):
        # log N(x; mu, sigma^2) = -0.5*log(2*pi) - 0.5*log(sigma^2) - (x-mu)^2/(2*sigma^2)
        var = torch.exp(self.logvar)
        return -0.5 * log_2pi - 0.5 * self.logvar - (x - self.mu).pow(2) / (2 * var)

    def pdf(self, x):
        return torch.exp(self.logpdf(x))

    def sample(self):
        # Reparameterised draw: mu + sigma * eps with eps ~ N(0, I).
        if self.mu.is_cuda:
            noise = torch.cuda.FloatTensor(self.shape).normal_()
        else:
            noise = torch.FloatTensor(self.shape).normal_()
        return self.mu + torch.exp(0.5 * self.logvar) * noise

    def kl(self, distribution):
        # Closed-form KL(self || distribution) between two diagonal Gaussians.
        # NOTE(review): as in the original, a non-Normal argument silently
        # yields None -- confirm no caller relies on that.
        if isinstance(distribution, self.__class__):
            const_term = -0.5
            log_var_diff = 0.5 * (-self.logvar + distribution.logvar)
            mu_diff_term = 0.5 * ((self.mu - distribution.mu) ** 2 + torch.exp(self.logvar)) / torch.exp(distribution.logvar)
            return const_term + log_var_diff + mu_diff_term
|
[
"hutchinson.michael.john@gmail.com"
] |
hutchinson.michael.john@gmail.com
|
8ec1b7d766ec47742ccc9d987470864ab0da2c88
|
33338ccfe04112e7b7ea09aea240187cee5bab3f
|
/examples/motion.py
|
35867e74d44317195e2ecb4b470fc58a09d5d516
|
[
"MIT"
] |
permissive
|
levkovigor/pmw3901-python-pimoroni
|
b944e7e3f1b4b43fcd0259cbdd4d6548148fbacb
|
bf798f6d58a1527563822d152d2650cc72e06e09
|
refs/heads/master
| 2023-08-02T22:53:24.578868
| 2021-09-16T15:05:11
| 2021-09-16T15:05:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
#!/usr/bin/env python
"""Continuously read and accumulate optical-flow motion from a PMW3901/PAA5100."""
import time
import argparse

from pmw3901 import PMW3901, PAA5100, BG_CS_FRONT_BCM, BG_CS_BACK_BCM

print("""motion.py - Detect flow/motion in front of the PMW3901 sensor.
Press Ctrl+C to exit!
""")

parser = argparse.ArgumentParser()
parser.add_argument('--board', type=str, choices=['pmw3901', 'paa5100'], required=True, help='Breakout type.')
parser.add_argument('--rotation', type=int, default=0, choices=[0, 90, 180, 270], help='Rotation of sensor in degrees.')
parser.add_argument('--spi-slot', type=str, default='front', choices=['front', 'back'], help='Breakout Garden SPI slot.')
args = parser.parse_args()

# Pick the right class for the specified breakout.
if args.board == 'pmw3901':
    SensorClass = PMW3901
else:
    SensorClass = PAA5100

# Chip-select GPIO depends on which Breakout Garden slot is used.
cs_gpio = BG_CS_FRONT_BCM if args.spi_slot == 'front' else BG_CS_BACK_BCM
flo = SensorClass(spi_port=0, spi_cs=1, spi_cs_gpio=cs_gpio)
flo.set_rotation(args.rotation)

tx = 0  # accumulated x motion
ty = 0  # accumulated y motion

try:
    while True:
        try:
            x, y = flo.get_motion()
        except RuntimeError:
            # Transient read failure -- just retry.
            continue
        tx += x
        ty += y
        print("Relative: x {:03d} y {:03d} | Absolute: x {:03d} y {:03d}".format(x, y, tx, ty))
        time.sleep(0.01)
except KeyboardInterrupt:
    pass
|
[
"phil@gadgetoid.com"
] |
phil@gadgetoid.com
|
664688d7026deca7dfe13f826be489819a058db9
|
f188379dc9c1e5b63e432d434c782a4d6997872b
|
/7_Dictionaries/Exercises and More Exercises/11. Ranking.py
|
c8393f218b168b6acce06f6453a3b75ba56c95c2
|
[] |
no_license
|
GalyaBorislavova/SoftUni_Python_Fundamentals_January_2021
|
39d7eb8c28f60ff3c293855b074c49ac622a6036
|
7d479fd6c8e4136fb07b765458cc00088e09767a
|
refs/heads/main
| 2023-06-15T04:16:17.084825
| 2021-06-30T18:05:42
| 2021-06-30T18:05:42
| 381,785,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
# Read contests and their passwords until the terminator line.
contests = {}
line = input()
while line != "end of contests":
    contest_name, contest_password = line.split(":")
    contests[contest_name] = contest_password
    line = input()

# Read submissions; keep each user's best score per valid contest.
submissions = {}
line = input()
while line != "end of submissions":
    contest, password, username, raw_points = line.split("=>")
    points = int(raw_points)
    # Accept only submissions whose contest exists and whose password matches.
    if contest in contests and contests[contest] == password:
        user_results = submissions.setdefault(username, {})
        if contest not in user_results or user_results[contest] < points:
            user_results[contest] = points
    line = input()

# Users alphabetically; each user's contests by points, descending.
sorted_submissions = {
    user: dict(sorted(results.items(), key=lambda kv: -kv[1]))
    for user, results in sorted(submissions.items())
}

# Find the user with the highest total across contests.
max_points = 0
best_candidate = ''
for user, results in sorted_submissions.items():
    total = sum(results.values())
    if total > max_points:
        max_points = total
        best_candidate = user

print(f"Best candidate is {best_candidate} with total {max_points} points.")
print("Ranking:")
for user, results in sorted_submissions.items():
    print(user)
    for c, p in results.items():
        print(f"# {c} -> {p}")
|
[
"galyaborislavova888@gmail.com"
] |
galyaborislavova888@gmail.com
|
60be12b528080a1558cd6e0f9d842020db02dc29
|
5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63
|
/0830/palindrome2.py
|
0042014807add9a1a311863e8d36a88cf927c9d9
|
[] |
no_license
|
juyi212/Algorithm_study
|
f5d263c5329c994a457bbe897e5e1405d2b1d67a
|
f225cc593a50b74686111f654f7133707a1d1310
|
refs/heads/master
| 2023-03-21T20:02:36.138688
| 2021-03-16T14:16:40
| 2021-03-16T14:16:40
| 325,008,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
import sys
sys.stdin = open("pal_input.txt", "r")


def check(m):
    """Return True if any row or column of the 100x100 grid holds a
    palindrome of length m (rows via `words`, columns via `zwords`)."""
    for i in range(100):
        for j in range(100 - m + 1):
            row_piece = words[i][j:j + m]
            col_piece = zwords[i][j:j + m]
            if row_piece == row_piece[::-1] or col_piece == col_piece[::-1]:
                return True
    return False


for tc in range(1, 11):
    t = int(input())  # test-case number line (consumed, not used)
    words = [list(input().rstrip()) for _ in range(100)]
    zwords = list(zip(*words))  # transposed grid: columns as tuples
    # Since we want the longest palindrome, try lengths from longest down.
    for length in range(100, 0, -1):
        if check(length):
            result = length
            break
    print(result)
|
[
"dea8307@naver.com"
] |
dea8307@naver.com
|
4a7aae79d4dfa0b6ec639291564a1c9cc372c6c2
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc005/C/4909327.py
|
2ce013dfa84bbabf437e5d6940c4493bb010db06
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
t = int(input())
n = int(input())
a_list = sorted(map(int, input().split()))
m = int(input())
b_list = sorted(map(int, input().split()))

if n < m:
    # More customers than takoyaki -- impossible.
    print('no')
else:
    # Greedily serve each customer the earliest takoyaki still warm enough
    # (made at most t time units before the customer arrives).
    matched = 0
    for b in b_list:
        for idx in range(len(a_list)):
            if 0 <= b - a_list[idx] <= t:
                matched += 1
                a_list.pop(idx)
                break
    print('yes' if matched == len(b_list) else 'no')
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
644a771174d5da427ab20d060a04b6064d28e480
|
7aec5d22b50ce8d4a18572396a4ab28d45dfcbef
|
/examples/coreir-tutorial/shift_register.py
|
668484a02603c8bb433e6a21b40527233274e552
|
[
"MIT"
] |
permissive
|
phanrahan/magmathon
|
db4ad04bd37034d5cee9ee0b507ec64ca9d0f204
|
68c9be1df0569a9d5d076b1bd986ed5ee3562d54
|
refs/heads/master
| 2020-09-15T08:16:36.985371
| 2020-03-04T17:50:27
| 2020-03-04T17:50:27
| 66,949,165
| 13
| 3
|
MIT
| 2020-03-02T18:14:35
| 2016-08-30T14:39:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
# coding: utf-8
# In[1]:
import magma as m
m.set_mantle_target("coreir")
import mantle
def DefineShiftRegister(n, init=0, has_ce=False, has_reset=False):
    """Define an n-bit serial-in/serial-out shift register circuit.

    Optional clock-enable and reset lines are added to the interface when
    requested; `init` sets the flip-flops' initial value.
    """
    class _ShiftRegister(m.Circuit):
        name = 'ShiftRegister_{}_{}_{}_{}'.format(n, init, has_ce, has_reset)
        IO = ['I', m.In(m.Bit), 'O', m.Out(m.Bit)] + m.ClockInterface(has_ce, has_reset)

        @classmethod
        def definition(siso):
            # Chain n flip-flops: each stage's output feeds the next input.
            ffs = mantle.FFs(n, init=init, has_ce=has_ce, has_reset=has_reset)
            reg = m.braid(ffs, foldargs={"I": "O"})
            reg(siso.I)
            m.wire(reg.O, siso.O)
            m.wireclock(siso, reg)

    return _ShiftRegister
# In[2]:
# Compile the 2-bit clock-enabled shift register to CoreIR and dump the JSON.
m.compile("build/DefineShiftRegister.json", DefineShiftRegister(2, has_ce=True), output="coreir")
get_ipython().magic('cat build/DefineShiftRegister.json')
# In[3]:
# Simulate a 3-bit shift register with clock-enable and record the waveform.
# NOTE(review): leading indentation was lost when this notebook export was
# extracted; the loop bodies below are kept byte-for-byte -- restore the
# indentation against the original notebook before running.
from magma.simulator.coreir_simulator import CoreIRSimulator
from bit_vector import BitVector
N = 3
ShiftRegisterNCE = DefineShiftRegister(N, has_ce=True)
simulator = CoreIRSimulator(ShiftRegisterNCE, clock=ShiftRegisterNCE.CLK)
outputs = []
for j in range(2):
simulator.advance()
for I, enable in [(1, 1), (0, 1), (1, 1), (0, 1), (1, 0), (0, 0), (1, 1), (1, 1), (1, 1), (1, 1)]:
simulator.set_value(ShiftRegisterNCE.I, bool(I))
simulator.set_value(ShiftRegisterNCE.CE, bool(enable))
for j in range(2):
simulator.advance()
O = simulator.get_value(ShiftRegisterNCE.O)
CLK = simulator.get_value(ShiftRegisterNCE.CLK)
outputs.append([O, I, enable, CLK])
# In[4]:
# Render the recorded signals as a timing diagram.
from magma.waveform import waveform
waveform(outputs, ["O", "I", "CE", "CLK"])
|
[
"lenny@stanford.edu"
] |
lenny@stanford.edu
|
8c3071706a8894ff8207c463c10c2281dd7b7f1c
|
1139841f6451c0e9e2a53a808966139dbde60f54
|
/nlp_tools/nlp_postprocessor/postprocessing_rule.py
|
43a30de044312219c39aedddaf33a6d6edb89b2b
|
[] |
no_license
|
abchapman93/nlp_tools
|
6728d158aa3bb06e2d5e6fa58d8924315e42ce09
|
876aac6203d596569983ca9920fde888f34cc3f3
|
refs/heads/master
| 2022-10-23T18:05:02.267427
| 2020-06-16T16:17:43
| 2020-06-16T16:17:43
| 251,348,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
class PostprocessingRule:
    """Checks conditions on a spaCy Span entity and fires an action when all pass.

    patterns (list): PostprocessingPatterns to test against the entity.
        A tuple entry is an OR-group: it passes when any member returns True.
    action (function): Called when every pattern passes, as
        action(ent, i, *action_args) -- `ent` is the span and `i` its
        index in doc.ents.
    name (str): Optional name of the rule.
    description (str): Optional description of the rule.
    action_args (tuple or None): Extra positional arguments for `action`;
        when None the action is called as action(ent, i).
    """

    def __init__(self, patterns, action, name=None, description=None, action_args=None):
        self.patterns = patterns
        self.action = action
        self.name = name
        self.description = description
        self.action_args = action_args

    def __call__(self, ent, i, debug=False):
        """Apply the rule to `ent`: run the action and return True when every
        pattern passes; otherwise return False without side effects."""
        for pattern in self.patterns:
            if isinstance(pattern, tuple):
                # OR-group: at least one sub-pattern must return exactly True.
                if not any(subpattern(ent) is True for subpattern in pattern):
                    return False
            elif pattern(ent) is False:
                # Single pattern: fails only on an explicit False result.
                return False

        # Every pattern passed - do the action.
        if debug:
            print("Passed:", self, "on ent:", ent, ent.sent)
        if self.action_args is None:
            self.action(ent, i)
        else:
            self.action(ent, i, *self.action_args)
        return True

    def __repr__(self):
        return f"PostprocessingRule: {self.name} - {self.description}"
|
[
"abchapman93@gmail.com"
] |
abchapman93@gmail.com
|
411947a1984c31f8fbdb8e6f9394716a8adaf64c
|
14956dbed8ae4fba1d65b9829d9405fcf43ac698
|
/Cyber Security/Capture the Flag Competitions/2020/DSTA CDDC 2020/Warp Gate 4/(UNDONE) What Time Is It [1]/solve.py
|
9ba0fc4d6cf73be6d252a5672169fe10759bb934
|
[] |
no_license
|
Hackin7/Programming-Crappy-Solutions
|
ae8bbddad92a48cf70976cec91bf66234c9b4d39
|
ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f
|
refs/heads/master
| 2023-03-21T01:21:00.764957
| 2022-12-28T14:22:33
| 2022-12-28T14:22:33
| 201,292,128
| 12
| 7
| null | 2023-03-05T16:05:34
| 2019-08-08T16:00:21
|
Roff
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
datesString = '''\
2005.10.06 05:23:15
2020.10.05 22:39:46
2020.08.29 05:16:57
2020.08.12 10:05:39
2020.09.29 06:36:38
2020.09.27 00:41:56
2020.09.30 18:43:24
2020.08.10 03:54:13
2020.09.24 00:09:37
2020.09.16 09:20:23
2020.08.10 22:06:44
2020.08.10 23:19:09
2020.08.13 22:08:52
1987.04.11 00:43:13\
'''
import datetime

# Echo each raw timestamp, then parse it in place.
dates = datesString.split('\n')
for idx, stamp in enumerate(dates):
    print(stamp)
    dates[idx] = datetime.datetime.strptime(stamp, '%Y.%m.%d %H:%M:%S')


def avg(dates):
    """Return the mean of a list of datetimes via timedelta arithmetic."""
    reference = datetime.datetime(1900, 1, 1)
    total = sum((d - reference for d in dates), datetime.timedelta())
    return reference + total / len(dates)


print()
print(avg(dates))
|
[
"zunmun@gmail.com"
] |
zunmun@gmail.com
|
bf984644b822f43a0a3dabb801c8a47cc73ebe50
|
225bc8ac617a721ae79e8287ca0df47439740c6b
|
/strip_tags.py
|
62fa10f4d5cf0c2ed91f72923096389502b6475f
|
[
"Artistic-2.0"
] |
permissive
|
rec/strip-tags
|
558228d1e24679822a6c3ec9130d9d1867657251
|
4f6568fa26089275d77c82d4c55c6a0f05bbebd1
|
refs/heads/master
| 2020-04-14T21:28:04.464951
| 2014-05-17T18:51:22
| 2014-05-17T18:51:22
| 19,893,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#!/usr/bin/env python
"""Strip all HTML tags from text read on stdin and print the plain text."""

from BeautifulSoup import BeautifulSoup


def strip_all_tags(html):
    # findAll(text=True) yields only the text nodes; join them back together.
    # Falsy input (None / '') is passed through unchanged.
    return html and ''.join(BeautifulSoup(html).findAll(text=True))


def join_inputs():
    """Read lines from stdin until EOF and return them as one string."""
    results = []
    while True:
        try:
            results.append(raw_input())
        except EOFError:
            return ''.join(results)


# Bug fix: the original printed strip_all_tags(html) where `html` was never
# defined at module level (NameError); the intent is to strip stdin input.
print(strip_all_tags(join_inputs()))
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
4f9db7146b298561b8def897b45f7e3ecbd7e31a
|
28f3e82c99fe3628f3d0b361f627408a2fdacfc2
|
/driver/migrations/0003_auto_20190316_2209.py
|
5fd2ff7c7dc5ae639e7af43cb2f363d6084df126
|
[] |
no_license
|
aballah-chamakh/Delivery_Tracker
|
09957eae173f30406eb285256bfb07b119ddce22
|
ffb07027e81aeb0fab90fc41963544625dd84fea
|
refs/heads/master
| 2020-04-27T22:31:49.854344
| 2019-04-09T18:11:05
| 2019-04-09T18:11:05
| 174,740,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
# Generated by Django 2.0 on 2019-03-16 21:09
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: makes Driver.vehicle nullable/optional
# (blank=True, null=True) on the one-to-one link to vehicle.Vehicle.
# NOTE(review): leading indentation appears lost in extraction; content is
# kept byte-for-byte.
class Migration(migrations.Migration):
dependencies = [
('driver', '0002_driver_image'),
]
operations = [
migrations.AlterField(
model_name='driver',
name='vehicle',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vehicle.Vehicle'),
),
]
|
[
"chamakhabdallah8@gmail.com"
] |
chamakhabdallah8@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.