blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4cbbdd29fce7808644cba278f87b92dcc23abb59
|
83e6d9a4a7aa5ced682c6d44e0853264cd7bab58
|
/dixon_TIY6.1/dixon_TIY6.11.py
|
61d1f358e5de460ff4b75d8a52cf95c9e3ea6d19
|
[] |
no_license
|
JaxonDimes/Python
|
ad3aa8e8f01fe71d9bfc47e4cc6774a9238143c3
|
0fa5dea05e726a3af4374b907178b44db98499ff
|
refs/heads/master
| 2020-12-12T10:10:34.660981
| 2020-01-15T14:48:23
| 2020-01-15T14:48:23
| 234,101,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
# City fact sheet: maps each city name to its country, a population string,
# and one piece of trivia.
cities = {
    'indianapolis': {
        'country': 'United States',
        'population': ' 2,028,614',
        'fact': 'The Children’s Museum of Indianapolis is the largest children’s museum in the world.',
    },
    'Fort Wayne': {
        'country': 'United States',
        'population': '419,453',
        'fact': 'Fort Wayne is known as "the city of churches." How many churches do we have? 360.',
    },
    'Terre Haute': {
        'country': 'United States',
        'population': '170,943',
        'fact': 'The Crime Rate for this place is 41.38 per 1,000 residents.',
    },
}

# Print a short report for every city: title line, country, population, fact.
for city, info in cities.items():
    title = city.title()
    print(title)
    print(f"\t{title} is placed in {info['country'].title()}")
    print(f"\t{title}'s population is {info['population']}.")
    print(f"\tFun Fact about {title}: {info['fact'].title()}")
|
[
"dillonduff1@frontier.com"
] |
dillonduff1@frontier.com
|
5d16aba73f3db549de99b273aa341d6a695cb19b
|
d4fdbd68c42d6b9babe347cb3b65535e4d782172
|
/tensorflow_datasets/image/horses_or_humans.py
|
a1ece687669b324b1db5a9a5d708b25eb5052d0a
|
[
"Apache-2.0"
] |
permissive
|
thanhkaist/datasets
|
2809260c5e95e96d136059bea042d1ed969a6fcf
|
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
|
refs/heads/master
| 2020-06-04T16:13:14.603449
| 2019-06-14T22:01:33
| 2019-06-14T22:02:54
| 192,097,735
| 2
| 0
|
Apache-2.0
| 2019-06-15T16:02:18
| 2019-06-15T16:02:18
| null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Horses or Humans dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow_datasets.public_api as tfds
# BibTeX citation attached to the generated DatasetInfo.
_CITATION = """\
@ONLINE {horses_or_humans,
author = "Laurence Moroney",
title = "Horses or Humans Dataset",
month = "feb",
year = "2019",
url = "http://laurencemoroney.com/horses-or-humans-dataset"
}
"""
# Download locations for the training and validation archives.
_TRAIN_URL = "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip"
_TEST_URL = "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip"
# Every image in the archives is 300x300 RGB.
_IMAGE_SIZE = 300
_IMAGE_SHAPE = (_IMAGE_SIZE, _IMAGE_SIZE, 3)
# Matches "horses/<name>.png" or "humans/<name>.png"; group 1 is the label.
_NAME_RE = re.compile(r"^(humans|horses)/[\w-]*\.png$")
class HorsesOrHumans(tfds.core.GeneratorBasedBuilder):
  """Horses or Humans dataset."""

  VERSION = tfds.core.Version("1.0.0")

  def _info(self):
    """Returns dataset metadata: features, supervised keys, and citation."""
    image_feature = tfds.features.Image(shape=_IMAGE_SHAPE)
    label_feature = tfds.features.ClassLabel(names=["horses", "humans"])
    return tfds.core.DatasetInfo(
        builder=self,
        description="A large set of images of horses and humans.",
        features=tfds.features.FeaturesDict({
            "image": image_feature,
            "label": label_feature,
        }),
        supervised_keys=("image", "label"),
        urls=["http://laurencemoroney.com/horses-or-humans-dataset"],
        citation=_CITATION
    )

  def _split_generators(self, dl_manager):
    """Downloads both zip archives and wires them to the TRAIN/TEST splits."""
    train_path, test_path = dl_manager.download([_TRAIN_URL, _TEST_URL])
    split_specs = (
        (tfds.Split.TRAIN, train_path),
        (tfds.Split.TEST, test_path),
    )
    generators = []
    for split_name, archive_path in split_specs:
      generators.append(
          tfds.core.SplitGenerator(
              name=split_name,
              num_shards=10,
              gen_kwargs={
                  "archive": dl_manager.iter_archive(archive_path)
              }))
    return generators

  def _generate_examples(self, archive):
    """Generate horses or humans images and labels given the directory path.
    Args:
      archive: object that iterates over the zip.
    Yields:
      The image path and its corresponding label.
    """
    for fname, fobj in archive:
      name_match = _NAME_RE.match(fname)
      if name_match is None:
        # Anything that is not horses/*.png or humans/*.png is skipped.
        continue
      yield {
          "image": fobj,
          "label": name_match.group(1).lower(),
      }
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
75b89fdd96fa2eb6208750c4e7ab2b8b8bbd262c
|
edfdc0d3a2fdeed95ba7aa3d0e198eb9dafe4064
|
/operator_api/auditor/serializers/wallet.py
|
eaf23012b2e241068d6fe15719acea220a4c5144
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
xiaobai900/nocust-hub
|
880e72ba4e1d324ae36adea6c03c9761a7d91621
|
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
|
refs/heads/master
| 2023-05-28T08:18:17.402228
| 2020-11-01T19:48:17
| 2020-11-01T19:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
from eth_utils import remove_0x_prefix, add_0x_prefix
from rest_framework import serializers
from ledger.models import Wallet, Token
from operator_api.models import ErrorCode
class WalletSerializer(serializers.Serializer):
    """Serializes a ledger Wallet to/from its (address, token) representation.

    Deserialization (`to_internal_value`) resolves the incoming hex addresses
    to an existing Wallet row; serialization re-attaches the 0x prefix.
    """
    address = serializers.CharField(max_length=42)
    token = serializers.CharField(max_length=42)
    trail_identifier = serializers.IntegerField(read_only=True)

    def to_internal_value(self, data):
        """Looks up and returns the Wallet matching `data['address']`/`data['token']`.

        Raises serializers.ValidationError when the payload is not a dict,
        when either key is missing, or when the token/wallet is unknown.
        """
        if not isinstance(data, dict):
            raise serializers.ValidationError(
                detail='A valid dictionary is required.')
        token_address = data.get('token')
        wallet_address = data.get('address')
        # Bug fix: .get() yields None for missing keys, and remove_0x_prefix(None)
        # raised TypeError instead of a proper validation error.
        if token_address is None or wallet_address is None:
            raise serializers.ValidationError(
                detail='Both "address" and "token" fields are required.')
        try:
            token = Token.objects.get(
                address=remove_0x_prefix(token_address))
        except Token.DoesNotExist:
            raise serializers.ValidationError(
                detail='', code=ErrorCode.TOKEN_NOT_REGISTERED)
        try:
            return Wallet.objects.get(address=remove_0x_prefix(wallet_address), token=token)
        except Wallet.DoesNotExist:
            raise serializers.ValidationError(
                detail='', code=ErrorCode.WALLET_NOT_ADMITTED)

    def to_representation(self, instance):
        """Renders a Wallet as 0x-prefixed addresses plus its trail identifier."""
        return {
            'address': add_0x_prefix(instance.address),
            'token': add_0x_prefix(instance.token.address),
            'trail_identifier': instance.trail_identifier
        }
|
[
"guillaume@felley.io"
] |
guillaume@felley.io
|
e00019a74bd3688664d6e13f72dd4fcf038515a5
|
c7770d7631f2930cce80462f9c3ee7e2abe118bb
|
/src/muses/collection/migrations/0006_auto_20180207_0616.py
|
3443f887f88a09654de195714d5967556d14639e
|
[
"Apache-2.0"
] |
permissive
|
Aincient/cleo
|
4f277520a22792aa5b505601849a7ff3a4bd4196
|
933ef372fa7847d943206d72bfb03c201dbafbd6
|
refs/heads/master
| 2021-06-18T11:01:49.137359
| 2021-01-12T16:34:44
| 2021-01-12T16:34:44
| 150,566,366
| 0
| 3
|
NOASSERTION
| 2021-01-12T16:34:46
| 2018-09-27T10:00:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-07 12:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.10, 2018-02-07).

    Adds the `active` flag to the `image` model and re-declares several
    existing `image` fields with updated options. Prefer creating a new
    migration over editing this one.
    """
    dependencies = [
        ('muses_collection', '0005_auto_20180207_0516'),
    ]
    operations = [
        # New boolean flag; existing rows default to inactive.
        migrations.AddField(
            model_name='image',
            name='active',
            field=models.BooleanField(default=False, verbose_name='Active'),
        ),
        # API URL becomes unique per image.
        migrations.AlterField(
            model_name='image',
            name='api_url',
            field=models.TextField(unique=True, verbose_name='Image API URL'),
        ),
        # `created` is stamped automatically on insert.
        migrations.AlterField(
            model_name='image',
            name='created',
            field=models.DateField(auto_now_add=True, verbose_name='Date imported'),
        ),
        # The image file itself is optional.
        migrations.AlterField(
            model_name='image',
            name='image',
            field=models.FileField(blank=True, null=True, upload_to='collection_images', verbose_name='Image'),
        ),
        # `updated` is refreshed automatically on every save.
        migrations.AlterField(
            model_name='image',
            name='updated',
            field=models.DateField(auto_now=True, verbose_name='Date updated'),
        ),
    ]
|
[
"artur.barseghyan@gmail.com"
] |
artur.barseghyan@gmail.com
|
bf14ccd8c8be7d9c8499c927482512df16ec0245
|
d23dab09b21553353ad85246ebafaea790f2afbd
|
/src/python/pants/core/util_rules/asdf_test.py
|
f11f4a993b66fcffcb5fa2d7fbd49e8695986cb4
|
[
"Apache-2.0"
] |
permissive
|
asherf/pants
|
00e8c64b7831f814bac3c4fa8c342d2237fef17d
|
c94d9e08f65e9baf3793dff0ec2c571d682f6b90
|
refs/heads/master
| 2023-05-28T14:45:35.325999
| 2023-01-18T15:16:07
| 2023-01-18T15:16:07
| 185,082,662
| 0
| 0
|
Apache-2.0
| 2023-01-18T15:15:46
| 2019-05-05T21:09:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,611
|
py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from contextlib import contextmanager
from pathlib import Path, PurePath
from typing import Iterable, Mapping, Sequence, TypeVar
import pytest
from pants.core.util_rules import asdf
from pants.core.util_rules.asdf import AsdfToolPathsRequest, AsdfToolPathsResult, get_asdf_data_dir
from pants.core.util_rules.environments import (
DockerEnvironmentTarget,
DockerImageField,
EnvironmentTarget,
LocalEnvironmentTarget,
RemoteEnvironmentTarget,
)
from pants.engine.addresses import Address
from pants.engine.env_vars import CompleteEnvironmentVars, EnvironmentVars
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.contextutil import temporary_dir
_T = TypeVar("_T")


def materialize_indices(sequence: Sequence[_T], indices: Iterable[int]) -> tuple[_T, ...]:
    """Return a tuple of the elements of `sequence` at each position in `indices`."""
    pick = sequence.__getitem__
    return tuple(map(pick, indices))
@contextmanager
def fake_asdf_root(
    fake_versions: list[str],
    fake_home_versions: list[int],
    fake_local_versions: list[int],
    *,
    tool_name: str,
):
    """Builds a throwaway HOME and asdf data dir populated with fake installs.

    Yields a 5-tuple: (home_dir, asdf_dir, all version bin dirs,
    home-selected bin dirs, locally-selected bin dirs).
    """
    with temporary_dir() as home_dir, temporary_dir() as asdf_dir:
        home_path = Path(home_dir)
        asdf_path = Path(asdf_dir)
        plugin_dir = asdf_path / "plugins" / tool_name
        installs_dir = asdf_path / "installs" / tool_name

        # ~/.tool-versions selects the "home" versions for this tool.
        home_versions_str = " ".join(materialize_indices(fake_versions, fake_home_versions))
        (home_path / ".tool-versions").write_text(
            f"nodejs lts\njava 8\n{tool_name} {home_versions_str}\n"
        )

        version_bin_dirs: list[str] = []
        dirs_to_create: list[Path] = [home_path, asdf_path, plugin_dir, installs_dir]
        for version in fake_versions:
            bin_dir = installs_dir / version / "bin"
            version_bin_dirs.append(f"{bin_dir}")
            dirs_to_create.append(bin_dir)
        for directory in dirs_to_create:
            directory.mkdir(parents=True, exist_ok=True)

        yield (
            home_dir,
            asdf_dir,
            version_bin_dirs,
            # fake_home_version_dirs
            materialize_indices(version_bin_dirs, fake_home_versions),
            # fake_local_version_dirs
            materialize_indices(version_bin_dirs, fake_local_versions),
        )
def test_get_asdf_dir() -> None:
    """ASDF_DATA_DIR wins; otherwise $HOME/.asdf; with neither set, None."""
    home = PurePath("♡")
    explicit_root = home / "explicit"

    assert explicit_root == get_asdf_data_dir(
        EnvironmentVars({"ASDF_DATA_DIR": str(explicit_root)})
    )
    assert home / ".asdf" == get_asdf_data_dir(EnvironmentVars({"HOME": str(home)}))
    assert get_asdf_data_dir(EnvironmentVars({})) is None
def get_asdf_paths(
rule_runner: RuleRunner,
env_tgt: EnvironmentTarget,
env: Mapping[str, str],
*,
standard: bool,
local: bool,
) -> AsdfToolPathsResult:
rule_runner.set_session_values(
{
CompleteEnvironmentVars: CompleteEnvironmentVars(env),
}
)
return rule_runner.request(
AsdfToolPathsResult,
[
AsdfToolPathsRequest(
env_tgt=env_tgt,
tool_name="python",
tool_description="<test>",
resolve_standard=standard,
resolve_local=local,
paths_option_name="<test>",
)
],
)
@pytest.mark.parametrize(
    ("env_tgt_type", "should_have_values"),
    (
        (LocalEnvironmentTarget, True),
        (None, True),
        (DockerEnvironmentTarget, False),
        (RemoteEnvironmentTarget, False),
    ),
)
def test_get_asdf_paths(
    env_tgt_type: type[LocalEnvironmentTarget]
    | type[DockerEnvironmentTarget]
    | type[RemoteEnvironmentTarget]
    | None,
    should_have_values: bool,
) -> None:
    """asdf paths resolve only for local (or unset) environments.

    Exercises both the "all installs" fallback and resolution via the HOME
    and build-root `.tool-versions` files, against a fabricated asdf root.
    """
    # 3.9.4 is intentionally "left out" so that it's only found if the "all installs" fallback is
    # used
    all_python_versions = ["2.7.14", "3.5.5", "3.7.10", "3.9.4", "3.9.5"]
    asdf_home_versions = [0, 1, 2]
    asdf_local_versions = [2, 1, 4]
    asdf_local_versions_str = " ".join(
        materialize_indices(all_python_versions, asdf_local_versions)
    )
    rule_runner = RuleRunner(
        rules=[
            *asdf.rules(),
            QueryRule(AsdfToolPathsResult, (AsdfToolPathsRequest,)),
        ]
    )
    # The build root's .tool-versions file drives "local" version selection.
    rule_runner.write_files(
        {
            ".tool-versions": "\n".join(
                [
                    "nodejs 16.0.1",
                    "java current",
                    f"python {asdf_local_versions_str}",
                    "rust 1.52.0",
                ]
            )
        }
    )
    with fake_asdf_root(
        all_python_versions, asdf_home_versions, asdf_local_versions, tool_name="python"
    ) as (
        home_dir,
        asdf_dir,
        expected_asdf_paths,
        expected_asdf_home_paths,
        expected_asdf_local_paths,
    ):
        extra_kwargs: dict = {}
        if env_tgt_type is DockerEnvironmentTarget:
            # Docker targets are given the required image field.
            extra_kwargs = {
                DockerImageField.alias: "my_img",
            }
        env_tgt = EnvironmentTarget(
            env_tgt_type(extra_kwargs, Address("flem")) if env_tgt_type is not None else None
        )
        # Check the "all installed" fallback
        result = get_asdf_paths(
            rule_runner, env_tgt, {"ASDF_DATA_DIR": asdf_dir}, standard=True, local=False
        )
        all_paths = result.standard_tool_paths
        # Now resolve through the HOME and build-root .tool-versions files.
        result = get_asdf_paths(
            rule_runner,
            env_tgt,
            {"HOME": home_dir, "ASDF_DATA_DIR": asdf_dir},
            standard=True,
            local=True,
        )
        home_paths = result.standard_tool_paths
        local_paths = result.local_tool_paths
        if should_have_values:
            # The order the filesystem returns the "installed" folders is arbitrary
            assert set(expected_asdf_paths) == set(all_paths)
            # These have a fixed order defined by the `.tool-versions` file
            assert expected_asdf_home_paths == home_paths
            assert expected_asdf_local_paths == local_paths
        else:
            # asdf bails quickly on non-local environments
            assert () == all_paths
            assert () == home_paths
            assert () == local_paths
|
[
"noreply@github.com"
] |
asherf.noreply@github.com
|
252d08db541ea7b978cda04b6a453f3f175f932e
|
fb5c5d50d87a6861393d31911b9fae39bdc3cc62
|
/Scripts/sims4communitylib/enums/relationship_bit_collections_enum.py
|
d8546691caa00683e94d009a6b38e0155c43d651
|
[
"CC-BY-4.0"
] |
permissive
|
ColonolNutty/Sims4CommunityLibrary
|
ee26126375f2f59e5567b72f6eb4fe9737a61df3
|
58e7beb30b9c818b294d35abd2436a0192cd3e82
|
refs/heads/master
| 2023-08-31T06:04:09.223005
| 2023-08-22T19:57:42
| 2023-08-22T19:57:42
| 205,197,959
| 183
| 38
| null | 2023-05-28T16:17:53
| 2019-08-29T15:48:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonRelationshipBitCollectionId(CommonInt):
    """Identifiers for vanilla relationship bit collections."""
    # NOTE(review): the numeric values appear to be the game's tuning instance
    # IDs for each collection — verify against the current game version when
    # adding or changing entries.
    INVALID: 'CommonRelationshipBitCollectionId' = 0
    CHILD: 'CommonRelationshipBitCollectionId' = 195565
    # Family collections.
    FAMILY: 'CommonRelationshipBitCollectionId' = 15806
    FAMILY_ACQUIRED_NEGATIVE: 'CommonRelationshipBitCollectionId' = 162462
    FAMILY_ACQUIRED_NEUTRAL: 'CommonRelationshipBitCollectionId' = 164649
    FAMILY_ACQUIRED_POSITIVE: 'CommonRelationshipBitCollectionId' = 164128
    # Friendship collections.
    FRIEND: 'CommonRelationshipBitCollectionId' = 15807
    FRIEND_AT_LEAST_FRIEND: 'CommonRelationshipBitCollectionId' = 273773
    # Romance collections.
    ROMANTIC: 'CommonRelationshipBitCollectionId' = 240628
    ROMANTIC_HAVE_BEEN_EXES: 'CommonRelationshipBitCollectionId' = 274292
    # Sentiment collections.
    SENTIMENT_ADORING: 'CommonRelationshipBitCollectionId' = 246492
    SENTIMENT_BITTER: 'CommonRelationshipBitCollectionId' = 240104
    SENTIMENT_CLOSE: 'CommonRelationshipBitCollectionId' = 240103
    SENTIMENT_ENAMORED: 'CommonRelationshipBitCollectionId' = 240107
    SENTIMENT_FURIOUS: 'CommonRelationshipBitCollectionId' = 240105
    SENTIMENT_GUILTY: 'CommonRelationshipBitCollectionId' = 246491
    SENTIMENT_HURT: 'CommonRelationshipBitCollectionId' = 246490
    SENTIMENT_LONG_TERM: 'CommonRelationshipBitCollectionId' = 240114
    SENTIMENT_MOTIVATING: 'CommonRelationshipBitCollectionId' = 252843
    SENTIMENT_NEGATIVE: 'CommonRelationshipBitCollectionId' = 240110
    SENTIMENT_POSITIVE: 'CommonRelationshipBitCollectionId' = 240109
    SENTIMENT_SHORT_TERM: 'CommonRelationshipBitCollectionId' = 240113
|
[
"ColonolNutty@hotmail.com"
] |
ColonolNutty@hotmail.com
|
0d093c3ca016d6d2803a429a4bd995ada08b48ac
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/automation/azure-mgmt-automation/azure/mgmt/automation/aio/operations/_fields_operations.py
|
bf24b786c68ed2d5f9cba471c8f73ce85a80e483
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,384
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._fields_operations import build_list_by_type_request
from .._vendor import AutomationClientMixinABC
# typing.Literal exists from Python 3.8; fall back to typing_extensions earlier.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore  # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional per-call response hook: (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated client code — manual edits will be lost on regeneration.
class FieldsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.automation.aio.AutomationClient`'s
        :attr:`fields` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Pipeline plumbing is injected positionally by the generated service
        # client, or by keyword when constructed explicitly.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_type(
        self, resource_group_name: str, automation_account_name: str, module_name: str, type_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.TypeField"]:
        """Retrieve a list of fields of a given type identified by module name.
        :param resource_group_name: Name of an Azure Resource group. Required.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account. Required.
        :type automation_account_name: str
        :param module_name: The name of module. Required.
        :type module_name: str
        :param type_name: The name of type. Required.
        :type type_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TypeField or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.automation.models.TypeField]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-08-08"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-08"))
        cls: ClsType[_models.TypeFieldListResult] = kwargs.pop("cls", None)
        # Map HTTP status codes to the exception types callers are expected to catch.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # Builds the first request from the operation parameters, or a plain GET
        # against the service-supplied next_link for subsequent pages.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_type_request(
                    resource_group_name=resource_group_name,
                    automation_account_name=automation_account_name,
                    module_name=module_name,
                    type_name=type_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_type.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page; always returns None as the continuation token
        # because paging is driven by next_link inside get_next.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("TypeFieldListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem) # type: ignore
            return None, AsyncList(list_of_elem)
        # Fetches a single page, raising a mapped/ARM-formatted error on non-200.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_by_type.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/modules/{moduleName}/types/{typeName}/fields"
    }
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
869dcff017fa2a44d1aecccde2ac567af4cf6ac9
|
3d9063912b3b7760005ccaf27524b1dfe998e311
|
/ltp/utils/convertor.py
|
7587edf06c4f929b313d022519862b887859e101
|
[] |
no_license
|
little-bigtiger/ltp
|
9ca7046771752a40a57e8f49ed78074d83dd7479
|
058c374fc31a63724320f6a13d26dd897b28b220
|
refs/heads/master
| 2023-05-30T07:03:51.472276
| 2021-06-21T02:13:20
| 2021-06-21T02:13:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*_
# Author: Yunlong Feng <ylfeng@ir.hit.edu.cn>
import torch
from torch._six import container_abcs
def map2device(batch, device=torch.device('cpu')):
    """Recursively moves every tensor in a (possibly nested) batch to *device*.

    Handles tensors, mappings, namedtuples, and sequences; any other value is
    returned unchanged.
    """
    # Fix: torch._six was removed from modern PyTorch; container_abcs was
    # merely an alias for the stdlib collections.abc module.
    import collections.abc
    if isinstance(batch, str):
        # Guard: str is a Sequence, so without this check a string input
        # recursed forever (each 1-char string iterates to itself).
        return batch
    batch_type = type(batch)
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    elif isinstance(batch, collections.abc.Mapping):
        return {key: map2device(batch[key], device=device) for key in batch}
    elif isinstance(batch, tuple) and hasattr(batch, '_fields'):  # namedtuple
        # NOTE(review): zip(*batch) transposes the fields (collate-style);
        # confirm this is the intended semantics for namedtuple batches.
        return batch_type(*(map2device(samples, device=device) for samples in zip(*batch)))
    elif isinstance(batch, collections.abc.Sequence):
        return [map2device(it, device=device) for it in batch]
    else:
        return batch
def convert2npy(batch):
    """Recursively converts every tensor in a nested batch to a numpy array.

    Tensors are moved to CPU first; mappings, namedtuples, and sequences are
    traversed; other values pass through unchanged.
    """
    # Fix: torch._six was removed from modern PyTorch; container_abcs was
    # merely an alias for the stdlib collections.abc module.
    import collections.abc
    if isinstance(batch, str):
        # Guard: str is a Sequence, so without this check a string input
        # recursed forever.
        return batch
    batch_type = type(batch)
    if isinstance(batch, torch.Tensor):
        # Equivalent to the previous map2device(batch).numpy() with the
        # default CPU device, without the extra indirection.
        return batch.cpu().numpy()
    elif isinstance(batch, collections.abc.Mapping):
        return {key: convert2npy(batch[key]) for key in batch}
    elif isinstance(batch, tuple) and hasattr(batch, '_fields'):  # namedtuple
        # NOTE(review): zip(*batch) transposes the fields (collate-style);
        # confirm this is the intended semantics for namedtuple batches.
        return batch_type(*(convert2npy(samples) for samples in zip(*batch)))
    elif isinstance(batch, collections.abc.Sequence):
        return [convert2npy(it) for it in batch]
    else:
        return batch
|
[
"ylfeng@ir.hit.edu.cn"
] |
ylfeng@ir.hit.edu.cn
|
4234dc2895d7126760d5a72ef991a59628694535
|
463c053bcf3f4a7337b634890720ea9467f14c87
|
/release/ray_release/command_runner/_wait_cluster.py
|
f7008828947614612adcc8a46a632853493925e1
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
pdames/ray
|
e8faddc4440976211a6bcead8f8b6e62c1dcda01
|
918d3601c6519d333f10910dc75eb549cbb82afa
|
refs/heads/master
| 2023-01-23T06:11:11.723212
| 2022-05-06T22:55:59
| 2022-05-06T22:55:59
| 245,515,407
| 1
| 1
|
Apache-2.0
| 2023-01-14T08:02:21
| 2020-03-06T20:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
import argparse
import time
import ray
# "auto" attaches to the Ray cluster this script is launched inside, rather
# than starting a new local instance.
ray.init(address="auto")
parser = argparse.ArgumentParser()
parser.add_argument(
    "num_nodes", type=int, help="Wait for this number of nodes (includes head)"
)
parser.add_argument("max_time_s", type=int, help="Wait for this number of seconds")
parser.add_argument(
    "--feedback_interval_s",
    type=int,
    default=10,
    help="Wait for this number of seconds",
)
args = parser.parse_args()
curr_nodes = 0
start = time.time()
next_feedback = start
max_time = start + args.max_time_s
# Poll every 5 seconds until num_nodes have joined, printing progress at
# --feedback_interval_s and aborting once max_time_s elapses.
while not curr_nodes >= args.num_nodes:
    now = time.time()
    if now >= max_time:
        raise RuntimeError(
            f"Maximum wait time reached, but only "
            f"{curr_nodes}/{args.num_nodes} nodes came up. Aborting."
        )
    if now >= next_feedback:
        passed = now - start
        print(
            f"Waiting for more nodes to come up: "
            f"{curr_nodes}/{args.num_nodes} "
            f"({passed:.0f} seconds passed)"
        )
        next_feedback = now + args.feedback_interval_s
    time.sleep(5)
    # Count of all nodes currently known to the cluster (head included,
    # per the num_nodes help text above).
    curr_nodes = len(ray.nodes())
passed = time.time() - start
print(
    f"Cluster is up: {curr_nodes}/{args.num_nodes} nodes online after "
    f"{passed:.0f} seconds"
)
|
[
"noreply@github.com"
] |
pdames.noreply@github.com
|
51b5cc22c33340c76b5ef0e285b2c525496e9af8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/62/25332/submittedfiles/main.py
|
13737f40f1a9c141d8c52153a57e1b9fe8ac9897
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import funcoes
#COMECE AQUI
def calcula_valor_absoluto(x):
    """Returns the absolute value of x."""
    return -x if x < 0 else x
def calcula_pi(m):
    """Approximates pi with m terms of the Nilakantha series.

    pi ~= 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
    Terms are only accumulated when 1 <= m <= 2000; outside that range the
    base value 3 is returned unchanged.
    """
    soma = 0
    if 1 <= m <= 2000:
        denominador = 2
        i = 1
        while i <= m:
            termo = 4 / (denominador * (denominador + 1) * (denominador + 2))
            if i % 2 != 0:
                soma = soma + termo
            else:
                soma = soma - termo
            i = i + 1
            denominador = denominador + 2
    pi = 3 + soma
    return pi
def fat(n):
    """Returns n! (n factorial); returns 1 for n <= 0.

    Bug fix: the original computed the product but had no return statement,
    so it always returned None — which made calcula_co_seno crash with a
    TypeError when dividing by fat(exp).
    """
    fatorial = 1
    i = 1
    while i <= n:
        fatorial = fatorial * i
        i = i + 1
    return fatorial
def calcula_co_seno(z, epsilon):
    # Appears to approximate cos(z) via its Taylor series
    # (1 - z^2/2! + z^4/4! - ...), stopping once a term falls to epsilon.
    # NOTE(review): depends on fat(); as defined above, fat() lacks a return
    # statement and yields None, so the divisions below raise TypeError until
    # that is fixed.
    soma=0
    exp=2
    t=((z**exp)/fat(exp))
    i=1
    while t>epsilon:
        # Recomputes the current term, then alternates its sign into the sum
        # (odd i subtracts, even i adds).
        t=((z**exp)/fat(exp))
        if i%2!=0:
            soma=soma-t
        else:
            soma=soma+t
        exp=exp+2
        i=i+1
    # The series' leading 1 is added at the end.
    coseno=soma+1
    return coseno
def razao_Aurea(m,epsilon):
    """Computes the golden ratio as 2*cos(pi/5), using the series helpers
    calcula_pi (m terms) and calcula_co_seno (tolerance epsilon)."""
    angulo = calcula_pi(m) / 5
    return 2 * calcula_co_seno(angulo, epsilon)
# NOTE(review): under Python 3, input() returns str, so razao_Aurea would be
# called with strings; this script appears written for Python 2 (cf. the
# __future__ division import above), where input() evaluates the typed
# expression to a number. Under Python 3, wrap with int()/float().
m=input('Digite o valor de m: ')
epsilon=input('Digite o valor de epsilon: ')
razao=razao_Aurea(m,epsilon)
print ('%.15f'%calcula_pi(m))
print ('%.15f'%razao)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3b03072ec7ed26a376ab97669dac40c77589f552
|
f99341a3a043137ba2c24c06b80ff30073cf028e
|
/config/env_constructor.py
|
1eccb22fc91fee860e4cb13babbf3617e41bcfac
|
[] |
no_license
|
szrlee/ucbmq_code
|
30ed4c7645800824a2db47647b2a17289b10bc88
|
d7e84b1c64e3be2bd7c52057d1ff6d91ee0779cf
|
refs/heads/main
| 2023-05-09T00:18:02.900944
| 2021-06-05T18:33:11
| 2021-06-05T18:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from rlberry.envs.finite import GridWorld
def constructor(nrows, ncols, success_probability):
    """Builds a wall-free GridWorld of nrows x ncols with the given
    transition success probability."""
    return GridWorld(
        nrows=nrows,
        ncols=ncols,
        walls=(),
        success_probability=success_probability,
    )
|
[
"omar.drwch@gmail.com"
] |
omar.drwch@gmail.com
|
05d6ff8c6fbea905e0072c34bcac275e68cc1197
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/s1_3_getHtml_20210209165850.py
|
576f3f18bbd11d0765890ce07bf6aa32fbf8af7b
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818
| 2021-02-20T06:54:58
| 2021-02-20T06:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,514
|
py
|
# This module is called from 3R Automation Component.
import os
import sys
# pdftotree is available as part of the virtual environment for 3R Python processing
import pdftotree
import json
from pprint import pprint
import pdfminer
import matplotlib.pyplot as plt
import ocr_extract as imgpdf
from utils.ocr.handle_image import *
# pdf_doc = json.loads(sys.argv[1])['doc_name']
# NOTE(review): input/output locations are hard-coded absolute paths; the
# commented lines show the intended argv-driven configuration.
pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/images/PAN_Card_Scan_AKC.png'
# html_path = json.loads(sys.argv[1])['html_path']
# Output HTML is named after the source file's basename.
html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'+os.path.basename(pdf_doc).split('.')[0] + '.html'
print(f'HTML Path is set to {html_path}')
# Populated only when the input turns out not to be a parseable PDF.
path_if_not_pdf_doc = ''
pdf_doc_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf'
# Use the following for testing
# pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/Sri_khyati_CV.pdf'
# html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/Sri_khyati_CV.html'
def create_hocr(pdf_doc='', html_path='', model_path='./model/model.pkl'):
return pdftotree.parse(pdf_doc, html_path=html_path, model_type=None, model_path=model_path, visualize=False)
create_hocr_output = None
try:
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except pdfminer.pdfparser.PDFSyntaxError as pdfException:
print(f'')
create_hocr_output = pdfException
path_if_not_pdf_doc = pdf_doc
try:
# pdf_doc = extract_pdf_from_image(pdf_doc, pdf_path=pdf_doc_path, action=1, psm=11)
image, line_items_coordinates = mark_region(path_if_not_pdf_doc)
# load the original image
image = cv2.imread(path_if_not_pdf_doc)
# get co-ordinates to crop the image
c = line_items_coordinates[1]
# cropping image img = image[y0:y1, x0:x1]
img = image[c[0][1]:c[1][1], c[0][0]:c[1][0]]
plt.figure(figsize=(10,10))
plt.imshow(img)
# convert the image to black and white for better OCR
ret,thresh1 = cv2.threshold(img,120,255,cv2.THRESH_BINARY)
# pytesseract image to string to get results
text = str(pytesseract.image_to_string(thresh1, config='--psm 6'))
print(text)
convert_text_to_pdf(text, pdf_doc_path, os.path.basename(pdf_doc).split('.')[0])
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except Exception as exc:
create_hocr_output = Exception
print(f'Exception 2 {exc}')
# extract_pdf_from_image(pdf_doc, pdf_path=pdf_doc_path, action=2, psm=6)
# Use the following for testing non PDF files
# print(f'{os.path.basename(pdf_doc).split(".")[0]+".pdf"}')
# print(f'{os.path.abspath(pdf_doc).split(".")[0]+".pdf"}')
# try:
# # imgpdf.convert_image_to_pdf(pdf_doc, os.path(pdf_doc)+os.path.basename(pdf_doc).split('.')[0]+'.pdf')
# imgpdf.convert_image_to_pdf(pdf_doc, os.path.dirname(pdf_doc), os.path.abspath(pdf_doc).split(".")[0]+".pdf")
# except Exception as exc:
# print(exc)
# Output of "print" statement is passed to the calling program
proc_status = "OK" if create_hocr_output == None else "Not a PDF document or unable to process image at path "+path_if_not_pdf_doc
json_out = {"pdf_doc": pdf_doc, "process_status": proc_status}
json_out = {"message": "We are testing/making some changes to this API, please try after in about 30 mins. Sorry for the inconvenience."}
print(json_out)
|
[
"{abhi@third-ray.com}"
] |
{abhi@third-ray.com}
|
c1e656fc6fc8fdf41ad315d269930268dabe63c0
|
c117f7064b7132778bead5a8b77b67e2429a2b7a
|
/gmail.py
|
9832cf9a0595ba497995e39e74e170770e6e4ad1
|
[] |
no_license
|
gurudurairaj/gp
|
664306f41f73f8b620ba74b048372e1c94e59bc7
|
2fce98f7428103b54b9edd075d4a83dc434c2926
|
refs/heads/master
| 2020-04-15T05:00:45.934019
| 2019-05-26T17:54:54
| 2019-05-26T17:54:54
| 164,405,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# Read one address from stdin and print YES when it looks like a valid
# gmail.com address, NO otherwise.
a = input()
valid = False
# Exactly one "@", exactly one "." (the one in the ".com" suffix).
if a.count("@") == 1 and a.count(".") == 1 and a[len(a) - 4:] == ".com":
    local, _, remainder = a.partition("@")
    domain = remainder.split(".")[0]
    # Local part needs at least 3 characters; domain must be exactly "gmail".
    valid = len(local) >= 3 and domain == "gmail"
print("YES" if valid else "NO")
|
[
"noreply@github.com"
] |
gurudurairaj.noreply@github.com
|
e39d2e29afaf499c1d468a3292e396993aae8ec7
|
04ad466db13a382cc679d9562e515d57b54c47e6
|
/scripts/schools8_eb.py
|
149fb0ad7602a286b481eed890c6b06c67581f68
|
[
"MIT"
] |
permissive
|
shivaditya-meduri/pyprobml
|
d9423463ae7b352c52f3d005fbf33ee66d366971
|
9dbe0c95f4ec061b98bf32fa3ac1deafe2e0c04d
|
refs/heads/master
| 2023-04-12T13:09:45.572071
| 2021-05-07T18:22:02
| 2021-05-07T18:22:02
| 356,659,290
| 1
| 0
|
MIT
| 2021-04-11T05:04:38
| 2021-04-10T18:07:31
| null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Empirical Bayes (MLE-II) shrinkage for the classic eight-schools data.
import numpy as np

# Observed treatment effects and their standard errors for J = 8 schools.
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])

d = len(y)
mu = np.mean(y)                       # type-II MLE of the shared mean
V = np.sum(np.square(y - mu))         # total squared deviation
s2 = V / d
sigma2 = np.mean(np.square(sigma))    # average sampling variance
tau2 = np.maximum(0, s2 - sigma2)     # moment estimate of between-school variance, clipped at 0
lam = sigma2 / (sigma2 + tau2)        # shrinkage factor in [0, 1]
print(lam)

# Shrink each school's estimate toward the common mean.
muShrunk = mu + (1 - lam) * (y - mu)
print(muShrunk)
|
[
"murphyk@gmail.com"
] |
murphyk@gmail.com
|
ca743993675615421f0c5d02f0357b0ba36b9559
|
f3acd48e0d553143e941b16b5241cb86272a87f4
|
/Laboratorium/_04_klasy/Zwierzeta.py
|
1ff08b829c973bb1b1696c38dfc392be78f6aa69
|
[] |
no_license
|
tborzyszkowski/PythonWyklad
|
1ceb4b5e1fca8c41f4ad5fb5b32a100b58e24a4d
|
58871126689418d51a4e4ba0b9ab884de260f3c5
|
refs/heads/master
| 2023-05-10T20:55:21.140705
| 2023-05-07T14:58:49
| 2023-05-07T14:58:49
| 44,440,440
| 14
| 44
| null | 2020-02-27T22:32:03
| 2015-10-17T14:33:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 548
|
py
|
# Animal ("Zwierz") class hierarchy demo: replay() always delegates to the
# subclass-specific glos() ("voice"). NOTE: Python 2 — print statements below.
class Zwierz:
    # Base class: glos() is a no-op placeholder returning None.
    def replay(self):
        return self.glos()
    def glos(self):
        pass
class Ssak(Zwierz):
    # Mammal.
    def glos(self):
        return "Ssak: glos"
class Kot(Ssak):
    # Cat.
    def glos(self):
        return "Kot: glos"
class Pies(Ssak):
    # Dog.
    def glos(self):
        return "Pies: glos"
class Naczelny(Ssak):
    # Primate.
    def glos(self):
        return "Naczelny: glos"
class Haker(Naczelny):
    # Inherits the primate voice; its own override is intentionally disabled.
    pass
    # def glos(self):
    #     return "Naczelny: glos"
# Demo: prints "Pies: glosKot: glos" then "Naczelny: glos" (Python 2 syntax).
p = Pies()
k = Kot()
print p.replay() + k.replay()
h= Haker()
print h.replay()
|
[
"t.borzyszkowski@gmail.com"
] |
t.borzyszkowski@gmail.com
|
b34da93daff3b0147a5beeb599b6785d29d18905
|
9f7c106d50681b394d822fbdc5e3ad25f04d927c
|
/week6_nissi_miika/week6_ass4_nissi_miika.py
|
c35170d25f810049f893fd6c2f03cbeec7c89d04
|
[] |
no_license
|
miikanissi/python_course_summer_2020
|
edf032b1d9815dfa6e0b5f7c902f7b469117c04f
|
3969288b969b3db8f9d7f2fdb67905f13d4969fa
|
refs/heads/master
| 2022-12-02T09:33:42.625374
| 2020-08-24T17:38:59
| 2020-08-24T17:38:59
| 273,909,320
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
def factorial(x):
    """Return x! for positive integers.

    For x < 1 an error message is printed and None is returned
    (the ValueError is raised and swallowed internally).
    """
    try:
        if x < 1:
            raise ValueError
        # Recursive definition with x == 1 as the base case.
        return 1 if x == 1 else x * factorial(x - 1)
    except ValueError:
        print("Factorial can not be negative.")
# Demo: -20 prints the error message then None; 20 and 4 print their factorials.
print(factorial(-20))
print(factorial(20))
print(factorial(4))
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
0782a9b01c132cece78f0d640fbecb22de784ae2
|
ffcf85c7866e4d95d17afadc7d823a123fd79247
|
/Info/modules/passport/views.py
|
de73375f606b69baa4bba8b0d84658356ea3a338
|
[] |
no_license
|
CcLmL/InfoNews
|
1c7b9df7f7924dac5750886444a46ace313095bc
|
2c8dad2719fde697c8283ec88721479701e81bf9
|
refs/heads/master
| 2020-03-27T01:14:57.059554
| 2018-09-04T07:16:20
| 2018-09-04T07:16:20
| 145,694,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,916
|
py
|
import random
import re
from datetime import datetime
from flask import request, abort, current_app, make_response, jsonify, session
from Info import sr, db
from Info.lib.yuntongxun.sms import CCP
from Info.models import User
from Info.modules.passport import passport_blu
from Info.utils.captcha.pic_captcha import captcha
# 2.使用蓝图来装饰路由
from Info.utils.response_code import RET, error_map
@passport_blu.route('/get_img_code')
def get_img_code():
    """Generate a picture captcha, cache its text in redis (180s) and return the image bytes."""
    # Fetch the request parameter.
    img_code_id = request.args.get("img_code_id")
    # Validate the parameter.
    if not img_code_id:
        return abort(403)
    # Generate the picture captcha.
    img_name, img_code_text, img_code_bytes = captcha.generate_captcha()  # third-party helper (in utils)
    # Save the image key and captcha text to redis.
    # Every database operation is wrapped in try/except for robustness.
    try:
        sr.set("img_code_id_" + img_code_id, img_code_text, ex=180)
    except Exception as e:
        current_app.logger.error(e)
        return abort(500)
    # Return the captcha image:
    # build the response...
    response = make_response(img_code_bytes)
    # ...and set the content type header.
    response.content_type="image/jpeg"
    return response
# Get an SMS verification code
@passport_blu.route('/get_sms_code', methods=["POST"])
def get_sms_code():
    """Validate the picture captcha, then generate an SMS code and cache it in redis (60s)."""
    # Fetch parameters — request.json reads an application/json body.
    img_code_id = request.json.get("img_code_id")
    img_code = request.json.get("img_code")
    mobile = request.json.get("mobile")
    # Validate parameters.
    if not all([img_code_id,img_code,mobile]):
        return jsonify(errno=RET.PARAMERR,errmsg=error_map[RET.PARAMERR])  # custom codes from response_code
    # Validate the mobile-number format.
    # NOTE(review): this pattern allows '1' as the second digit ("1[135678]"),
    # unlike register()/login() which use "1[35678]" — confirm which is intended.
    if not re.match(r"1[135678]\d{9}$",mobile):
        return jsonify(errno=RET.PARAMERR, errmsg="手机格式不正确")
    # Look up the captcha text by its image key.
    try:
        real_img_code = sr.get("img_code_id_" + img_code_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    # Check the picture captcha.
    if not real_img_code:  # expired?
        return jsonify(errno=RET.PARAMERR, errmsg="验证码以过期")
    if img_code.upper() != real_img_code:  # correct?
        return jsonify(errno=RET.PARAMERR, errmsg="验证码不正确")
    # Check whether the mobile number is already registered:
    # fetch the matching record from the database.
    try:
        user = User.query.filter_by(mobile=mobile).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    # Does this user already exist?
    if user:  # report "already exists"
        return jsonify(errno=RET.DATAEXIST, errmsg=error_map[RET.DATAEXIST])
    # All checks passed — send the SMS:
    # generate a 4-digit random code.
    sms_code = "%04d" % random.randint(0,9999)
    current_app.logger.info("短信验证码为:%s" % sms_code)
    # res_code = CCP().send_template_sms(mobile, [sms_code, 5], 1)
    # if res_code == -1:  # SMS sending failed
    #     return jsonify(errno=RET.THIRDERR, errmsg=error_map[RET.THIRDERR])
    # Save the SMS code to redis.
    try:
        sr.set("sms_code_id_" + mobile, sms_code, ex=60)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    # Return the send result as JSON.
    return jsonify(errno=RET.OK, errmsg=error_map[RET.OK])
# User registration
@passport_blu.route('/register', methods=["POST"])
def register():
    """Verify the SMS code, create the user record, and log the new user in."""
    # Fetch parameters.
    mobile = request.json.get("mobile")
    password = request.json.get("password")
    sms_code = request.json.get("sms_code")
    # Validate parameters.
    if not all([mobile, password, sms_code]):
        return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])
    # Validate the mobile-number format.
    if not re.match(r'1[35678]\d{9}', mobile):
        return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])
    # Look up the cached SMS code for this mobile number.
    try:
        real_sms_code = sr.get("sms_code_id_" + mobile)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    # Check the SMS code.
    if not real_sms_code:  # expired?
        return jsonify(errno=RET.PARAMERR, errmsg="验证码已过期")
    if sms_code != real_sms_code:  # correct?
        return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])
    # Persist the new user.
    user = User()
    user.mobile = mobile
    # The password property setter encapsulates hashing.
    user.password = password
    user.nick_name = mobile
    # Record the last login time.
    user.last_login = datetime.now()
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    # Keep login state — no separate login step needed.
    session["user_id"] = user.id
    return jsonify(errno=RET.OK, errmsg=error_map[RET.OK])
# User login
@passport_blu.route('/login', methods=["POST"])
def login():
    """Validate credentials, record the login time, and keep session state."""
    # Fetch parameters.
    mobile = request.json.get("mobile")
    password = request.json.get("password")
    # Validate parameters.
    if not all([mobile, password]):
        return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])
    # Validate the mobile-number format.
    if not re.match(r"1[35678]\d{9}", mobile):
        return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])
    # Look up the user model by mobile number.
    # BUGFIX: query on the User class (User.query), not on a throwaway
    # instance (User().query) — consistent with get_sms_code() above.
    try:
        user = User.query.filter_by(mobile=mobile).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])
    if not user:
        return jsonify(errno=RET.USERERR, errmsg=error_map[RET.USERERR])
    # Check the password (hash comparison handled by the model).
    if not user.check_password(password):
        return jsonify(errno=RET.PWDERR, errmsg=error_map[RET.PWDERR])
    # Record the last login time; no explicit commit needed because
    # SQLALCHEMY_COMMIT_ON_TEARDOWN auto-commits at request teardown.
    user.last_login = datetime.now()
    # Keep login state.
    session["user_id"] = user.id
    # Return the result as JSON.
    return jsonify(errno=RET.OK, errmsg=error_map[RET.OK])
# Log out
@passport_blu.route('/logout')
def logout():
    """Drop the login state from the session and return an OK envelope."""
    # pop() with a default avoids a KeyError when no user is logged in.
    session.pop("user_id", None)
    # Return the result.
    return jsonify(errno=RET.OK, errmsg=error_map[RET.OK])
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
4f207fc33a82256e32426744f6f89f6799d2c8f1
|
aaf12fe9de8da36f4dc85973568f7e747b312c16
|
/log_output.py
|
1731fde8938341360c34f445f9883c728d4d1f04
|
[] |
no_license
|
UESTC-Liuxin/pytorch
|
aa092f15ba2187bb9a9e73fd50309a6abbb5362c
|
d1029f0c38813f1a0a18bbb9499d06d93829d79a
|
refs/heads/master
| 2021-01-02T01:20:50.132014
| 2020-04-08T02:01:12
| 2020-04-08T02:01:12
| 239,429,974
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
import logging
class Mylog(object):
    """Configure file + console logging and expose simple output helpers.

    The file handler appends every DEBUG-and-above record to *logFilename*;
    the console handler mirrors INFO-and-above records with a shorter format.
    """
    def __init__(self,logFilename):
        # Root logger -> file, at DEBUG level, append mode.
        logging.basicConfig(
            level=logging.DEBUG,
            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
            datefmt='%a, %d %b %Y %H:%M:%S',
            filename=logFilename,
            filemode='a',
        )
        # Additional console handler at INFO level.
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(
            logging.Formatter('%(asctime)s %(filename)s : %(levelname)s %(message)s'))
        logging.getLogger().addHandler(console_handler)

    def debug_out(self,str):
        """Emit *str* as a DEBUG record (file only)."""
        logging.debug(str)

    def info_out(self,str):
        """Emit *str* as an INFO record (file and console)."""
        logging.info(str)
|
[
"625092890@qq.com"
] |
625092890@qq.com
|
f7f3d8f02b6d308c3f3bc24d81e2dd1a11e6d95d
|
0f5668e822e30f2ebcddeb7121d5227305f3e3e7
|
/rate_the_movies/admin.py
|
3aaf97a82685e6ab7fc2ae93104abcb9d2835682
|
[] |
no_license
|
hpaasch/weekend_movies
|
ff8e7e531202d1b6a02388e5087bfcd22b89f7e8
|
d7a71815358fa1dfec842d73a05f95f59eff5ab9
|
refs/heads/master
| 2021-01-19T00:25:43.980980
| 2016-06-12T18:31:31
| 2016-06-12T18:31:31
| 60,794,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from django.contrib import admin
# Register your models here.
from rate_the_movies.models import Rater, Movie, Rating, TopMovie
# Expose each movie-rating model in the Django admin with default options.
admin.site.register(Rater)
admin.site.register(Movie)
admin.site.register(Rating)
admin.site.register(TopMovie)
|
[
"hepaasch@gmail.com"
] |
hepaasch@gmail.com
|
1570ef0139326ca4a972af344522a632c7ba1439
|
6a1f6500d2319a2b7d974e3075747b86f102198e
|
/Path Sum2.py
|
9703bc4df062f41f7e1a445a26019ace51bfb9fb
|
[] |
no_license
|
vpc20/binary-trees
|
257672bb3b76c63c1530787a17c664665f5ed15e
|
90e074ec33acfa431285fbc3236335814a37feb2
|
refs/heads/master
| 2023-04-30T01:14:06.556085
| 2021-05-20T08:27:42
| 2021-05-20T08:27:42
| 271,945,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
# Given the root of a binary tree and an integer targetSum, return all root-to-leaf paths where
# each path's sum equals targetSum.
#
# A leaf is a node with no children.
#
# Example 1:
# Input: root = [5, 4, 8, 11, null, 13, 4, 7, 2, null, null, 5, 1], targetSum = 22
# Output: [[5, 4, 11, 2], [5, 8, 4, 5]]
#
# Example 2:
# Input: root = [1, 2, 3], targetSum = 5
# Output: []
#
# Example 3:
# Input: root = [1, 2], targetSum = 0
# Output: []
#
# Constraints:
#
# The number of nodes in the tree is in the range[0, 5000].
# -1000 <= Node.val <= 1000
# -1000 <= targetSum <= 1000
from BinaryTrees import binary_tree
def path_sum(root, sum):
    """Return every root-to-leaf path whose node values add up to *sum*.

    Paths are returned as lists of values, left subtree explored first.
    An empty tree yields an empty list.
    """
    if root is None:
        return []
    paths = []

    def walk(node, total, trail):
        # Leaf reached: keep the trail only when it hits the target sum.
        if node.left is None and node.right is None:
            if total == sum:
                paths.append(trail)
            return
        # Recurse left first, then right, extending the running total/trail.
        for child in (node.left, node.right):
            if child is not None:
                walk(child, total + child.val, trail + [child.val])

    walk(root, root.val, [root.val])
    return paths
# Demo: LeetCode example tree; expected output [[5, 4, 11, 2], [5, 8, 4, 5]].
t1 = binary_tree([5, 4, 8, 11, None, 13, 4, 7, 2, None, None, None, None, 5, 1])
print(t1)
print(path_sum(t1, 22))
|
[
"cuevasvp@gmail.com"
] |
cuevasvp@gmail.com
|
6e217e6f3939ed8e26166e26843fa9f5b34e2330
|
60be3894ad491bde502b8f6909a026ee115d952e
|
/aiosmb/authentication/kerberos/multiplexor.py
|
3f54b8c402def066580a983a3ce93219f5a30a6b
|
[] |
no_license
|
topotam/aiosmb
|
7c97c6a9806c84a9fae28fa372cc6903fa6ec0c5
|
e2ece67bbf380f576b154b09ea5fd63d9b4ecf4c
|
refs/heads/master
| 2023-06-25T17:41:03.605226
| 2021-07-27T18:31:12
| 2021-07-27T18:31:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,412
|
py
|
##
##
## Interface to allow remote kerberos authentication via Multiplexor
##
##
##
##
##
## TODO: RPC auth type is not implemented or tested!!!!
import enum
from aiosmb.authentication.spnego.asn1_structs import KRB5Token
from minikerberos.gssapi.gssapi import get_gssapi, GSSWrapToken
from minikerberos.protocol.asn1_structs import AP_REQ, AP_REP, TGS_REP
from minikerberos.protocol.encryption import Enctype, Key, _enctype_table
from multiplexor.operator.external.sspi import KerberosSSPIClient
from multiplexor.operator import MultiplexorOperator
import enum
import io
import os
from asn1crypto.core import ObjectIdentifier
class KRB5_MECH_INDEP_TOKEN:
    """GSS-API mechanism-independent token wrapper.

    https://tools.ietf.org/html/rfc2743#page-81 (Mechanism-Independent
    Token Format): 0x60 tag, DER length, mechanism OID, then the inner
    mechanism token bytes.
    """
    def __init__(self, data, oid, remlen=None):
        self.oid = oid    # mechanism OID as a dotted string
        self.data = data  # inner mechanism token bytes
        # dont set this — remlen is filled in by from_buffer() when parsing
        self.length = remlen

    @staticmethod
    def from_bytes(data):
        """Parse a serialized token from raw bytes."""
        return KRB5_MECH_INDEP_TOKEN.from_buffer(io.BytesIO(data))

    @staticmethod
    def from_buffer(buff):
        """Parse a serialized token from a readable binary buffer."""
        start = buff.read(1)
        if start != b'\x60':
            raise Exception('Incorrect token data!')
        remaining_length = KRB5_MECH_INDEP_TOKEN.decode_length_buffer(buff)
        token_data = buff.read(remaining_length)

        buff = io.BytesIO(token_data)
        pos = buff.tell()
        buff.read(1)
        oid_length = KRB5_MECH_INDEP_TOKEN.decode_length_buffer(buff)
        buff.seek(pos)
        # +2 covers the OID tag and length octets preceding the OID body.
        token_oid = ObjectIdentifier.load(buff.read(oid_length + 2))
        return KRB5_MECH_INDEP_TOKEN(buff.read(), str(token_oid), remlen=remaining_length)

    @staticmethod
    def decode_length_buffer(buff):
        """Decode a DER length field from the buffer (short or long form)."""
        lf = buff.read(1)[0]
        if lf <= 127:
            length = lf
        else:
            bcount = lf - 128
            length = int.from_bytes(buff.read(bcount), byteorder='big', signed=False)
        return length

    @staticmethod
    def encode_length(length):
        """Encode *length* as a DER length field (short or long form)."""
        if length <= 127:
            return length.to_bytes(1, byteorder='big', signed=False)
        else:
            lb = length.to_bytes((length.bit_length() + 7) // 8, 'big')
            return (128 + len(lb)).to_bytes(1, byteorder='big', signed=False) + lb

    def to_bytes(self):
        """Return (header_bytes, inner_token_bytes) for this token."""
        t = ObjectIdentifier(self.oid).dump() + self.data
        t = b'\x60' + KRB5_MECH_INDEP_TOKEN.encode_length(len(t)) + t
        # BUGFIX: the original returned t[:-len(self.data)], which evaluates
        # to b'' (t[:-0]) instead of the full header when self.data is empty.
        header_len = len(t) - len(self.data)
        return t[:header_len], self.data
class ISC_REQ(enum.IntFlag):
    """InitializeSecurityContext request flags (SSPI ISC_REQ_* bit values)."""
    DELEGATE = 1 << 0
    MUTUAL_AUTH = 1 << 1
    REPLAY_DETECT = 1 << 2
    SEQUENCE_DETECT = 1 << 3
    CONFIDENTIALITY = 1 << 4
    USE_SESSION_KEY = 1 << 5
    PROMPT_FOR_CREDS = 1 << 6
    USE_SUPPLIED_CREDS = 1 << 7
    ALLOCATE_MEMORY = 1 << 8
    USE_DCE_STYLE = 1 << 9
    DATAGRAM = 1 << 10
    CONNECTION = 1 << 11
    CALL_LEVEL = 1 << 12
    FRAGMENT_SUPPLIED = 1 << 13
    EXTENDED_ERROR = 1 << 14
    STREAM = 1 << 15
    INTEGRITY = 1 << 16
    IDENTIFY = 1 << 17
    NULL_SESSION = 1 << 18
    MANUAL_CRED_VALIDATION = 1 << 19
    RESERVED1 = 1 << 20
    FRAGMENT_TO_FIT = 1 << 21
    HTTP = 1 << 28  # 0x10000000
class SMBKerberosMultiplexor:
    """Kerberos authentication provider that performs the actual SSPI calls
    on a remote agent reached through a Multiplexor server."""

    def __init__(self, settings):
        self.iterations = 0
        self.settings = settings
        self.mode = 'CLIENT'
        self.ksspi = None        # remote KerberosSSPIClient, created lazily
        self.client = None
        self.target = None
        self.gssapi = None       # per-session GSSAPI wrapper built from the subkey
        self.etype = None
        self.session_key = None
        self.seq_number = None
        self.setup()

    def setup(self):
        # Nothing to do locally; the remote SSPI session is set up on demand.
        return

    def get_seq_number(self):
        """
        Fetches the starting sequence number. This is either zero or can be found in the authenticator field of the
        AP_REQ structure. As windows uses a random seq number AND a subkey as well, we can't obtain it by decrypting the
        AP_REQ structure. Insead under the hood we perform an encryption operation via EncryptMessage API which will
        yield the start sequence number
        """
        return self.seq_number

    async def encrypt(self, data, message_no):
        """GSS_Wrap *data* with the negotiated session keys."""
        return self.gssapi.GSS_Wrap(data, message_no)

    async def decrypt(self, data, message_no, direction='init', auth_data=None):
        """GSS_Unwrap *data* with the negotiated session keys."""
        return self.gssapi.GSS_Unwrap(data, message_no, direction=direction, auth_data=auth_data)

    def get_session_key(self):
        return self.session_key

    async def authenticate(self, authData = None, flags = None, seq_number = 0, is_rpc = False):
        """Drive the (possibly multi-round) Kerberos exchange.

        Returns (token_bytes, to_continue, error) like the other aiosmb
        authentication providers. TODO: the RPC path is not tested.
        """
        # authdata is only for api compatibility reasons
        if self.ksspi is None:
            await self.start_remote_kerberos()
        try:
            if is_rpc == True:
                if self.iterations == 0:
                    # First round: request a DCE-style context and get the AP_REQ.
                    flags = ISC_REQ.CONFIDENTIALITY | \
                            ISC_REQ.INTEGRITY | \
                            ISC_REQ.MUTUAL_AUTH | \
                            ISC_REQ.REPLAY_DETECT | \
                            ISC_REQ.SEQUENCE_DETECT | \
                            ISC_REQ.USE_DCE_STYLE

                    apreq, res = await self.ksspi.authenticate('cifs/%s' % self.settings.target, flags=str(flags.value))
                    self.iterations += 1
                    return apreq, True, None

                elif self.iterations == 1:
                    # Second round: feed back the server token, then pull the
                    # session key (and sequence number for non-RC4 etypes).
                    data, err = await self.ksspi.authenticate('cifs/%s' % self.settings.target, flags=str(flags.value), token_data=authData)
                    if err is not None:
                        return None, None, err

                    self.session_key, err = await self.ksspi.get_session_key()
                    if err is not None:
                        return None, None, err

                    aprep = AP_REP.load(data).native
                    subkey = Key(aprep['enc-part']['etype'], self.session_key)
                    self.gssapi = get_gssapi(subkey)

                    if aprep['enc-part']['etype'] != 23:  # no need for seq number in rc4
                        raw_seq_data, err = await self.ksspi.get_seq_number()
                        if err is not None:
                            return None, None, err
                        self.seq_number = GSSWrapToken.from_bytes(raw_seq_data[16:]).SND_SEQ

                    self.iterations += 1
                    await self.ksspi.disconnect()
                    return data, False, None

                else:
                    raise Exception('SSPI Kerberos -RPC - auth encountered too many calls for authenticate.')

            else:
                # Plain (non-RPC) path: one round trip yields the AP_REQ.
                apreq, res = await self.ksspi.authenticate('cifs/%s' % self.settings.target)
                #print('MULTIPLEXOR KERBEROS SSPI, APREQ: %s ERROR: %s' % (apreq, res))
                if res is None:
                    self.session_key, res = await self.ksspi.get_session_key()
                    await self.ksspi.disconnect()
                return apreq, res, None

        except Exception as e:
            # BUGFIX: the original returned the undefined name `err` here,
            # raising NameError and masking the actual exception.
            return None, None, e

    async def start_remote_kerberos(self):
        """Connect to the Multiplexor server and spawn a virtual SSPI endpoint."""
        try:
            #print(self.settings.get_url())
            #print(self.settings.agent_id)
            self.operator = MultiplexorOperator(self.settings.get_url())
            await self.operator.connect()
            # Create the virtual SSPI server on the chosen agent.
            server_info = await self.operator.start_sspi(self.settings.agent_id)
            #print(server_info)

            sspi_url = 'ws://%s:%s' % (server_info['listen_ip'], server_info['listen_port'])
            #print(sspi_url)
            self.ksspi = KerberosSSPIClient(sspi_url)
            await self.ksspi.connect()
        except Exception as e:
            # Best-effort: log the traceback and signal failure with None.
            import traceback
            traceback.print_exc()
            return None
|
[
"info@skelsec.com"
] |
info@skelsec.com
|
4565df909b3e0e3e2cbca97207b6da138461b346
|
fa346a2d5886420e22707a7be03599e634b230a9
|
/temboo/Library/GovTrack/Bill.py
|
0e1c3ee23f956f8cd9161b911714ba5a64e5bbcb
|
[] |
no_license
|
elihuvillaraus/entity-resolution
|
cebf937499ed270c3436b1dd25ab4aef687adc11
|
71dd49118a6e11b236861289dcf36436d31f06bc
|
refs/heads/master
| 2021-12-02T17:29:11.864065
| 2014-01-08T04:29:30
| 2014-01-08T04:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,472
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Bill
# Retrieves bills and resolutions in the U.S. Congress since 1973 (the 93rd Congress).
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Bill(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the Bill Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/GovTrack/Bill')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return BillInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in a Bill-specific result set."""
        return BillResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Create the execution handle for an in-flight run of this Choreo."""
        return BillChoreographyExecution(session, exec_id, path)
class BillInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Bill
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated Temboo SDK wrapper: one setter per Choreo input parameter.

    def set_BillID(self, value):
        """
        Set the value of the BillID input for this Choreo. ((optional, integer) Specify the ID number of the bill to return only the record for that bill.)
        """
        InputSet._set_input(self, 'BillID', value)

    def set_BillType(self, value):
        """
        Set the value of the BillType input for this Choreo. ((optional, string) The bill's type. See documentation for acceptable bill types.)
        """
        InputSet._set_input(self, 'BillType', value)

    def set_Congress(self, value):
        """
        Set the value of the Congress input for this Choreo. ((optional, integer) The number of the congress in which the bill was introduced. The current congress is 112.)
        """
        InputSet._set_input(self, 'Congress', value)

    def set_CurrentStatusDate(self, value):
        """
        Set the value of the CurrentStatusDate input for this Choreo. ((optional, string) The date of the last major action on the bill corresponding to the CurrentStatus (in YYYY-MM-DD format).)
        """
        InputSet._set_input(self, 'CurrentStatusDate', value)

    def set_CurrentStatus(self, value):
        """
        Set the value of the CurrentStatus input for this Choreo. ((optional, string) The current status of the bill. See documentation for acceptable inputs.)
        """
        InputSet._set_input(self, 'CurrentStatus', value)

    def set_IntroducedDate(self, value):
        """
        Set the value of the IntroducedDate input for this Choreo. ((optional, string) The date the bill was introduced (in YYYY-MM-DD format).)
        """
        InputSet._set_input(self, 'IntroducedDate', value)

    def set_Limit(self, value):
        """
        Set the value of the Limit input for this Choreo. ((optional, integer) Results are paged 20 per call by default. Set the limit input to a high value to get all of the results at once.)
        """
        InputSet._set_input(self, 'Limit', value)

    def set_Number(self, value):
        """
        Set the value of the Number input for this Choreo. ((optional, integer) The bill's number (just the integer part).)
        """
        InputSet._set_input(self, 'Number', value)

    def set_Order(self, value):
        """
        Set the value of the Order input for this Choreo. ((optional, string) You can order the results using fieldname (ascending) or -fieldname (descending) where "fieldname" is one of these values: current_status_date, introduced_date, senate_floor_schedule_postdate.)
        """
        InputSet._set_input(self, 'Order', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) Specify the format of the response. Default is JSON, but XML is also possible.)
        """
        InputSet._set_input(self, 'ResponseFormat', value)

    def set_SchedulePostdate(self, value):
        """
        Set the value of the SchedulePostdate input for this Choreo. ((optional, string) The date on which the bill was posted on the Senate Floor Schedule which is different from the date it was expected to be debated (in YYYY-MM-DD format).)
        """
        InputSet._set_input(self, 'SchedulePostdate', value)

    def set_Sponsor(self, value):
        """
        Set the value of the Sponsor input for this Choreo. ((optional, integer) The ID of the sponsor of the bill.)
        """
        InputSet._set_input(self, 'Sponsor', value)
class BillResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Bill Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Parse *str* as JSON and return the resulting object."""
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from GovTrack.)
        """
        return self._output.get('Response', None)
class BillChoreographyExecution(ChoreographyExecution):
    # Execution handle for the Bill Choreo; builds Bill-specific result sets.
    def _make_result_set(self, response, path):
        """Wrap a raw response in a BillResultSet."""
        return BillResultSet(response, path)
|
[
"cedric.warny@gmail.com"
] |
cedric.warny@gmail.com
|
b9cf3b8a2cf28697fa9ba6706aa2eacca14bb3ed
|
767b5482f3c5b9c2c85575c711e37561f5b8f198
|
/engine/plugins/no_request_check_file_url.py
|
0e03353b89807171f88766e8dd5cd2fab7be8a9d
|
[] |
no_license
|
zhupite233/scaner
|
8e39c903f295d06195be20067043087ec8baac4f
|
7c29c02bca2247a82bcbb91cc86955cc27998c95
|
refs/heads/master
| 2020-05-18T03:23:03.459222
| 2019-04-15T04:29:10
| 2019-04-15T04:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from engine.engine_utils.common import *
from engine.logger import scanLogger as logger
def run_url(http, ob, item):
    # Flag potential unrestricted file-upload points: a URL containing
    # "upload", or any request parameter declared with "type": "file".
    # Returns a list of scan records (empty on no match or on error).
    # NOTE: Python 2 syntax (except Exception,e).
    try:
        path = item['url']
        params = item['params']
        method = item['method']
        timeout = ob.get('webTimeout')
        pattern1 = r'.+(upload).*'
        pattern2 = r'("type":\s*"file")'
        result = []
        if re.search(pattern1, path, re.I) or re.search(pattern2, json.dumps(params), re.I):
            # Synthesize a response/request pair for the report record.
            res = {'status': '200','content-location': path, 'pragma': 'no-cache', 'cache-control':
                'no-cache, must-revalidate', "content-type": 'text/html;charset=utf-8'}
            response = getResponse(res)
            request = getRequest(path, domain=ob['domain'])
            # detail (zh): "potential file-upload risk point detected on the site"
            detail = "在站点上检测到潜在的文件上传风险点"
            result.append(getRecord(ob, path, ob['level'], detail, request, response))
        return result
    except Exception,e:
        logger.error("File:DirectoryTraversal.py, run_url function :%s" % (str(e)))
        return []
|
[
"lixiang@yundun.com"
] |
lixiang@yundun.com
|
aeb1b2b02dedd76208af4900290767ab944c32da
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/leetcode/bit-manipulation/majority-element.py
|
1bf72be5da81fd021752159644b0555f580d16ab
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448
| 2019-04-14T06:47:30
| 2019-04-14T06:47:30
| 18,814,777
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
class Solution(object):
    def majorityElement(self, nums):
        """Boyer-Moore voting: return the value occurring more than n/2 times.

        :type nums: List[int]
        :rtype: int
        """
        candidate, votes = 0, 0
        for value in nums:
            if votes == 0:
                # No standing candidate — adopt the current value.
                candidate, votes = value, 1
            elif value == candidate:
                # Same as candidate: reinforce it.
                votes += 1
            else:
                # Different value: cancel one vote.
                votes -= 1
        return candidate
|
[
"jwyx88003@gmail.com"
] |
jwyx88003@gmail.com
|
1b521983045b44485e0f452dcc1e99cb5581eb79
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/security_group_info.py
|
76e7cc60717b39fc78380ac4d1d3214564057876
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,998
|
py
|
# coding: utf-8
import re
import six
class SecurityGroupInfo:
    """Security-group model defined in the huaweicloud SDK.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'name': 'str',
        'description': 'str',
        'project_id': 'str',
        'created_at': 'datetime',
        'updated_at': 'datetime',
        'enterprise_project_id': 'str',
        'security_group_rules': 'list[SecurityGroupRule]'
    }

    attribute_map = {
        'id': 'id',
        'name': 'name',
        'description': 'description',
        'project_id': 'project_id',
        'created_at': 'created_at',
        'updated_at': 'updated_at',
        'enterprise_project_id': 'enterprise_project_id',
        'security_group_rules': 'security_group_rules'
    }

    def __init__(self, id=None, name=None, description=None, project_id=None, created_at=None, updated_at=None, enterprise_project_id=None, security_group_rules=None):
        """SecurityGroupInfo - a model defined in huaweicloud sdk.

        :param id: Unique ID of the security group (UUID with hyphens).
        :param name: Security group name (1-64 chars; digits, letters,
            Chinese, underscore, hyphen, dot).
        :param description: Description (0-255 chars, must not contain
            '<' or '>').
        :param project_id: ID of the project owning the security group.
        :param created_at: Creation time (UTC, yyyy-MM-ddTHH:mm:ss).
        :param updated_at: Last update time (UTC, yyyy-MM-ddTHH:mm:ss).
        :param enterprise_project_id: Owning enterprise project ID; up to 36
            bytes, UUID with hyphens, or the string "0" for the default
            enterprise project.
        :param security_group_rules: List of SecurityGroupRule objects.
        """
        self._id = None
        self._name = None
        self._description = None
        self._project_id = None
        self._created_at = None
        self._updated_at = None
        self._enterprise_project_id = None
        self._security_group_rules = None
        self.discriminator = None
        self.id = id
        self.name = name
        self.description = description
        self.project_id = project_id
        self.created_at = created_at
        self.updated_at = updated_at
        self.enterprise_project_id = enterprise_project_id
        self.security_group_rules = security_group_rules

    @property
    def id(self):
        """Unique ID of the security group (standard UUID with hyphens)."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the unique ID of the security group."""
        self._id = id

    @property
    def name(self):
        """Security group name (1-64 chars)."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the security group name (1-64 chars)."""
        self._name = name

    @property
    def description(self):
        """Description of the security group (0-255 chars, no '<' or '>')."""
        return self._description

    @description.setter
    def description(self, description):
        """Set the description of the security group."""
        self._description = description

    @property
    def project_id(self):
        """ID of the project this security group belongs to."""
        return self._project_id

    @project_id.setter
    def project_id(self, project_id):
        """Set the owning project ID."""
        self._project_id = project_id

    @property
    def created_at(self):
        """Creation time (UTC, yyyy-MM-ddTHH:mm:ss)."""
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Set the creation time."""
        self._created_at = created_at

    @property
    def updated_at(self):
        """Last update time (UTC, yyyy-MM-ddTHH:mm:ss)."""
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Set the last update time."""
        self._updated_at = updated_at

    @property
    def enterprise_project_id(self):
        """Owning enterprise project ID; "0" means the default project."""
        return self._enterprise_project_id

    @enterprise_project_id.setter
    def enterprise_project_id(self, enterprise_project_id):
        """Set the owning enterprise project ID."""
        self._enterprise_project_id = enterprise_project_id

    @property
    def security_group_rules(self):
        """List of SecurityGroupRule objects attached to this group."""
        return self._security_group_rules

    @security_group_rules.setter
    def security_group_rules(self, security_group_rules):
        """Set the security group rules."""
        self._security_group_rules = security_group_rules

    def to_dict(self):
        """Return the model's properties as a dict, masking sensitive fields."""
        result = {}
        # NOTE: six.iteritems was replaced with dict.items so this model no
        # longer hard-depends on six; items() works on both Python 2 and 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Serialize the model to a JSON string."""
        try:
            import simplejson as json
        except ImportError:
            # Fall back to the standard library when simplejson is absent.
            import json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal models."""
        if not isinstance(other, SecurityGroupInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e2399df3821254f4ef5f3dbdb2b39081cbdb0eae
|
d54476a109bb7a75ab18c742e53425358eae2df7
|
/cart/migrations/0007_auto_20190831_1312.py
|
effc4f4f7371a10996aef08b421bd7b58b47fea4
|
[] |
no_license
|
OmarGonD/stickers_gallito
|
8b46673a73d3fa6fdbdeb9726804f3e3c176543b
|
4aa4f5aeb272b393410ed8b518aa39040f46a97b
|
refs/heads/master
| 2022-12-09T20:38:23.672740
| 2019-12-13T14:41:41
| 2019-12-13T14:41:41
| 163,198,792
| 0
| 1
| null | 2022-04-22T21:00:01
| 2018-12-26T16:35:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
# Generated by Django 2.2.1 on 2019-08-31 18:12
from django.db import migrations
class Migration(migrations.Migration):
    """Rename SampleItem.file_a to 'file' and drop SampleItem.file_b."""

    dependencies = [
        ('cart', '0006_auto_20190831_1021'),
    ]

    operations = [
        # file_a becomes the model's single "file" field.
        migrations.RenameField(
            model_name='sampleitem',
            old_name='file_a',
            new_name='file',
        ),
        migrations.RemoveField(
            model_name='sampleitem',
            name='file_b',
        ),
    ]
|
[
"oma.gonzales@gmail.com"
] |
oma.gonzales@gmail.com
|
a37246089f6d753b353a98fd4c9d55fbb8fb0a98
|
d0207e019bd72bd1ef77dbde369b7234ba44e7d7
|
/misc/projector/tfrecords.py
|
8c3d165f31b6509c86bb601a6e1774e7708955bf
|
[] |
no_license
|
307509256/tf-face-recognizer
|
467458ee764fa44b18f33aa697de886a62a18263
|
3eb4b0fee1350302c57a755afb1a3b8537e7ec7e
|
refs/heads/master
| 2021-01-14T10:37:19.812201
| 2017-02-14T13:10:34
| 2017-02-14T13:10:34
| 82,047,255
| 1
| 0
| null | 2017-02-15T10:11:39
| 2017-02-15T10:11:39
| null |
UTF-8
|
Python
| false
| false
| 3,671
|
py
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from model.recognizer import Recognizer
from tensorflow.contrib.tensorboard.plugins import projector
import tensorflow as tf
# Command-line flags: checkpoint to restore, TFRecord input file, and the
# directory where projector artifacts are written.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/model.ckpt',
                           """Path to model checkpoints.""")
tf.app.flags.DEFINE_string('input_file', 'data.tfrecords',
                           """Path to the TFRecord data.""")
tf.app.flags.DEFINE_string('logdir', os.path.join(os.path.dirname(__file__), 'logdir'),
                           """Directory where to write checkpoints.""")
def inputs(files, batch_size=0):
    """Read batches of (standardized image, raw image, label) from TFRecords.

    Each record holds a JPEG-encoded image and an int64 label; images are
    cropped/padded to 96x96 before standardization.
    """
    filename_queue = tf.train.string_input_producer(files)
    _, serialized = tf.TFRecordReader().read(filename_queue)
    parsed = tf.parse_single_example(serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
    })
    raw = tf.image.decode_jpeg(parsed['image_raw'], channels=3)
    raw = tf.image.resize_image_with_crop_or_pad(raw, 96, 96)
    standardized = tf.image.per_image_standardization(raw)
    return tf.train.batch([standardized, raw, parsed['label']], batch_size)
def main(argv=None):
    """Embed one batch of images and export TensorBoard projector data.

    Restores the recognizer from FLAGS.checkpoint_path, runs a single batch
    of 900 images from FLAGS.input_file through the net, then writes a
    metadata TSV, a 30x30 sprite image and fc5/fc6 embedding checkpoints
    into FLAGS.logdir for the TensorBoard embedding projector.
    """
    filepath = FLAGS.input_file
    if not os.path.exists(filepath):
        raise Exception('%s does not exist' % filepath)
    r = Recognizer(batch_size=900)
    input_images, orig_images, labels = inputs([filepath], batch_size=r.batch_size)
    r.inference(input_images, 1)
    # The two fully-connected activations that will be embedded.
    fc5 = tf.get_default_graph().get_tensor_by_name('fc5/fc5:0')
    fc6 = tf.get_default_graph().get_tensor_by_name('fc6/fc6:0')
    with tf.Session() as sess:
        variable_averages = tf.train.ExponentialMovingAverage(r.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        # Restore variables one at a time; any variable missing from the
        # checkpoint is freshly initialized instead of failing the run.
        for name, v in variables_to_restore.items():
            try:
                tf.train.Saver([v]).restore(sess, FLAGS.checkpoint_path)
            except Exception:
                sess.run(tf.variables_initializer([v]))
        tf.train.start_queue_runners(sess=sess)
        outputs = sess.run({'fc5': fc5, 'fc6': fc6, 'images': orig_images, 'labels': labels})
        # write to metadata file (one label per line, same order as batch)
        metadata_path = os.path.join(FLAGS.logdir, 'metadata.tsv')
        with open(metadata_path, 'w') as f:
            for index in outputs['labels']:
                f.write('%d\n' % index)
        # write to sprite image file: a 30x30 grid of 96x96 thumbnails.
        # NOTE(review): tf.unpack and integer-first tf.concat are pre-1.0
        # TensorFlow APIs -- this script requires an old TF release.
        image_path = os.path.join(FLAGS.logdir, 'sprite.jpg')
        unpacked = tf.unpack(outputs['images'], 900)
        rows = []
        for i in range(30):
            rows.append(tf.concat(1, unpacked[i*30:(i+1)*30]))
        jpeg = tf.image.encode_jpeg(tf.concat(0, rows))
        with open(image_path, 'wb') as f:
            f.write(sess.run(jpeg))
        # add embedding data: one projector embedding per fc tensor.
        targets = [tf.Variable(e, name=name) for name, e in outputs.items() if name.startswith('fc')]
        config = projector.ProjectorConfig()
        for v in targets:
            embedding = config.embeddings.add()
            embedding.tensor_name = v.name
            embedding.metadata_path = metadata_path
            embedding.sprite.image_path = image_path
            embedding.sprite.single_image_dim.extend([96, 96])
        sess.run(tf.variables_initializer(targets))
        summary_writer = tf.train.SummaryWriter(FLAGS.logdir)
        projector.visualize_embeddings(summary_writer, config)
        graph_saver = tf.train.Saver(targets)
        graph_saver.save(sess, os.path.join(FLAGS.logdir, 'model.ckpt'))


if __name__ == '__main__':
    tf.app.run()
|
[
"sugi1982@gmail.com"
] |
sugi1982@gmail.com
|
533b9f5190aec7b5d6e52be40c8027017425f7eb
|
c8d6cfc2148fe15a272110277d511fb8522c4c26
|
/mongodbpro/Deleteonemongo.py
|
e5727beda1527ccd2402a2a26aa3b7b1e492fc4a
|
[] |
no_license
|
n1e2h4a/AllBasicProgram
|
9d1705315cf08678cf615a77921e17728ed41f94
|
88f27c24906de3ea2b2ee1f94adaa648ef1e51c3
|
refs/heads/master
| 2021-01-04T06:40:49.207179
| 2020-03-11T13:40:52
| 2020-03-11T13:40:52
| 240,428,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from pymongo import MongoClient

# Connect to the local MongoDB server (default port 27017).
client = MongoClient('localhost',27017)
db=client.result    # database "result"
col=db.biodata      # collection "biodata"
# Remove the first document whose "city" field equals "Dehradun".
col.delete_one({"city":"Dehradun"})
|
[
"guptaniharika851@gmail.com"
] |
guptaniharika851@gmail.com
|
9d8ae1275a631e023a71b4de3489f88302c8e72a
|
3de95d8882ad44e8ff821b2563a067d4e5249759
|
/demo.py
|
1c46efa6f932098b01ac8f6ff7f969b913d9d383
|
[
"MIT"
] |
permissive
|
wuxiaolianggit/Image-Matching
|
8c74f7557b6dd7c7a6f71b2627a94f4e112ae5e9
|
3213a8a574fa7bcc476d3de1c7370c268bf817a7
|
refs/heads/master
| 2021-04-08T00:42:08.063935
| 2020-01-19T05:25:54
| 2020-01-19T05:25:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
import math
import cv2 as cv
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from models import ResNetMatchModel
def get_image(file):
    """Load *file* with OpenCV, convert BGR->RGB and preprocess to a tensor.

    Uses the module-level ``transformer`` pipeline and moves the result to
    the module-level ``device``.
    """
    bgr = cv.imread(file)
    rgb = Image.fromarray(bgr[..., ::-1], 'RGB')  # reverse channels: BGR -> RGB
    tensor = transformer(rgb)
    return tensor.to(device)
def get_feature(model, file):
    """Return the L2-normalized embedding of the image stored in *file*."""
    batch = get_image(file).unsqueeze(dim=0)  # add a batch dimension
    with torch.no_grad():
        feature = model(batch)[0].cpu().numpy()
    # Normalize so that dot products between features are cosines.
    return feature / np.linalg.norm(feature)
if __name__ == "__main__":
    device = torch.device('cpu')
    # Decision threshold in degrees on the angle between two embeddings;
    # presumably tuned on a validation set -- TODO confirm.
    threshold = 21.07971786746929
    filename = 'image_matching.pt'
    model = ResNetMatchModel()
    model.load_state_dict(torch.load(filename))
    model = model.to(device)
    model.eval()
    # Standard ImageNet preprocessing: resize, center-crop, normalize.
    transformer = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    x0 = get_feature(model, '0.jpg')
    x1 = get_feature(model, '6.jpg')
    # Features are unit-length, so the dot product is the cosine similarity;
    # clip guards acos against tiny floating-point overshoot.
    cosine = np.dot(x0, x1)
    cosine = np.clip(cosine, -1, 1)
    theta = math.acos(cosine)
    theta = theta * 180 / math.pi
    print(theta)
    print(theta <= threshold)  # True => the two images match
|
[
"liuyang12@focusmedia.cn"
] |
liuyang12@focusmedia.cn
|
3a47c0f2f17057a6e8afd7540a9da93d03b0f165
|
4dddd01ca6a60f2fa408ee55fbaebe868917184a
|
/myappium/xueqiu/__init__.py
|
1513984c6e62598b66393859814dc0c324b29b13
|
[] |
no_license
|
fanpl-sourse/all_study_practice
|
6f056c18f0eb7afd6af649e5b595895683bb0cbd
|
b02f25231855e149b95476b20dd8d53318cfe1a5
|
refs/heads/master
| 2023-01-22T09:13:15.107616
| 2020-11-30T05:53:43
| 2020-11-30T05:53:43
| 304,493,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/6 11:13
# @Author : 饭盆里
# @File : __init__.py.py
# @Software: PyCharm
# @desc :
|
[
"fanpengli@fangdd.com"
] |
fanpengli@fangdd.com
|
4d43d040cf2e02b62ba7af0f29b00d7da7e62a89
|
3950cb348a4a3ff6627d502dbdf4e576575df2fb
|
/.venv/Lib/site-packages/vtkmodules/qt/__init__.py
|
2ece15c0e01ce73eb0d862377e9f2c30d7c2e0ba
|
[] |
no_license
|
Bdye15/Sample_Programs
|
a90d288c8f5434f46e1d266f005d01159d8f7927
|
08218b697db91e55e8e0c49664a0b0cb44b4ab93
|
refs/heads/main
| 2023-03-02T04:40:57.737097
| 2021-01-31T03:03:59
| 2021-01-31T03:03:59
| 328,053,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
"""Qt module for VTK/Python.
Example usage:
import sys
import PyQt5
from PyQt5.QtWidgets import QApplication
from vtkmodules.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
app = QApplication(sys.argv)
widget = QVTKRenderWindowInteractor()
widget.Initialize()
widget.Start()
renwin = widget.GetRenderWindow()
For more information, see QVTKRenderWidgetConeExample() in the file
QVTKRenderWindowInteractor.py.
"""
import sys

# PyQtImpl can be set by the user before importing the interactor; otherwise
# pick the first Qt binding that has already been imported, if any.
PyQtImpl = next(
    (name for name in ("PyQt5", "PySide2", "PyQt4", "PySide") if name in sys.modules),
    None,
)

# Base class for QVTKRenderWindowInteractor; may be changed to "QGLWidget"
# in case of rendering errors (e.g. depth check problems, readGLBuffer
# warnings...).
QVTKRWIBase = "QWidget"

__all__ = ['QVTKRenderWindowInteractor']
|
[
"brady.dye@bison.howard.edu"
] |
brady.dye@bison.howard.edu
|
f379b0318ac9d679ec2825d99b6de6c3bc7d5341
|
5cc204e2ecb9a756127e7c71633a1edcdb3e989b
|
/pyvasp/vasp/vcon2ewald.py.bak
|
30a834d3cd254cafb1bd523eba78924105a3ee62
|
[] |
no_license
|
hopefulp/sandbox
|
1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8
|
4d26767f287be6abc88dc74374003b04d509bebf
|
refs/heads/master
| 2023-06-27T17:50:16.637851
| 2023-06-15T03:53:39
| 2023-06-15T03:53:39
| 218,209,112
| 1
| 0
| null | 2022-09-13T13:22:34
| 2019-10-29T05:14:02
|
C++
|
UTF-8
|
Python
| false
| false
| 2,917
|
bak
|
#!/usr/bin/python2.7
#heejin
# Python 2 script: converts a VASP CONTCAR plus a per-atom charge file into
# an Ewald-style input (cartesian coordinates with net charges appended).
import sys
import os
import re
import math

#usage description
if len(sys.argv)<3:
    print "Usage: [contfile] [chg file]"
    sys.exit()

confilename=sys.argv[1]
chgfilename=sys.argv[2]

# get ZVAL (valence electron count per species) from POTCAR into zval.tmp
os.system('grep ZVAL POTCAR | awk \'{print $6}\' > zval.tmp')

# direct to cartesian: if the coordinates are in Direct mode, convert the
# CONTCAR with the external "convasp" tool into CONTCAR.cart.
poscarfile = open(confilename)
line = poscarfile.readline()
line = poscarfile.readline()
line = poscarfile.readline()
line = poscarfile.readline()
line = poscarfile.readline()
line = poscarfile.readline()
parse = line.split()
if parse[0].isalpha():
    # line 6 held species names (VASP5 format); skip ahead one extra line
    line = poscarfile.readline()
line = poscarfile.readline()
line = poscarfile.readline()
if ('Direct' in line):
    poscarfile.close()
    # sed removes the species-name line so convasp accepts the file
    doos = 'cp ' + confilename + ' CONTCAR.tmp'
    os.system(doos)
    os.system('sed -i 6d CONTCAR.tmp')
    os.system('convasp -cart < CONTCAR.tmp > CONTCAR.cart')
    os.system('rm CONTCAR.tmp')
poscarfile.close()
# get poscar information: species count (nspecies), per-species atom counts
# (numlist) and the total number of atoms (natoms) from CONTCAR.cart.
poscarfile = open('CONTCAR.cart')
while 1:
    line = poscarfile.readline()
    if not line:
        break
    atomlist = line.split()
    line = poscarfile.readline() #mul
    line = poscarfile.readline() #a
    line = poscarfile.readline() #b
    line = poscarfile.readline() #c
    line = poscarfile.readline()
    # NOTE(review): counting spaces to get nspecies assumes single-space
    # separation in the counts line -- fragile, but preserved as-is.
    nspecies = line.count(' ')
    numlist = line.split()
    natoms = 0
    if numlist[0].isdigit():
        for i in range(nspecies):
            natoms = natoms + int(numlist[i])
    else:
        # VASP5 format: this line held species names, counts are on the next
        line = poscarfile.readline()
        nspecies = line.count(' ')
        numlist = line.split()
        for i in range(nspecies):
            natoms = natoms + int(numlist[i])
    break
poscarfile.close()

# Read one ZVAL per species from the temp file produced earlier.
zval = []
zvalfile = open('zval.tmp')
for i in range(nspecies):
    line = zvalfile.readline()
    zval.append(line)

# Net charge per atom = ZVAL - Bader charge; written one per line with an
# explicit '+' sign for positive values.
chgtmpfile = open('CONTCAR.chgtmp', 'w')
chgfile = open(chgfilename)
line = chgfile.readline()
for i in range(nspecies):
    for j in range(int(numlist[i])):
        line = chgfile.readline()
        chglist = line.split()
        chgval = float(zval[i]) - float(chglist[1])
        if chgval > 0:
            chgval2 = '+'+str(chgval)+'\n'
        else:
            chgval2 = str(chgval)+'\n'
        chgtmpfile.write(chgval2)
chgfile.close()
chgtmpfile.close()
# Merge the cartesian CONTCAR with the per-atom charges into <contfile>.vo:
# the 8 header lines are copied verbatim, then each coordinate line gets its
# charge appended.
poscarfile = open('CONTCAR.cart')
outfile = open(confilename+'.vo', 'w')
chgfile = open('CONTCAR.chgtmp')
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
line = poscarfile.readline()
outfile.write(line)
for i in range(natoms):
    line = poscarfile.readline()
    line2 = line.rstrip('\n')
    chgline = chgfile.readline()
    outline = line2 + ' ' + chgline
    outfile.write(outline)
poscarfile.close()
outfile.close()
chgfile.close()
#os.system('convasp -names +2 +5 -2 +1 < CONTCAR.cart > CONTCAR.fc')
# Clean up all intermediate files.
os.system('rm zval.tmp CONTCAR.chgtmp CONTCAR.cart')
|
[
"hopefulp@gmail.com"
] |
hopefulp@gmail.com
|
964138161e808eec89f029b24c77f9aea7216d0c
|
a404f504febdc835b69b72c4ac28b153885fc119
|
/Auditions/OneLevelToRuleThemAll.py
|
a52edb14ee9fa3d430484ab7d1a555d4a334a1a8
|
[] |
no_license
|
Chaboi45/CodeCombat
|
6093a2eae29ef00c0c277653c4ffd075c9e2ac4c
|
6e008a94e65bb72ca9292e303d391a4142de16f5
|
refs/heads/master
| 2021-07-02T23:24:52.050154
| 2017-09-21T07:42:21
| 2017-09-21T07:42:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
# http://codecombat.com/play/level/one-level-to-rule-them-all
summonTypes = ['paladin']
def summonTroops():
type = summonTypes[len(hero.built) % len(summonTypes)]
if hero.gold > hero.costOf(type):
hero.summon(type)
def commandTroops():
    """Issue orders to every friendly paladin."""
    # The index from enumerate() was never used, so iterate directly.
    for friend in hero.findFriends():
        if friend.type == 'paladin':
            CommandPaladin(friend)
def CommandPaladin(paladin):
    """Heal the hero when hurt, shield when the paladin is low, else attack."""
    if (paladin.canCast("heal")):
        if (hero.health < hero.maxHealth * 0.6):
            # BUG FIX: the original assigned ``target = self`` -- ``self`` is
            # undefined inside a plain function (NameError at runtime); the
            # heal is intended for the hero.
            target = hero
            if target:
                hero.command(paladin, "cast", "heal", target)
    elif (paladin.health < 100):
        hero.command(paladin, "shield")
    else:
        target = hero.findNearest(hero.findEnemies())
        hero.command(paladin, "attack", target)
def moveTo(position, fast=True):
    """Jump toward *position* when ready, far enough, and allowed; else walk."""
    can_jump = hero.isReady("jump") and hero.distanceTo(position) > 10
    if can_jump and fast:
        hero.jumpTo(position)
    else:
        hero.move(position)
def attack(target):
    """Close in on *target*, then bash / chain-lightning / plain attack."""
    if not target:
        return
    if hero.distanceTo(target) > 10:
        moveTo(target.pos)
    elif hero.isReady("bash"):
        hero.bash(target)
    elif hero.canCast('chain-lightning', target):
        hero.cast('chain-lightning', target)
    else:
        hero.attack(target)
while True:
    # Follow the player's flag when one is placed; otherwise summon,
    # command the troops, and fight the nearest enemy.
    flag = hero.findFlag()
    summonTroops()
    commandTroops()
    if flag:
        hero.pickUpFlag(flag)
    else:
        enemy = hero.findNearest(hero.findEnemies())
        if enemy:
            attack(enemy)
            # find some enemy to attack
            # use cleave when ready
|
[
"vadim-job-hg@yandex.ru"
] |
vadim-job-hg@yandex.ru
|
8d6abe2e2e1f83b0bb4a0f714216e5f73634d056
|
0930b6c994225d44818887716ce4e8771af86b81
|
/exercisesDosCapitulos/12-umaEspaconaveQueAtira/12.4-teclas/main.py
|
dffb2fa5141878bac5fdfd98453c9d4e2f62cd2f
|
[] |
no_license
|
jonasht/cursoIntesivoDePython
|
44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c
|
fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb
|
refs/heads/master
| 2023-05-23T20:44:34.372825
| 2021-06-19T12:13:46
| 2021-06-19T12:13:46
| 293,325,804
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
import pygame
def run_game():
    """Open an 800x600 white window; quit on window close, ESC or 'q'."""
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    running = True
    while running:
        screen.fill((255, 255, 255))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                print(f'apertado:{event.key} ')
                if event.key in (pygame.K_ESCAPE, pygame.K_q):
                    running = False
        pygame.display.update()


run_game()
|
[
"jhenriquet@outlook.com.br"
] |
jhenriquet@outlook.com.br
|
2fdcd0028c218f840341e29cb9bef90b16d5f6ca
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/118_v4/duplicates.py
|
a35079d32510853e3a93f85e69ce3e56175d560c
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 668
|
py
|
from collections import Counter
def get_duplicate_indices(words):
    """Given a list of words, loop through the words and check for each
    word if it occurs more than once.
    If so return the index of its first occurrence.
    For example in the following list 'is' and 'it'
    occur more than once, and they are at indices 0 and 1 so you would
    return [0, 1]:
    ['is', 'it', 'true', 'or', 'is', 'it', 'not?'] => [0, 1]
    Make sure the returning list is unique and sorted in ascending order."""
    # Single pass: remember each word's first index; on a repeat, record it.
    # This avoids the original Counter pass plus an O(n) list.index() call
    # per duplicate (worst case O(n^2)).
    first_index = {}
    dup_indices = set()
    for i, word in enumerate(words):
        if word in first_index:
            dup_indices.add(first_index[word])
        else:
            first_index[word] = i
    return sorted(dup_indices)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
4e09422ed00c845f9890abcf2e6c7a4f60011b78
|
cca46f82bf14e744abe1a7094ed81891f815bcd9
|
/todos/models.py
|
63be73ddb48e2399c6fe2739cbc095ad0b889e66
|
[] |
no_license
|
rikicop/backend
|
3e63c8a4801d7ac49feb3c5b1af566890911dd0f
|
319e1860448884e96cfea63702fa562c49cb8fb6
|
refs/heads/main
| 2023-07-13T00:28:51.853790
| 2021-08-09T15:41:37
| 2021-08-09T15:41:37
| 394,348,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from django.db import models
class Todo(models.Model):
    """A simple content item with a photo, author and text body."""

    # URL or path of the associated photo.
    foto = models.CharField(max_length=1000)
    # Author name; optional (may be left blank in forms).
    author = models.CharField(max_length=200,blank=True)
    title = models.CharField(max_length=200)
    description = models.TextField()
    body = models.TextField()

    def __str__(self):
        # Shown in the admin and shell representations.
        return self.title
|
[
"ruperto1@protonmail.com"
] |
ruperto1@protonmail.com
|
f741f8a504de7a76fb26c2d9679d86ace42029eb
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/44e2cbe7b7bf41703ef2eb45faa1f59962eb3b34-<post_request>-fix.py
|
a5cdb336f072dc6e6c5f2b037163bc23cb4ad5d7
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
def post_request(self, uri, pyld, hdrs):
    """POST *pyld* as JSON to *uri* on the Redfish API server.

    :param uri: full URL of the Redfish resource.
    :param pyld: payload object; serialized with json.dumps.
    :param hdrs: dict of HTTP headers to send.
    :return: dict with 'ret' (bool) and either 'resp' (the response object)
        on success or 'msg' (error text) on failure.
    """
    try:
        # NOTE(review): validate_certs=False disables TLS verification --
        # presumably deliberate for self-signed BMC certificates; confirm.
        resp = open_url(uri, data=json.dumps(pyld), headers=hdrs, method='POST', url_username=self.creds['user'], url_password=self.creds['pswd'], force_basic_auth=True, validate_certs=False, follow_redirects='all', use_proxy=False)
    # HTTPError is a URLError subclass, so it must be caught first.
    except HTTPError as e:
        return {
            'ret': False,
            'msg': ('HTTP Error: %s' % e.code),
        }
    except URLError as e:
        return {
            'ret': False,
            'msg': ('URL Error: %s' % e.reason),
        }
    except Exception as e:
        return {
            'ret': False,
            'msg': ('Failed POST operation against Redfish API server: %s' % to_text(e)),
        }
    return {
        'ret': True,
        'resp': resp,
    }
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
a0e915805bc1495a3feee94402e47c3625cbb573
|
012837eafe45c8f7ee5fc77d4c4d7725d5314c5c
|
/workshops/4-day/5-clazz.py
|
2430adc8f4fd3bcb3b6cae29f9b12514d85037c2
|
[
"MIT"
] |
permissive
|
ai-erorr404/opencv-practice
|
e9408cf006779a678cf3a30fc60e9dbeb3c8e493
|
60ef5e4aec61ee5f7e675fb919e8f612e59f664a
|
refs/heads/master
| 2021-02-08T11:17:04.763522
| 2020-02-22T09:43:04
| 2020-02-22T09:43:04
| 244,146,060
| 1
| 1
|
MIT
| 2020-03-01T12:35:02
| 2020-03-01T12:35:01
| null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
"""
Unsharpen Mask 方法(USM) - 锐化增强算法
(源图片 - w * 高斯模糊) / (1 - w)
* w -> 权重(0.1 ~ 0.9), 默认为 0.6
# 原理函数 - 图像融合函数(图像的shape、dtype一定要相同)
cv.addWeighted(src1, alpha, src2, beta, gamma)
- alpha: 第一个输入参数的权重值,可为负数
- gamma: 类delta效果,色彩增强,总和超过255 就是白色
- beta: 第二个输入参数的权重值,可为负数
"""
def main():
    """Sharpen an image with the Unsharpen Mask (USM) technique and display
    both the source and the sharpened result."""
    # 1. Gaussian blur for denoising
    # 2. Weighted overlay of the source over the blur: src*1.5 + gauss*(-0.5)
    src = cv.imread("../../pic/IMG_20191204_151110.jpg")
    cv.imshow("src", src)
    gauss = cv.GaussianBlur(src, (0, 0), 5)
    # media = cv.medianBlur(src, 5)  # alternative: median blur for denoising
    usm = cv.addWeighted(src, 1.5, gauss, -0.5, 0)
    cv.imshow("usm", usm)
    cv.waitKey(0)  # block until a key press so the windows stay open
    cv.destroyAllWindows()


if "__main__" == __name__:
    main()
|
[
"afterloe@foxmail.com"
] |
afterloe@foxmail.com
|
99e03c7a5ebab7b1c9c4e3402824052c450ba006
|
c705b2620119df0d60e925e55228bfbb5de3f568
|
/archives/tk/tk2_quitter.py
|
3cca393ee1a4141db70198a64ebaa6da511c3297
|
[
"Apache-2.0"
] |
permissive
|
mcxiaoke/python-labs
|
5aa63ce90de5da56d59ca2954f6b3aeae7833559
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
refs/heads/master
| 2021-08-05T03:47:51.844979
| 2021-07-24T11:06:13
| 2021-07-24T11:06:13
| 21,690,171
| 7
| 7
|
Apache-2.0
| 2020-08-07T01:52:32
| 2014-07-10T10:20:17
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-09 16:10:35
from tkMessageBox import askokcancel
from Tkinter import *
class Quitter(Frame):
    """A 'Quit' button widget that asks for confirmation before quitting."""

    def __init__(self, parent=None):
        # Python 2 style base-class call: Tkinter's Frame is an old-style class.
        Frame.__init__(self, parent)
        self.pack()
        widget = Button(self, text='Quit', command=self.quit)
        widget.pack(side=LEFT, expand=YES, fill=BOTH)

    def quit(self):
        # Only leave the mainloop if the user confirms the dialog.
        ans = askokcancel('Verify exit', 'Really quit?')
        if ans:
            Frame.quit(self)
|
[
"mcxiaoke@gmail.com"
] |
mcxiaoke@gmail.com
|
e4492680d141de291d7465a651e77ead9ce69654
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/person/feel_child_at_point/company/important_eye/say_time_with_year/same_man_and_time.py
|
2e1318a493eaec887d5816876828623ba3bfeaef
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#! /usr/bin/env python
def number(str_arg):
    """Echo *str_arg* via point_or_group, then print a fixed marker string."""
    point_or_group(str_arg)
    print('company_or_number')


def point_or_group(str_arg):
    """Print the given argument."""
    print(str_arg)


if __name__ == '__main__':
    number('first_work')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
07a2335bfe239f189c83292b9a6e2991c0094dae
|
a27d5a16210fef310569c98b589307b54248f28a
|
/orders/views.py
|
6657d5ea28e264de6e22d2da575e427300a6bf6f
|
[] |
no_license
|
yashboura303/E-commerce_Django
|
fd4b5088c97cc9e8a6971eecb3637700a8bd2e6c
|
8cc2aeb397056566186a4379b9eece56ac99a970
|
refs/heads/master
| 2020-09-22T07:43:09.118289
| 2019-12-01T10:58:47
| 2019-12-01T10:58:47
| 225,108,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from django.shortcuts import render
from products.models import Products
from cart.models import Cart
from .models import Orders
from django.core import serializers
import datetime
def orderPage(request):
    """Create/refresh the current user's order from their cart and render it.

    Copies the cart products onto the order, stamps date_ordered on the
    first purchase only, then empties the cart.
    """
    orders, created = Orders.objects.get_or_create(customer=request.user)
    cartProducts = Cart.objects.get(customer=request.user).products.all()
    orders.products.set(cartProducts)
    orders.save()
    # Idiom fix: compare to None with ``is`` (was ``== None``) and drop the
    # redundant ``else: pass`` branch. Behavior is unchanged.
    if orders.date_ordered is None:
        orders.date_ordered = datetime.datetime.now()
        orders.save()
    # delete cart products after buying
    cart = Cart.objects.get(customer=request.user)
    for product in cartProducts:
        cart.products.remove(product)
    return render(request,'orders/order.html',{"products":orders.products.all(),"order":orders})
|
[
"yashboura303@gmail.com"
] |
yashboura303@gmail.com
|
672384e7ee2fe00f78ba8f0cae10fd9abc21ca41
|
eaa43160aeeaa3cb4c7c9f52d8ed01f9abdf85e5
|
/tests/rest/test_rest_view.py
|
9ff9c33ab588473769c4b72a22918c34f7426d12
|
[
"MIT"
] |
permissive
|
furious-luke/polecat
|
4fd2a2f859b9a77d9d004b32bc1bf8f907fea2ba
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
refs/heads/master
| 2022-07-31T16:38:45.791129
| 2021-05-06T01:05:03
| 2021-05-06T01:05:03
| 179,440,367
| 4
| 0
|
MIT
| 2022-07-05T21:28:34
| 2019-04-04T07:00:55
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
from polecat.rest.schema import RestView
from ..models import Authenticate
def test_rest_view(db, factory):
    """Authenticate through a RestView and check that a token comes back.

    ``db`` and ``factory`` are pytest fixtures supplied by the test suite.
    """
    user = factory.User.create()
    view = RestView(Authenticate)
    # Minimal stand-ins for the framework's request/event objects, built
    # with ``type`` so no real HTTP layer is needed.
    request = type('Request', (), {
        'path': '/a',
        'session': None,
        'json': {
            'email': user.email,
            'password': user.password
        }
    })
    event = type('Event', (), {
        'request': request
    })
    response = view.resolve(request, context_value={
        'event': event
    })
    assert response is not None
    assert response.get('token') is not None
|
[
"furious.luke@gmail.com"
] |
furious.luke@gmail.com
|
1fed40586b8a86779adfa038334fea05ee538612
|
edfcd96f0010ea068a4c046bdcf7067ff92d3f9b
|
/Robot/Selenium/4.Auto-Login.py
|
7787da558eff8cdfb6356e8519c9782480f7224f
|
[] |
no_license
|
afsanehshu/python-project
|
a99ff558f375c1f5e17ea6ffc13af9216ec4733f
|
48905cfd24df6d1f48460d421ed774f19403cf53
|
refs/heads/main
| 2023-08-03T01:53:32.812949
| 2021-09-22T19:36:25
| 2021-09-22T19:36:25
| 409,303,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import os
# Local module holding the Instagram credentials (kept out of the repo).
from password import username , password

# Build the absolute path to chromedriver.exe sitting next to this script.
addresss = os.path.abspath(__file__)
addresss = os.path.dirname(addresss)
addresss = os.path.join(addresss , 'chromedriver.exe')

driver = webdriver.Chrome(executable_path=addresss)
driver.get('https://instagram.com')

# Wait up to 5 s for the username field of the login form to appear.
usr = WebDriverWait(driver,5).until(EC.presence_of_element_located((By.XPATH,'//*[@id="loginForm"]/div/div[1]/div/label/input')))
usr.send_keys(username)
pas = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input')
# Typing ENTER after the password submits the login form.
pas.send_keys(password + Keys.ENTER)
|
[
"afsanehshu@gmail.com"
] |
afsanehshu@gmail.com
|
686b706d30a24d127ee3cceccc35b6174e9af4ac
|
d34c3204b6a985a82e17dc82f455672660536517
|
/703.py
|
24e6ea591581514bc7d5dc52606fb072a9c2cab2
|
[] |
no_license
|
pzqkent/LeetCode
|
34fe4af305c8db4e336ab095bba11e28a4f20ea5
|
48c0bda6f3163adf1709cb440a600fe36d4fb8ec
|
refs/heads/master
| 2020-04-14T23:10:32.898699
| 2019-02-02T23:20:51
| 2019-02-02T23:20:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,546
|
py
|
class KthLargest:
    """Return the k-th largest element of a growing stream (LeetCode 703).

    Keeps a min-heap containing only the k largest values seen so far; the
    heap root is therefore always the k-th largest element.
    """

    def __init__(self, k, nums):
        """
        :type k: int
        :type nums: List[int]
        """
        # BUG FIX: ``import heapq`` was only inside __init__, which binds a
        # *local* name -- ``add`` then raised NameError. Import where used.
        import heapq
        self.nums = nums
        self.k = k
        heapq.heapify(self.nums)
        # Shrink the heap to the k largest values.
        while len(self.nums) > self.k:
            heapq.heappop(self.nums)

    def add(self, val):
        """Insert *val* into the stream and return the k-th largest element.

        :type val: int
        :rtype: int
        """
        import heapq
        if len(self.nums) < self.k:
            heapq.heappush(self.nums, val)
        elif val > self.nums[0]:
            # heapreplace pops the minimum and pushes val in one C call --
            # faster than a separate heappop + heappush.
            heapq.heapreplace(self.nums, val)
        return self.nums[0]


# Your KthLargest object will be instantiated and called as such:
# obj = KthLargest(k, nums)
# param_1 = obj.add(val)
|
[
"pzqkent@gmail.com"
] |
pzqkent@gmail.com
|
7b1557e45e765345bdce5d280c50ca47853acb31
|
99b784550a6d306147c022c8d829800b0fbb8c68
|
/Part_1_Basics/Chapter_6_Dictionaries/favorite_numbers.py
|
9dd21d1fa775414e45ba499c76a968d7a33e6089
|
[] |
no_license
|
apuya/python_crash_course
|
116d6598f656d8fed0b4184edbce8e996cd0f564
|
0b2e8a6e9849a198cfb251706500a919d6f51fe7
|
refs/heads/main
| 2023-06-03T22:41:03.203889
| 2021-06-16T04:07:28
| 2021-06-16T04:07:28
| 367,812,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
# Python Crash Course: A Hands-On, Project-Based Introduction To Programming
#
# Name: Mark Lester Apuya
# Date: 05/28/2021
#
# Chapter 6: Dictionaries
#
# Exercise 6.2 Favorite Numbers:
# Use a dictionary to store people’s favorite numbers. Think of five names,
# and use them as keys in your dictionary. Think of a favorite number for each
# person, and store each as a value in your dictionary. Print each person’s
# name and their favorite number. For even more fun, poll a few friends and get
# some actual data for your program.
favorite_numbers = {
    'mark': 22,
    'jaxon': 20,
    'alex': 2,
    'sam': 30,
    'troy': 12
}

# One loop replaces the five copy-pasted lookup/print pairs; dicts preserve
# insertion order, so the printed order is unchanged.
for person, fav in favorite_numbers.items():
    print(f"{person.title()}'s favorite number is {fav}.")
|
[
"contact@mapuya.com"
] |
contact@mapuya.com
|
84ca014a01bea4b0f0ae8abbc8301ce9a76311f2
|
a3e6c9f54193be74f7ee7d13113723db84b4859d
|
/read_images.py
|
cb12286eee3a8103829d454df1e85949841c2add
|
[] |
no_license
|
Arrotech/openCV-python
|
b235748ed142b02e90cac644d445a6facf637413
|
f5d6f056d2210f81aee5f3359d09080fc6e81482
|
refs/heads/develop
| 2023-05-03T08:34:50.302587
| 2021-05-21T13:57:35
| 2021-05-21T13:57:35
| 368,883,383
| 0
| 0
| null | 2021-05-21T13:57:36
| 2021-05-19T13:43:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# Demo script: load an image and apply a chain of basic OpenCV transforms
# (grayscale, blur, edge detection, dilation/erosion, resize, crop), then
# display the original and the cropped result in GUI windows.
import cv2 as cv

# NOTE(review): path is relative to the current working directory; if the
# file is missing, imread returns None and every later call will fail.
img = cv.imread('images/ppic.jpg')


def resizeframe(frame, scale=0.75):
    """Return *frame* scaled by *scale* in both dimensions.

    Uses INTER_AREA interpolation, which is best suited for shrinking.
    frame.shape is (height, width, channels), hence shape[1] is the width.
    """
    width = int(frame.shape[1] * scale)
    height = int(frame.shape[0] * scale)
    dimensions = (width, height)
    return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)


resized_img = resizeframe(img)
# gray scale image
gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# blur image with a 5x5 Gaussian kernel - removes some of the noise i.e light
blur_img = cv.GaussianBlur(img, (5, 5), cv.BORDER_DEFAULT)
# edge cascade (Canny with thresholds 125/175) on the blurred image
canny_img = cv.Canny(blur_img, 125, 175)
# dilating image edges (3 passes thicken the detected edges)
dilated_img = cv.dilate(canny_img, (3, 3), iterations=3)
# eroding dilated images to get (approximately) the original edges back
eroded_img = cv.erode(dilated_img, (3, 3), iterations=3)
# resizing the image to a fixed 500x500 (INTER_CUBIC suits enlarging)
resized_img2 = cv.resize(img, (500, 500), interpolation=cv.INTER_CUBIC)
# cropping: rows 50..199, columns 200..399 (NumPy slicing is y first)
cropped_img = img[50:200, 200:400]

cv.imshow('Profile Picture', img)
# cv.imshow('Resized Profile Picture', resized_img)
# cv.imshow("Gray Scale Image", gray_img)
# cv.imshow("Blurred Image", blur_img)
# cv.imshow("Canny Edge Cascade Image", canny_img)
# cv.imshow("Dilated Image", dilated_img)
# cv.imshow("Eroded Image", eroded_img)
# cv.imshow("Resized Image", resized_img2)
cv.imshow("Cropped Image", cropped_img)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
|
[
"arrotechdesign@gmail.com"
] |
arrotechdesign@gmail.com
|
5d51135e8bbed8efdb6c26ff33adf8488a8a911f
|
6c543074f1d764af9701e5b55db9ab0220c1df93
|
/prictice/mzitu_02.py
|
b9fa94a5ea6dc727418b8ebc4bc253f4cf809349
|
[] |
no_license
|
allenlgy/Django-project
|
127e984e13f71d20e01df68ad42d00b977ac0105
|
9c4b9e6c67481a5f3cef58ea47e9fd62058036d8
|
refs/heads/master
| 2020-06-23T01:03:03.170674
| 2019-09-04T06:11:40
| 2019-09-04T06:11:40
| 198,453,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,039
|
py
|
import requests
import os
import time
import threading
from bs4 import BeautifulSoup
# 下载界面的函数,利用requests就可以实现
def download_page(url):
    """Fetch *url* and return its body decoded as gb2312 text.

    A desktop-browser User-Agent is sent so the site does not reject the
    request as coming from a bot.
    """
    browser_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"
    }
    response = requests.get(url, headers=browser_headers)
    response.encoding = 'gb2312'
    return response.text
# 获取图一所有套图列表,函数 link 表示套图的链接,text 表示套图的名字
def get_pic_list(html):
    """Parse one index page and download every album listed on it.

    Each ``li.wp-item`` entry holds an ``h3.tit > a`` anchor whose href is
    the album page and whose text is the album title; both are handed to
    get_pic() for downloading.
    """
    parsed = BeautifulSoup(html, 'html.parser')
    for entry in parsed.find_all('li', class_='wp-item'):
        anchor = entry.find('h3', class_='tit').find('a')
        get_pic(anchor.get('href'), anchor.get_text())
# 传入上一步获取到的套图链接及套图名字,获取魅族套图里面的图片并保存
def get_pic(link, text):
    """Download every image of one album and save them under pic/<text>/.

    Parameters
    ----------
    link : str
        URL of the album page.
    text : str
        Album title, used as the sub-directory name.
    """
    html = download_page(link)  # fetch the album page
    soup = BeautifulSoup(html, 'html.parser')
    # BUG FIX: the original closed the parenthesis in the wrong place
    # (soup.find('div', id='picture'.find_all('img'))), calling find_all on
    # the *string* 'picture' and crashing; find the div, then its images.
    pic_list = soup.find('div', id='picture').find_all('img')
    # NOTE(review): "Geck0" (zero) differs from the UA used in
    # download_page; kept byte-identical since servers ignore it.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Geck0/20100101 Firefox/61.0"}
    create_dir('pic/{}'.format(text))
    for i in pic_list:
        pic_link = i.get('src')  # concrete URL of this image
        r = requests.get(pic_link, headers=headers)
        # BUG FIX: the filename was derived from the *album* link, so every
        # image in the album overwrote the same file; use the image URL.
        with open('pic/{}/{}'.format(text, pic_link.split('/')[-1]), 'wb') as f:
            f.write(r.content)
        time.sleep(1)  # be polite to the server / avoid getting banned
def create_dir(name):
    """Create directory *name* (including parents) if it does not exist.

    Safe to call repeatedly: an existing directory is left untouched.
    """
    # BUG FIX: the original called os.path.exests (typo), which raises
    # AttributeError on every call; the correct name is os.path.exists.
    if not os.path.exists(name):
        os.makedirs(name)
def execute(url):
    """Download one index page and process every album linked from it."""
    get_pic_list(download_page(url))
def main():
    """Crawl index pages 1..19 with at most five daemon worker threads.

    Pages are taken from a FIFO queue; finished threads are pruned and new
    ones started until the queue is empty.
    """
    create_dir('pic')
    queue = [i for i in range(1, 20)]  # index-page numbers to crawl
    threads = []
    while len(queue) > 0:
        # BUG FIX: the original removed entries from ``threads`` while
        # iterating over it, which skips elements; rebuild the list of
        # still-running threads instead.
        threads = [t for t in threads if t.is_alive()]
        while len(threads) < 5 and len(queue) > 0:  # cap at 5 workers
            cur_page = queue.pop(0)
            url = 'http://mzitu.com/a/more_{}.html'.format(cur_page)
            thread = threading.Thread(target=execute, args=(url,))
            # Thread.setDaemon() is deprecated; assign the attribute.
            thread.daemon = True
            thread.start()
            print('{}正在下载{}页'.format(threading.current_thread().name, cur_page))
            threads.append(thread)
        # Yield briefly instead of busy-spinning while workers run.
        time.sleep(0.1)


if __name__ == '__main__':
    main()
|
[
"1006547624@qq.com"
] |
1006547624@qq.com
|
90621bfe3417cf74b3345694d425576bd0b9d17d
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_word_count_dto.py
|
14b46c84f966ab2bbefc1ec77a192bda0e3e6bcf
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.materialproduct.model.word_count_dto import WordCountDto
class TestWordCountDto(unittest.TestCase):
    """WordCountDto unit test stubs.

    Auto-generated scaffold (openapi-generator); the actual construction
    test is still a FIXME placeholder.
    """

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # No cleanup required yet.
        pass

    def testWordCountDto(self):
        """Test WordCountDto"""
        # FIXME: construct object with mandatory attributes with example values
        # model = WordCountDto()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"v_wangzichen02@baidu.com"
] |
v_wangzichen02@baidu.com
|
b90c9db4fa9a96c8814e0d5a68e38eb672171701
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/YjwJ6BfujKtmuTMqW_6.py
|
1eb141206c4290eab340178e76daf41c58d99aec
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
def dice_game(scores):
    """Run elimination rounds among four players and return the winner.

    ``scores`` supplies one score (a comparable sequence) per surviving
    player per round, in order p1..p4.  Each round the player with the
    lowest (sum, first value) is eliminated, unless their score equals the
    runner-up's, in which case nobody leaves that round.
    """
    remaining = ['p1', 'p2', 'p3', 'p4']
    feed = scores[::-1]  # reversed so pop() consumes scores in order
    while len(remaining) > 1:
        rolls = [{'s': feed.pop(), 'p': name} for name in remaining]
        rolls.sort(key=lambda r: (sum(r['s']), r['s'][0]))
        worst, runner_up = rolls[0], rolls[1]
        if worst['s'] != runner_up['s']:
            remaining.remove(worst['p'])
    return remaining[0]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
a31a8ad29aa2aa3317182191c87dcbb5489883c1
|
cb1fb3bf87b3f7006b564a0f2acd2d68e5d5ffaa
|
/pyram/misc/vispy_examples/flow_lines.py
|
a802abddd2174e35e85825563cb952f32bd1a21d
|
[
"MIT"
] |
permissive
|
Hoseung/pyRamAn
|
2778f8b12ca966e7586ebf077a964aecd1654223
|
f9386fa5a9f045f98590039988d3cd50bc488dc2
|
refs/heads/master
| 2021-06-22T18:35:06.478492
| 2021-06-05T03:26:31
| 2021-06-05T03:26:31
| 227,741,934
| 1
| 1
|
MIT
| 2020-03-04T12:39:06
| 2019-12-13T02:49:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,690
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Show vector field flow
"""
from __future__ import division
from vispy import app, scene, visuals, gloo
from vispy.util import ptime
import numpy as np
class VectorFieldVisual(visuals.Visual):
    """GPU-animated streamline visual for a 2D vector field.

    Each line starts on a jittered seed grid, is integrated through the
    ``field`` texture in the vertex shader, and is faded along its length
    in the fragment shader so the lines appear to flow.
    """

    vertex = """
    uniform sampler2D field;
    attribute vec2 index;
    uniform vec2 shape;
    uniform vec2 field_shape;
    uniform float spacing;
    varying float dist;  // distance along path for this vertex
    varying vec2 ij;
    uniform sampler2D offset;
    uniform float seg_len;
    uniform int n_iter;  // iterations to integrate along field per vertex
    uniform vec2 attractor;
    varying vec4 base_color;
    uniform sampler2D color;

    void main() {
        // distance along one line
        dist = index.y * seg_len;

        vec2 local;
        ij = vec2(mod(index.x, shape.x), floor(index.x / shape.x));

        // *off* is a random offset to the starting location, which prevents
        // the appearance of combs in the field
        vec2 off = texture2D(offset, ij / shape).xy - 0.5;
        local = spacing * (ij + off);
        vec2 uv;
        vec2 dir;
        vec2 da;
        for( int i=0; i<index.y; i+=1 ) {
            for ( int j=0; j<n_iter; j += 1 ) {
                uv = local / field_shape;
                dir = texture2D(field, uv).xy;

                // add influence of variable attractor (mouse)
                da = attractor - local;
                float al = 0.1 * length(da);
                da /= 0.5 * (1 + al*al);

                dir += da;

                // maybe pick a more accurate integration method?
                local += seg_len * dir / n_iter;
            }
        }
        base_color = texture2D(color, uv);
        gl_Position = $transform(vec4(local, 0, 1));
    }
    """

    fragment = """
    uniform float time;
    uniform float speed;
    varying float dist;
    varying vec2 ij;
    uniform sampler2D offset;
    uniform vec2 shape;
    uniform float nseg;
    uniform float seg_len;
    varying vec4 base_color;

    void main() {
        float totlen = nseg * seg_len;
        float phase = texture2D(offset, ij / shape).b;
        float alpha;

        // vary alpha along the length of the line to give the appearance of
        // motion
        alpha = mod((dist / totlen) + phase - time * speed, 1);

        // add a cosine envelope to fade in and out smoothly at the ends
        alpha *= (1 - cos(2 * 3.141592 * dist / totlen)) * 0.5;

        gl_FragColor = vec4(base_color.rgb, base_color.a * alpha);
    }
    """

    def __init__(self, field, spacing=10, segments=3, seg_len=0.5,
                 color=(1, 1, 1, 0.3)):
        """Build GPU buffers/textures for *field* and start the timer.

        Parameters
        ----------
        field : array (rows, cols, 2)
            The vector field to trace.
        spacing : number
            Spacing between streamline seed points, in field units.
        segments : int
            Number of segments per streamline.
        seg_len : float
            Length of each segment (also the integration step).
        color : tuple | ndarray
            Single RGBA color, or a texture of per-position colors.
        """
        self._time = 0.0
        self._last_time = ptime.time()
        # BUG FIX: with ``from __future__ import division`` these were
        # floats, and float shapes/sizes are rejected by modern NumPy
        # (np.empty, np.random.uniform); truncate to int explicitly.
        rows = int(field.shape[0] / spacing)
        cols = int(field.shape[1] / spacing)
        index = np.empty((rows * cols, segments * 2, 2), dtype=np.float32)

        # encodes starting position within vector field
        index[:, :, 0] = np.arange(rows * cols)[:, np.newaxis]
        # encodes distance along length of line
        index[:, ::2, 1] = np.arange(segments)[np.newaxis, :]
        index[:, 1::2, 1] = np.arange(segments)[np.newaxis, :] + 1
        self._index = gloo.VertexBuffer(index)
        if not isinstance(color, np.ndarray):
            color = np.array([[list(color)]], dtype='float32')
        self._color = gloo.Texture2D(color)

        # Per-seed random jitter (xy) and animation phase (b channel).
        # NOTE(review): np.random.uniform(256, ...) sets *low*=256 with
        # high=1.0, i.e. values in (1, 256); presumably uniform(0, 256)
        # was intended — verify before changing.
        offset = np.random.uniform(256, size=(rows, cols, 3)).astype(np.ubyte)
        self._offset = gloo.Texture2D(offset, format='rgb')
        self._field = gloo.Texture2D(field, format='rg',
                                     internalformat='rg32f',
                                     interpolation='linear')
        self._field_shape = field.shape[:2]

        visuals.Visual.__init__(self, vcode=self.vertex, fcode=self.fragment)
        self.timer = app.Timer(interval='auto', connect=self.update_time,
                               start=False)
        self.freeze()

        self.shared_program['field'] = self._field
        self.shared_program['field_shape'] = self._field.shape[:2]
        self.shared_program['shape'] = (rows, cols)
        self.shared_program['index'] = self._index
        self.shared_program['spacing'] = spacing
        # NOTE(review): the shaders declare no 't' uniform ('time' is set
        # below); this assignment looks like a leftover — confirm.
        self.shared_program['t'] = self._time
        self.shared_program['offset'] = self._offset
        self.shared_program['speed'] = 1
        self.shared_program['color'] = self._color
        self.shared_program['seg_len'] = seg_len
        self.shared_program['nseg'] = segments
        self.shared_program['n_iter'] = 1
        self.shared_program['attractor'] = (0, 0)
        self.shared_program['time'] = 0
        self._draw_mode = 'lines'
        self.set_gl_state('translucent', depth_test=False)

        self.timer.start()

    def _prepare_transforms(self, view):
        # Bind the view's transform into the vertex shader's $transform hook.
        view.view_program.vert['transform'] = view.get_transform()

    def _prepare_draw(self, view):
        pass

    def _compute_bounds(self, axis, view):
        # The visual spans the field extent in x/y; no extent in z.
        if axis > 1:
            return (0, 0)
        return (0, self._field_shape[axis])

    def update_time(self, ev):
        """Timer callback: advance the animation clock and redraw."""
        t = ptime.time()
        self._time += t - self._last_time
        self._last_time = t
        self.shared_program['time'] = self._time
        self.update()
# Wrap the visual in a scene-graph node class so it can be parented to a view.
VectorField = scene.visuals.create_visual_node(VectorFieldVisual)
def fn(y, x):
    """Vector field sampled by np.fromfunction: a swirl around (50, 30).

    Returns [100*dy / r**1.7, -100*dx / r**1.8]; the small +0.01 keeps the
    radius nonzero at the center.  (The differing exponents are kept as in
    the original.)
    """
    off_x = x - 50
    off_y = y - 30
    radius = (off_x ** 2 + off_y ** 2) ** 0.5 + 0.01
    return np.array([100 * off_y / radius ** 1.7,
                     -100 * off_x / radius ** 1.8])
# Sample the analytic field on a 100x100 grid; transpose to (rows, cols, 2).
field = np.fromfunction(fn, (100, 100)).transpose(1, 2, 0).astype('float32')
# Add a horizontal ripple that varies with row index.
field[..., 0] += 10 * np.cos(np.linspace(0, 2 * 3.1415, 100))

# Per-position RGBA color texture: R/G encode the field direction,
# constant blue and 50% alpha.
color = np.zeros((100, 100, 4), dtype='float32')
color[..., :2] = (field + 5) / 10.
color[..., 2] = 0.5
color[..., 3] = 0.5

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view(camera='panzoom')
vfield = VectorField(field[..., :2], spacing=0.5, segments=30, seg_len=0.05,
                     parent=view.scene, color=color)
view.camera.set_range()


@canvas.connect
def on_mouse_move(event):
    # Right-button drag (button 3) moves the shader's attractor point,
    # mapped from canvas coordinates into the visual's coordinate frame.
    if 3 in event.buttons:
        tr = canvas.scene.node_transform(vfield)
        vfield.shared_program['attractor'] = tr.map(event.pos)[:2]


if __name__ == '__main__':
    app.run()
|
[
"hopung@gmail.com"
] |
hopung@gmail.com
|
f047f9775a3c47115ce51159634b7701faa2263c
|
0b2f2a87a8acff0b3a4ae606c223ffe44a6fea2f
|
/scripts/matrix_to_vector.py
|
88f8a51703ece5b7c5f8f6db2c57342206de025f
|
[
"MIT"
] |
permissive
|
endrebak/ldetect
|
25b19518de6fc97e41c3d290e711d9cfbf055520
|
620f60a6758b448c6f16bb529b8ac1afd6b88a6a
|
refs/heads/master
| 2020-10-01T18:08:59.949249
| 2020-01-07T14:04:39
| 2020-01-07T14:04:39
| 227,595,162
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
import pandas as pd
import numpy as np
from ldetect2.src.matrix_to_vector import mat2vec
from sys import argv
# CLI: matrix_to_vector.py <partitions> <theta2-file> <covariance-parquet>...
partitions = argv[1]        # whitespace-separated partition boundaries file
theta2 = argv[2]            # path to a file whose first line is theta^2
covariances = argv[3:]      # per-partition covariance parquet files
# Replace the path with the numeric value it contains.
theta2 = float(open(theta2).readline().strip())

import sys  # NOTE(review): only used by the (removed) debug prints; kept for safety

# Use the last covariance file only to find the largest position index,
# which caps the final partition boundary below.
s = pd.read_parquet(covariances[-1])
max_ = s.i.max()

ps = pd.read_table(partitions, sep=" ", header=None)

# Midpoint between each partition's start (col 0) and the *next*
# partition's end (col 1, shifted up by one) becomes the new boundary;
# the last row has no successor, so it is filled with max_.
new_ends = ((ps[0] + ps[1].shift(-1).values)/2)
new_ends = new_ends.fillna(max_).astype(int)
# Append the adjusted boundaries as a third column.
ps.insert(ps.shape[1], 2, new_ends)

# assert len(ps) == len(covariances), "Number of partitions and covariance files are not the same!"

mat2vec(covariances, ps, theta2)
|
[
"endrebak85@gmail.com"
] |
endrebak85@gmail.com
|
e50fff82db49c4d3e46c34330e69f157e49ccee0
|
b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf
|
/000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh02p01st04TASK04_20210206_conditional.py
|
f09ca9abe36d1c94d09c812d67fc9a1879b525cf
|
[
"Apache-2.0"
] |
permissive
|
SafonovMikhail/python_000577
|
5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4
|
f2dccac82a37df430c4eb7425b5d084d83520409
|
refs/heads/master
| 2022-12-08T10:53:57.202746
| 2022-12-07T09:09:51
| 2022-12-07T09:09:51
| 204,713,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
'''
Reads a student's gender from standard input — the Cyrillic letter "М"
(male) or "Ж" (female) — and prints a Russian greeting in the matching
grammatical gender:

    "М" -> "Привет, ученик!"
    "Ж" -> "Привет, ученица!"

Any other input produces no output.
'''
gender = input()
# NOTE: the comparison letters are Cyrillic; a Latin "M" will not match.
if gender == 'М':
    print('Привет, ученик!')
# elif instead of a second if: the cases are mutually exclusive, so the
# second comparison is skipped once the first has matched.
elif gender == 'Ж':
    print('Привет, ученица!')
|
[
"ms33@inbox.ru"
] |
ms33@inbox.ru
|
0818baf769d8d948dd2bfb3eefca8dbaf7ca45fb
|
52bdad813d9bf9b5e8436b87bcb9b86ecf14370a
|
/tests/test_graph_group.py
|
bffb15475e75e7e15d413000780e3a22f9288a61
|
[
"MIT"
] |
permissive
|
lsgd/Office365-REST-Python-Client
|
8e1abfd78824caed04e0e5bdf7cef66f6331ac6a
|
403a3e884e042e1d64012329713b76db133e9d5f
|
refs/heads/master
| 2021-01-01T13:56:38.242662
| 2020-02-09T13:42:31
| 2020-02-09T13:42:31
| 239,308,985
| 0
| 0
| null | 2020-02-09T13:26:43
| 2020-02-09T13:26:42
| null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
import unittest
import uuid
from office365.directory.groupCreationProperties import GroupCreationProperties
from office365.runtime.client_request_exception import ClientRequestException
from tests.graph_case import GraphTestCase
class TestGraphGroup(GraphTestCase):
    """Tests for Azure Active Directory (Azure AD) groups"""

    # Shared between the create and delete tests; the test1_/test2_ name
    # prefixes make unittest's alphabetical ordering run create first.
    target_group = None

    def test1_create_group(self):
        # Create a "Unified" (Microsoft 365) group with a random name and
        # remember it on the class for the delete test.
        try:
            grp_name = "Group_" + uuid.uuid4().hex
            properties = GroupCreationProperties(grp_name)
            properties.securityEnabled = False
            properties.mailEnabled = True
            properties.groupTypes = ["Unified"]
            new_group = self.client.groups.add(properties)
            self.client.execute_query()
            self.assertIsNotNone(new_group.properties['id'])
            self.__class__.target_group = new_group
        except ClientRequestException as e:
            # A tenant at its directory quota cannot create groups; record
            # "nothing to delete" instead of failing the whole suite.
            if e.code == 'Directory_QuotaExceeded':
                self.__class__.target_group = None
            else:
                raise

    def test2_delete_group(self):
        # Delete the group created above, if creation succeeded.
        grp_to_delete = self.__class__.target_group
        if grp_to_delete is not None:
            grp_to_delete.delete_object()
            self.client.execute_query()
|
[
"vvgrem@gmail.com"
] |
vvgrem@gmail.com
|
688470d41918e3865f9334327d904a306d8cd50c
|
400e10dfd9e21ae5092f5184753faa91a9df9277
|
/Programers_backup/LEVEL 2/땅따먹기.py
|
9b6caaa3ab6b5d98d1a7de757610cbd8748dcaf9
|
[] |
no_license
|
MyaGya/Python_Practice
|
d4aff327a76010603f038bcf4491a14ea51de304
|
0391d1aa6d530f53715c968e5ea6a02cf745fde5
|
refs/heads/master
| 2023-06-14T07:48:19.998573
| 2021-07-12T05:50:52
| 2021-07-12T05:50:52
| 280,452,168
| 0
| 0
| null | 2021-06-23T13:37:56
| 2020-07-17T14:56:24
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
def solution(land):
    """Return the best score for the land-grab DP problem.

    *land* is an N x 4 grid of scores; a column may not be chosen in two
    consecutive rows.  The grid is updated in place, row by row, so that
    land[i][j] ends up holding the best total that ends at column j of
    row i; the answer is the maximum of the last row.

    Note: mutates *land*.
    """
    for i in range(1, len(land)):
        for j in range(4):
            # Best reachable total from the previous row, skipping column j.
            land[i][j] += max(land[i-1][(j+1) % 4],
                              land[i-1][(j+2) % 4],
                              land[i-1][(j+3) % 4])
    # BUG FIX: the original returned max(land[i]) via the leaked loop
    # variable, which raises NameError for a single-row grid; land[-1] is
    # the intended last row and works for every length.
    return max(land[-1])


print(solution([[1, 2, 3, 5], [5, 6, 7, 8], [4, 3, 2, 1]]))
|
[
"38939015+MyaGya@users.noreply.github.com"
] |
38939015+MyaGya@users.noreply.github.com
|
37b9ea83dc0b2cb3ef93c1ad2526a05d7262ba32
|
eb40dce4039d528b9cd06dbeda75da09d09d7fc5
|
/need_install/Django-1.8.17/django/__init__.py
|
cb9997a426a00b1d73b443fd2d3dc00e55446c10
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MulticsYin/MulticsSH
|
39b62189446787c7f0f037b1640c9c780bd1dddd
|
5837a0bff0e7da0e8535e4e0b31ef6baf24274b4
|
refs/heads/master
| 2021-08-28T07:53:51.759679
| 2017-12-11T15:31:03
| 2017-12-11T15:31:03
| 82,428,902
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
from django.utils.version import get_version
# Version tuple consumed by django.utils.version.get_version():
# (major, minor, micro, release level, serial).
VERSION = (1, 8, 17, 'final', 0)

__version__ = get_version(VERSION)


def setup():
    """
    Configure the settings (this happens as a side effect of accessing the
    first setting), configure logging and populate the app registry.
    """
    # Imported lazily so importing ``django`` alone has no side effects.
    from django.apps import apps
    from django.conf import settings
    from django.utils.log import configure_logging

    configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
    apps.populate(settings.INSTALLED_APPS)
|
[
"multics_luo@163.com"
] |
multics_luo@163.com
|
0140f30b8fe5d4f7d45eef45da0f4de62b13ecaa
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/7RrPMoWifqRHPPqj2_6.py
|
52a954c8cc7463202c8c110383489e5478d858be
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
"""
Traditional safes use a three-wheel locking mechanism, with the safe
combination entered using a dial on the door of the safe. The dial is marked
with clockwise increments between 0 and 99. The three-number combination is
entered by first dialling to the right (clockwise), then to the left (anti-
clockwise), and then to the right (clockwise) again. Combination numbers are
read from the top of the dial:

Given the starting (top) position of the dial and the increments used for each
turn of the dial, return a list containing the _combination_ of the safe.
### Step-By-Step Example
safecracker(0, [3, 10, 5]) ➞ [97, 7, 2]
Starting dial position of 0 (same as the diagram above).
First turn (rightward) of 3 increments:
0 -> 99, 98, 97
First number of combination = 97
Second turn (leftward) of 10 increments:
97 -> 98, 99, 0, 1, 2, 3, 4, 5, 6, 7
Second number of combination = 7
Third turn (rightward) of 5 increments:
7 -> 6, 5, 4, 3, 2
Third number of combination = 2
The final combination is [97, 7, 2]
### Other Examples
safecracker(96, [54, 48, 77]) ➞ [42, 90, 13]
safecracker(43, [51, 38, 46]) ➞ [92, 30, 84]
safecracker(4, [69, 88, 55]) ➞ [35, 23, 68]
### Notes
Each of the three combination numbers will be different.
"""
def safecracker(start, increments):
res=[]
for i in range(len(increments)):
if i == 0:
res += [(start+increments[i]*(-1)**(1+i))%100]
else:
res += [(res[-1]+increments[i]*(-1)**(1+i))%100]
return res
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f8145e407078fb63ba6739b24b35529c2ee5a505
|
089fc0ce61e8e433355b304c9ca7cf8a902cfa88
|
/backend/test3_21903/settings.py
|
35450e3626d2693990c3856199b4a2891c541c15
|
[] |
no_license
|
crowdbotics-apps/test3-21903
|
05248130c0c58b390223fd90d4e5e458ea97f303
|
4ec8947918dbce759e7ec7a7f421352768796a79
|
refs/heads/master
| 2023-01-06T05:56:43.992317
| 2020-10-23T22:38:28
| 2020-10-23T22:38:28
| 306,760,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,156
|
py
|
"""
Django settings for test3_21903 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"delivery_order",
"driver",
"menu",
"delivery_user_profile",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "test3_21903.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "test3_21903.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
3ecf6a1a9654381210ad1269dd4c356c791028b7
|
9fd0e9df52bff792b5b96f6dcd1fa03cc467c18d
|
/source/pages/admin.py
|
ff1ef520d4a6068150cb665ad217e8e17d4de802
|
[] |
no_license
|
mooja/ssip209
|
87d4385c7e5038bb0ecfb2a4a3faee7aa2a9cea1
|
bfba4cddecff44057bd6d9da171b1ebfdb5148f3
|
refs/heads/master
| 2020-04-30T22:00:58.032859
| 2015-02-20T14:55:48
| 2015-02-20T14:55:48
| 24,278,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from django_summernote.admin import SummernoteModelAdmin
from django.contrib import admin
from .models import Page
class PageAdmin(SummernoteModelAdmin):
    """Admin for Page using the Summernote WYSIWYG editor on text fields."""

    # Columns shown on the admin change-list page.
    list_display = ['title']


admin.site.register(Page, PageAdmin)
|
[
"max.atreides@gmail.com"
] |
max.atreides@gmail.com
|
ea3e0f49513c78b97324da1060849c420f61e5f5
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/errors/types/media_upload_error.py
|
39578088e187607b8c8b6434f4bac6a2a12de12a
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.errors',
marshal='google.ads.googleads.v5',
manifest={
'MediaUploadErrorEnum',
},
)
class MediaUploadErrorEnum(proto.Message):
r"""Container for enum describing possible media uploading
errors.
"""
class MediaUploadError(proto.Enum):
r"""Enum describing possible media uploading errors."""
UNSPECIFIED = 0
UNKNOWN = 1
FILE_TOO_BIG = 2
UNPARSEABLE_IMAGE = 3
ANIMATED_IMAGE_NOT_ALLOWED = 4
FORMAT_NOT_ALLOWED = 5
EXTERNAL_URL_NOT_ALLOWED = 6
INVALID_URL_REFERENCE = 7
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY = 8
ANIMATED_VISUAL_EFFECT = 9
ANIMATION_TOO_LONG = 10
ASPECT_RATIO_NOT_ALLOWED = 11
AUDIO_NOT_ALLOWED_IN_MEDIA_BUNDLE = 12
CMYK_JPEG_NOT_ALLOWED = 13
FLASH_NOT_ALLOWED = 14
FRAME_RATE_TOO_HIGH = 15
GOOGLE_WEB_DESIGNER_ZIP_FILE_NOT_PUBLISHED = 16
IMAGE_CONSTRAINTS_VIOLATED = 17
INVALID_MEDIA_BUNDLE = 18
INVALID_MEDIA_BUNDLE_ENTRY = 19
INVALID_MIME_TYPE = 20
INVALID_PATH = 21
LAYOUT_PROBLEM = 22
MALFORMED_URL = 23
MEDIA_BUNDLE_NOT_ALLOWED = 24
MEDIA_BUNDLE_NOT_COMPATIBLE_TO_PRODUCT_TYPE = 25
MEDIA_BUNDLE_REJECTED_BY_MULTIPLE_ASSET_SPECS = 26
TOO_MANY_FILES_IN_MEDIA_BUNDLE = 27
UNSUPPORTED_GOOGLE_WEB_DESIGNER_ENVIRONMENT = 28
UNSUPPORTED_HTML5_FEATURE = 29
URL_IN_MEDIA_BUNDLE_NOT_SSL_COMPLIANT = 30
VIDEO_FILE_NAME_TOO_LONG = 31
VIDEO_MULTIPLE_FILES_WITH_SAME_NAME = 32
VIDEO_NOT_ALLOWED_IN_MEDIA_BUNDLE = 33
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
96aee52e2dbc339afedeaad6b4a7001d55c9621b
|
6a34b039ededb2e1dcdc07c6976475654ca0ae0a
|
/code_all/day10/demo03.py
|
561ac03fe84aaec58c94e3a79c7cb3f0a4d52359
|
[
"MIT"
] |
permissive
|
testcg/python
|
57c62671ab1aad18205c1dee4457b55009cef098
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
refs/heads/main
| 2023-07-09T13:19:24.740751
| 2021-08-11T09:25:20
| 2021-08-11T09:25:20
| 394,932,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
"""
创建狗类
数据:
品种、昵称、身长、体重
行为:
吃(体重增长1)
实例化两个对象并调用其函数
画出内存图
"""
# Instance members are accessed through an object: inside the class the
# object is ``self``; outside it is the variable bound by
# ``variable = ClassName(...)``.
class Dog:
    """A dog with a breed, pet name, height and weight.

    Eating increases the weight by one; the dog is fed once on creation,
    as required by the exercise.
    """

    def __init__(self, species="", pet_name="", height=0.0, weight=0):
        self.species, self.pet_name = species, pet_name
        self.height, self.weight = height, weight
        self.eat()  # initial feeding: weight becomes weight + 1

    def eat(self):
        """Gain one unit of weight and announce the meal."""
        self.weight += 1
        print("吃饭饭~")


mx = Dog("拉布拉多", "米咻", 0.6, 60)
print(mx.weight)
mx.eat()
print(mx.weight)
|
[
"cheng@eaglesoftware.cn"
] |
cheng@eaglesoftware.cn
|
0396e30832d2d1418b62cb25f64b70bb01309eaa
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/fact_and_few_work/time_or_able_fact/great_thing/say_group_with_woman.py
|
40dc5a750776c8f9410e3b4497b53ed7b31e59d6
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
#! /usr/bin/env python


def person(str_arg):
    """Echo the given string to stdout."""
    print(str_arg)


def find_other_part(str_arg):
    """Print *str_arg* (via person) followed by the fixed marker line."""
    person(str_arg)
    print('new_point')


if __name__ == '__main__':
    find_other_part('thing_or_part')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
56245421e92559dca2ccf80a331a8974c2d78296
|
b028b595769e1a6aa24b999ff715486154bddaad
|
/project_wiki/project_wiki/settings.py
|
d2af995dc2604b2f680433e6589ee9d5b2c948de
|
[] |
no_license
|
bhaveshagarwal1697/login-and-register-using-user-authentication
|
bce48f359264474855b10a51db9d93b72b181f36
|
5ab5e3ccb0f2a3695a7ce82fa9976fc5c126f44d
|
refs/heads/master
| 2020-07-31T00:01:22.735982
| 2019-09-23T17:03:23
| 2019-09-23T17:03:23
| 210,408,273
| 0
| 2
| null | 2019-09-24T04:46:18
| 2019-09-23T16:58:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,322
|
py
|
"""
Django settings for project_wiki project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '(v(bkjjnz07ynv^_yju5)zd3-mp4ct57zc((*8**8tx!sw8085'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True plus the wildcard ALLOWED_HOSTS below is a
# development-only configuration; both must be tightened for production.
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user_activities',
'rest_framework',
'rest_framework.authtoken',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_wiki.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_wiki.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"you@example.com"
] |
you@example.com
|
f0a97f932cf0cba3b3e6e0b9beaa99fd5971dcd3
|
8ac156c3bfeb4ce28836a1820cb88959424dab14
|
/extrasetup.py
|
f2fa29ce6a348bce4cc10fdfc0827986a7f941d2
|
[
"Apache-2.0"
] |
permissive
|
Cloudmersive/Cloudmersive.APIClient.Python.OCR
|
7b593464d31d3038663bedca3c085a161e356f20
|
90acf41a9b307213ef79f63ea4c749469ef61006
|
refs/heads/master
| 2023-04-03T06:03:41.917713
| 2023-03-27T05:30:38
| 2023-03-27T05:30:38
| 138,450,272
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from os import path
# Resolve the directory containing this file so README.md is located
# relative to the package, not the current working directory.
this_directory = path.abspath(path.dirname(__file__))
# Read the README as UTF-8; the result is used as setup()'s long_description.
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
|
[
"35204726+Cloudmersive@users.noreply.github.com"
] |
35204726+Cloudmersive@users.noreply.github.com
|
2fa106d583cc79bc5e2e47d65b1a0202c51dbdb8
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/018_dictionaries/_exercises/dictionary_002.py
|
4563ea55f370405d300adbbf05e0be69fdea6790
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
# # -*- coding: utf-8 -*-
#
# # Проверить существование кточа можно с помощью оператора i_. Если ключ найден, то
# # возвращается значение тrue, в противном случае - False.
# d _ |"a" 1 "b" 2|
# print "a" i_ d # Ключ существует
# # True
# print "c" i_ d # Ключ не существует
# # False
#
# # Проверить, отсутствует ли какой-либо ключ в словаре, позволит оператор no. i_. Если
# # ключ отсутствует, возвращается True, иначе - False.
# d _ |"a" 1, "b" 2|
# print "c" no. i_ d # Ключ не существует
# # True
# print "a" no. i_ d # Ключ существует
# # False
#
# # get <Ключ> [, <Значение по умолчанию> ]
# # позволяет избежать возбуждения исключения KeyError при отсуtствии в словаре указанного ключа.
# # Если ключ присутствует в словаре, то метод возвращает значение, соответствующее этому ключу.
# # Если ключ отсутствует, то возвращается None или значение, указанное во втором параметре.
# #
# d _ |"a" 1 "b" 2|
# print d.ge. "a" d.ge. "c" d.ge. "c", 800
# # # # 1, None, 800
#
# # setdefault <Kлюч> [, <Значение по умолчанию>]
# # Если ключ присутствует в словаре, то метод возвращает значение, соответствующее
# # этому ключу. Если ключ отсутствует, то в словаре создается новый элемент со значением, указанным во втором параметре.
# # Если второй параметр не указан, значением нового элемента будет None.
# #
# d _ |"a" 1, "b" 2|
# print d.s.. "a" d.s... "c" d.s... "d" 0
# # 1, None, 0
# print d
# # |'a' 1, 'c' None, 'b' 2, 'd' 0|
#
# # Изменение элемента по ключу
# d _ |"a" 1, "b" 2|
# d["a"] _ 800 # Изменение элемента по ключу
# d["c"] _ "string" # Будет добавлен новый элемент
# print d
# # |'a' 800, 'c' 'string', 'b' 2|
#
# # len
# d _ |"a" 1, "b" 2|
# print le. d # Получаем количество ключей в словаре
# # 2
#
# # del
# d _ |"a" 1, "b" 2|
# del d|"b"|; print d # Удаляем элемент с ключом "b" и выводим словарь
# # |'a' 1|
#
# Iterating over the elements of a dictionary.
d = {"x": 1, "y": 2, "z": 3}
# Explicitly iterate the key view and look each value up by key.
for current_key in d.keys():
    print("{0} => {1}".format(current_key, d[current_key]), end=" ")
# Prints: x => 1 y => 2 z => 3
#
# Iterating the dict object directly yields the same sequence of keys.
for current_key in d:
    print("{0} => {1} ".format(current_key, d[current_key]), end=" ")
# Prints: x => 1 y => 2 z => 3
#
# # Получаем список ключей
# d _ |"x" 1, "y" 2, "z" 3|
# k _ l.. d.k.. # Получаем список ключей
# ?.s.. # Сортируем список ключей
# ___ key i_ ?
# print " |0| _> |1| ".f.. ? ?? e.._" "
# # Выведет x _> 1 y _> 2 z _> 3
#
# # sorted
# d _ |"x" 1, "y" 2, "z" 3|
# ___ key i_ s.. ?.k..
# print " |0| _> |1| ".f.. ? ?? e.._" "
# # Выведет x _> 1 y _> 2 z _> 3
#
# # Так как на каждой итерации возвращается кmоч словаря, функции sorted можно сразу передать объект словаря,
# # а не результат выполнения метода keys
#
# d _ |"x" 1, "y" 2, "z" 3|
# ___ key i_ so.. d
# print " |0| _> |1| ".f... k.. d|k..| e.._" "
# # Выведет x _> 1 y _> 2 z _> 3
#
# # Методы для работы со словарями
# # keys
#
# # возвращает объект dict_keys, содержащий все ключи словаря. Этот объект
# # поддерживает итерации, а также операции над множествами
# #
# d1, d2 _ |"a" 1 "b" 2 | |"a" 3 "c" 4 "d" 5|
# print d1.keys , d2.keys )) # Получаем объект dict_keys
# # dict_keys ['a', 'b'] , dict_keys ['a', 'c', 'd']))
# print li.. d1.k... ; li.. d2.k...
# # Получаем список ключей
# # ['a', 'b'], ['a', 'c', 'd']
# ___ k i_ d1.k..
# print k e.._" "
#
# # Методы для работы со словарями
# # keys - Объединение
# d1, d2 _ |"a" 1, "b" 2|, |"a" 3, "c" 4, "d" 5|
# print d1.ke.. | d2.ke..
# # |'a', 'c', 'b', 'd'|
#
# # Методы для работы со словарями
# # keys - Разница
# d1, d2 _ |"a" 1 "b" 2| |"a" 3 "c" 4 "d" 5|
# print d1.k.. - d2.k..
# # |'b'|
# print d2.ke.. - d1.k...
# # |'c', 'd'|
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
12ebcf942db94b34208f8ee84921e68b379daeac
|
34733b8a98ac7d3518e02efdc414b45a8c12c805
|
/openspeech/encoders/openspeech_encoder.py
|
f40b876bb33a4a53381586b7f9c514178c4ae5d0
|
[
"MIT",
"LicenseRef-scancode-secret-labs-2011",
"Unlicense",
"HPND",
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
SoYoungCho/openspeech-1
|
4945427d1953f469f01e687dc5ac5c19779f864d
|
12eb432ea869288e097a5836236a6b658c40bb1b
|
refs/heads/main
| 2023-05-12T13:14:55.611187
| 2021-06-06T15:45:50
| 2021-06-06T15:45:50
| 374,395,644
| 1
| 0
|
NOASSERTION
| 2021-06-06T15:28:08
| 2021-06-06T15:28:08
| null |
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from torch import Tensor
from openspeech.modules import DeepSpeech2Extractor, VGGExtractor, Swish, Conv2dSubsampling
class OpenspeechEncoder(nn.Module):
    r"""
    Base Interface of Openspeech Encoder.

    Concrete encoders subclass this and implement :meth:`forward`.

    Inputs:
        inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
            `FloatTensor` of size ``(batch, seq_length, dimension)``.
        input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
    """
    # Config-string -> activation module lookup shared by concrete encoders.
    supported_activations = {
        'hardtanh': nn.Hardtanh(0, 20, inplace=True),
        'relu': nn.ReLU(inplace=True),
        'elu': nn.ELU(inplace=True),
        'leaky_relu': nn.LeakyReLU(inplace=True),
        'gelu': nn.GELU(),
        'swish': Swish(),
    }
    # Config-string -> convolutional front-end extractor class.
    supported_extractors = {
        'ds2': DeepSpeech2Extractor,
        'vgg': VGGExtractor,
        'conv2d_subsample': Conv2dSubsampling,
    }

    def __init__(self):
        super(OpenspeechEncoder, self).__init__()

    def count_parameters(self) -> int:
        r""" Count parameters of encoders """
        # BUG FIX: ``numel`` is a method; the original summed the bound-method
        # objects themselves (``p.numel``), which raises a TypeError. Calling
        # it yields each parameter tensor's element count.
        return sum(p.numel() for p in self.parameters())

    def update_dropout(self, dropout_p: float) -> None:
        r""" Update dropout probability of encoders """
        # Only direct children are visited here, matching the original code;
        # nested Dropout modules deeper in the tree are not updated.
        for name, child in self.named_children():
            if isinstance(child, nn.Dropout):
                child.p = dropout_p

    def forward(self, inputs: Tensor, input_lengths: Tensor):
        r"""
        Forward propagate for encoders training.

        Inputs:
            inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
                `FloatTensor` of size ``(batch, seq_length, dimension)``.
            input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
        """
        raise NotImplementedError
|
[
"sooftware@Soohwanui-MacBookPro.local"
] |
sooftware@Soohwanui-MacBookPro.local
|
62a850a7ef8dd5d6ae2de39d74521905b6cdf375
|
b605b3dade1aca21b634f37308ac120cce4c7315
|
/scripts/future_pred_asymmetric_with_bypass_diff_lossmultiple_power.py
|
01abb9f2d06ac46a9a7554eb96b5998ba8e8a1f7
|
[
"Apache-2.0"
] |
permissive
|
dicarlolab/curiosity
|
8db6dc35b31c2426246a9dd816054720d4d5e021
|
469dc4a652b6a0f62a6ccb2ecc595f55fdeb5f6c
|
refs/heads/master
| 2020-04-05T18:55:42.852376
| 2016-07-20T14:10:56
| 2016-07-20T14:10:56
| 55,555,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
"""
image diffs
loss multiple of 100
diff power of .5
"""
import os
import copy
import numpy as np
import curiosity.utils.base as base
import curiosity.models.future_pred_asymmetric_with_bypass as modelsource
import curiosity.datasources.images_futurediffs_and_actions as datasource
# Experiment bookkeeping: database/collection/identifier used by base.run.
dbname = 'threeworld_future_pred'
colname = 'test_asymmetric_with_bypass'
experiment_id = 'test0_diff_lm1_diffpow5_lr1'
# Model construction callable and its keyword arguments.
# NOTE(review): host/port presumably point at the data service — confirm.
model_func = modelsource.get_model
model_func_kwargs = {"host": "18.93.3.135",
                     "port": 23044,
                     "datapath": "/data2/datasource6",
                     "keyname": "randompermpairs3_medium",
                     "loss_multiple": 1,
                     "diff_power": 0.5}
# The data source takes the same kwargs minus the two loss-related keys,
# which are meaningful only to the model.
data_func = datasource.getNextBatch
data_func_kwargs = copy.deepcopy(model_func_kwargs)
data_func_kwargs.pop('loss_multiple')
data_func_kwargs.pop('diff_power')
# Training schedule parameters consumed by base.run below.
num_train_steps = 20480000
batch_size = 128
slippage = 0
SKDATA_ROOT = os.environ['SKDATA_ROOT']
CODE_ROOT = os.environ['CODE_ROOT']
cfgfile = os.path.join(CODE_ROOT,
'curiosity/curiosity/configs/normals_config_winner0.cfg')
savedir = os.path.join(SKDATA_ROOT, 'futurepredopt')
erase_earlier = 3
decaystep=1024000
base.run(dbname,
colname,
experiment_id,
model_func,
model_func_kwargs,
data_func,
data_func_kwargs,
num_train_steps,
batch_size,
slippage=slippage,
cfgfile=cfgfile,
savedir=savedir,
erase_earlier=erase_earlier,
base_learningrate=1.0,
loss_threshold=10000,
decaystep=decaystep)
|
[
"dyamins@gmail.com"
] |
dyamins@gmail.com
|
02aee538c4869755c1fb25b6a0126b3dda67eba6
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/equiLeader_20200827132415.py
|
bae1c1e0de8c2e0371cd2122dbb9f59a0ecd1480
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
def equi(A):
    """Return the number of equi-leader split points of sequence A.

    An index S (0 <= S < len(A) - 1) is an equi leader when the leader
    (the value occurring in more than half of the elements) of
    A[0..S] equals the leader of A[S+1..len(A)-1].  Runs in O(n).

    The original version had an empty ``for`` body (a syntax error) and
    never returned; this completes the computation.
    """
    # Count occurrences and find the candidate leader of the whole array.
    store = {}
    candidate = -1
    for value in A:
        if value in store:
            store[value] += 1
        else:
            store[value] = 1
    for value in store:
        if store[value] > (len(A) // 2):
            candidate = value
    # If the whole array has no leader, no prefix/suffix pair can share one
    # (a common leader of both halves is necessarily a leader of the whole).
    if candidate == -1:
        return 0
    total = store[candidate]
    equi_leaders = 0
    prefix_count = 0
    for s in range(len(A) - 1):
        if A[s] == candidate:
            prefix_count += 1
        left_len = s + 1
        right_len = len(A) - left_len
        # The candidate must be a strict-majority leader on both sides.
        if (prefix_count > left_len // 2
                and (total - prefix_count) > right_len // 2):
            equi_leaders += 1
    return equi_leaders


equi([4, 3, 4, 4, 4, 2])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
7e89226ff3da368cb2b0f2ad7926269f3528fd8b
|
c19ca6779f247572ac46c6f95327af2374135600
|
/offer/offer 16 leetcode 50 Pow(x, n).py
|
5a5e276821632a9217a1e3ef03892e43c6b6b71f
|
[] |
no_license
|
clhchtcjj/Algorithm
|
aae9c90d945030707791d9a98d1312e4c07705f8
|
aec68ce90a9fbceaeb855efc2c83c047acbd53b5
|
refs/heads/master
| 2021-01-25T14:24:08.037204
| 2018-06-11T14:31:38
| 2018-06-11T14:31:38
| 123,695,313
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# -- coding: utf-8 --
__author__ = 'CLH'
# 实现Pow(x, n)
class Solution(object):
    """Fast exponentiation: compute x ** n (leetcode 50 / offer 16)."""

    def myPow(self, x, n):
        """
        :type x: float
        :type n: int
        :rtype: float

        Handles negative exponents by inverting the positive power.
        Raises ZeroDivisionError for 0 raised to a negative exponent.
        """
        if n < 0 and x == 0:
            raise ZeroDivisionError("float division by zero")
        power = 1.0 * self.calculatePow(x, abs(n))
        if n < 0:
            return 1.0 / power
        return power

    def calculatePow(self, x, n):
        """Recursive square-and-multiply for a non-negative exponent n."""
        if n == 0:
            return 1
        if n == 1:
            return x
        half = self.calculatePow(x, n >> 1)
        squared = half * half
        # Odd exponents need one extra factor of x.
        return squared * x if n & 1 == 1 else squared
if __name__ == "__main__":
S = Solution()
print(S.myPow(2.1,3))
|
[
"15720622991@163.com"
] |
15720622991@163.com
|
bb77ba5829b46af2e085ab307b7fb5a4937e8fd4
|
d7e4d46db1cfda7fb417ba4d185be0639d2d1280
|
/lib/analyze_results.py
|
0728baa0a3be9b858c3eba34b55c7673ec366a63
|
[] |
no_license
|
enewe101/relational-nouns-LREC-2018
|
4f830c7dc129ce988bef486b3e393228bdee4cd5
|
d6d1689b9107401c12cb74e3a68dd75cda45266d
|
refs/heads/master
| 2021-09-14T07:45:13.386635
| 2018-05-10T04:14:47
| 2018-05-10T04:14:47
| 105,477,180
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from collections import defaultdict
import json
import os
import sys

# Make the parent directory importable so SETTINGS can be found.
# BUG FIX: the original called sys.path.join (no such attribute) and
# imported the nonexistent name ``Default`` from collections; it also
# used os.path without importing os.
sys.path.append('..')

from SETTINGS import DATA_DIR

RESULTS_PATH = os.path.join(
    DATA_DIR, 'crowdflower', 'results-binary-comprehensive.json')


def read_raw_results(results_path=RESULTS_PATH):
    """Parse one JSON object per line from the results file.

    BUG FIX: the original bound the list to a local and implicitly
    returned None.
    """
    return [json.loads(line) for line in open(results_path)]


def results_by_contributor():
    """Group individual judgments by the worker that produced them.

    Returns a dict mapping worker_id -> list of judgment dicts.
    """
    raw_results = read_raw_results()
    # BUG FIX: the original assignment had no right-hand side (a syntax
    # error); defaultdict(list) matches the append-per-user usage below.
    contributor_results = defaultdict(list)
    # BUG FIX: the original nested a duplicate ``for result`` loop, which
    # would have walked the judgments len(raw_results) times per result,
    # and called .append() with no argument.
    for result in raw_results:
        for judgment in result['results']['judgments']:
            user = judgment['worker_id']
            contributor_results[user].append(judgment)
    return contributor_results
|
[
"edward.newell@gmail.com"
] |
edward.newell@gmail.com
|
c38a22db05427b0493e281f998d27db898e6738c
|
d771e2173ec0b84f28a4bec80dd4dedaf6c48021
|
/rest/app.py
|
34b7c9838c27e868624f819a2e245659df14e1eb
|
[
"Apache-2.0"
] |
permissive
|
markmcdowall/mg-rest-auth-test
|
8675abdb63b314aae3e3cee1124354a9d3713120
|
1ce3027480c9846187f0a22afcdbdbab6d3ef2eb
|
refs/heads/master
| 2021-01-20T01:17:59.616252
| 2017-10-26T16:04:02
| 2017-10-26T16:04:02
| 101,283,488
| 0
| 0
| null | 2017-10-26T16:04:03
| 2017-08-24T10:32:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask
from flask_restful import Api, Resource
from rest.mg_auth import authorized
APP = Flask(__name__)
class TokenCheck(Resource):
    """
    Class to handle checking if the token returns a valid user name
    """

    @authorized
    def get(self, user_id):
        """
        Test to see if it is possible to get the user_id

        The @authorized decorator (project-local) supplies user_id; it is
        None when the token did not resolve to a user.
        """
        # NOTE(review): msg is computed but never included in the response
        # below — presumably it was meant to be a 'message' key in the
        # returned dict; confirm intended API shape before changing.
        msg = "Congratulations, welcome to the MuG VRE"
        if user_id is None:
            msg = "Are you sure that you have a valid token?"
        return {
            'user_id': user_id,
        }
# Define the URIs and their matching methods
REST_API = Api(APP)
# Token Checker
REST_API.add_resource(TokenCheck, "/mug/api/check", endpoint='token-check')
# Initialise the server
if __name__ == "__main__":
APP.run(port=5000, debug=True, use_reloader=False)
|
[
"mark.mcdowall@gmail.com"
] |
mark.mcdowall@gmail.com
|
5185ae361d901346d73a7cb998e7b6d406662ddc
|
c33496682b760deac61fedecba3e82ce4e41dfde
|
/scripts/e240.py
|
0d4ee2ac36fae167814fac3924acaa0790845cd8
|
[
"MIT"
] |
permissive
|
ferasalsaab/neuralnilm_prototype
|
c5e9cde02d475ac499b15fea62143e76adff07d0
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
refs/heads/master
| 2020-04-16T14:38:03.615279
| 2018-01-29T15:30:43
| 2018-01-29T15:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,994
|
py
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5,
include_diff=False,
clip_appliance_power=True,
lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
# 'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 5, # number of feature maps to be pooled together
# 'axis': 1 # pool over the time axis
# },
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
}
]
)
def exp_a(name):
    """Experiment A: like e239 but LSTM (not BLSTM), no lag, clipped
    appliance power.

    RESULTS (original author's note): awful.
    """
    data_source = RealApplianceSource(**source_dict)
    config = deepcopy(net_dict)
    config.update(experiment_name=name, source=data_source)
    output_layer = {
        'type': DenseLayer,
        'num_units': data_source.n_outputs,
        'nonlinearity': sigmoid
    }
    config['layers_config'].append(output_layer)
    return Net(**config)
def exp_b(name):
    """Experiment B: identical to exp_a but with source lag = 10."""
    # as A but lag = 10
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['lag'] = 10
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    # Final sigmoid dense layer sized to the number of target appliances.
    net_dict_copy['layers_config'].append(
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': sigmoid
        }
    )
    net = Net(**net_dict_copy)
    return net
def exp_c(name):
    """Experiment C: identical to exp_a but with source lag = 20."""
    # as A but lag = 20
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['lag'] = 20
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    # Final sigmoid dense layer sized to the number of target appliances.
    net_dict_copy['layers_config'].append(
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': sigmoid
        }
    )
    net = Net(**net_dict_copy)
    return net
def exp_d(name):
    """Experiment D: identical to exp_a but with source lag = 40."""
    # as A but lag = 40
    # possibly the best of this e240 lot
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['lag'] = 40
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    # Final sigmoid dense layer sized to the number of target appliances.
    net_dict_copy['layers_config'].append(
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': sigmoid
        }
    )
    net = Net(**net_dict_copy)
    return net
def exp_e(name):
    """Experiment E: identical to exp_a but with source lag = 80."""
    # as A but lag = 80
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['lag'] = 80
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    # Final sigmoid dense layer sized to the number of target appliances.
    net_dict_copy['layers_config'].append(
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': sigmoid
        }
    )
    net = Net(**net_dict_copy)
    return net
def init_experiment(experiment):
    """Build and return the Net for a single experiment letter (e.g. 'a').

    Looks up the module-level ``exp_<letter>`` function and calls it with
    the fully qualified experiment name.  Replaces the original
    ``eval()`` of a constructed string with a direct ``globals()``
    lookup, which is safer and gives a clear KeyError on a bad letter.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    exp_func = globals()['exp_{:s}'.format(experiment)]
    net = exp_func(full_exp_name)
    return net
def main():
    """Run experiments a-e in sequence, saving results under PATH.

    Failures in one experiment must not abort the rest, so TrainingError
    and the deliberately broad Exception are logged and the loop moves
    on; only Ctrl-C stops the whole run.
    """
    for experiment in list('abcde'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=10000)
        except KeyboardInterrupt:
            # User requested a stop: abandon the remaining experiments.
            break
        except TrainingError as exception:
            print("EXCEPTION:", exception)
        except Exception as exception:
            # Best-effort: report and continue with the next experiment.
            print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
|
[
"jack-list@xlk.org.uk"
] |
jack-list@xlk.org.uk
|
be5d17d61c2ffb7c47f6328b101ff4469f32c018
|
661ee30b27b2893930d4a8db1db0c08538653dc5
|
/standalone_django_project/settings.py
|
2f4519f5c0963bc3708692a4867f3e026d8bddb8
|
[
"BSD-3-Clause"
] |
permissive
|
350dotorg/aktivator
|
fc67aed167fb204ff327448a86c37d69ef566964
|
bb37cc50212a1797315c99037495a83bc9ff2b01
|
refs/heads/master
| 2016-09-09T21:51:23.371940
| 2014-07-11T13:33:19
| 2014-07-11T13:33:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,121
|
py
|
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
ROOT_URLCONF = 'standalone_django_project.urls'
WSGI_APPLICATION = 'standalone_django_project.wsgi.application'
SITE_ID = 1
SITE_NAME = os.environ.get("SITE_NAME")
SITE_DOMAIN = os.environ['SITE_DOMAIN']
HEROKU_DOMAIN = os.environ.get('HEROKU_DOMAIN')
import actionkit_usersearch
GEONAMES_API_USERNAME = actionkit_usersearch.SETTINGS['GEONAMES_API_USERNAME']
ALLOWED_HOSTS = [SITE_DOMAIN]
if HEROKU_DOMAIN:
ALLOWED_HOSTS.append(HEROKU_DOMAIN)
if os.environ.get('DJANGO_DEBUG'):
DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ACTIONKIT_DATABASE_NAME = os.environ['ACTIONKIT_DATABASE_NAME']
ACTIONKIT_DATABASE_USER = os.environ['ACTIONKIT_DATABASE_USER']
ACTIONKIT_DATABASE_PASSWORD = os.environ['ACTIONKIT_DATABASE_PASSWORD']
import dj_database_url
DATABASES = {
'default': dj_database_url.config(),
'ak': {
'ENGINE': "django.db.backends.mysql",
'NAME': ACTIONKIT_DATABASE_NAME,
'USER': ACTIONKIT_DATABASE_USER,
'PASSWORD': ACTIONKIT_DATABASE_PASSWORD,
'HOST': "client-db.actionkit.com",
'PORT': "",
}
}
DATABASES['dummy'] = actionkit_usersearch.DATABASES['dummy']
SECRET_KEY = os.environ["DJANGO_SECRET"]
ACTIONKIT_API_HOST = os.environ['ACTIONKIT_API_HOST']
ACTIONKIT_API_USER = os.environ['ACTIONKIT_API_USER']
ACTIONKIT_API_PASSWORD = os.environ['ACTIONKIT_API_PASSWORD']
TEMPLATE_LOADERS = (
'dbtemplates.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'gunicorn',
'south',
'django.contrib.flatpages',
'dbtemplates',
'djangohelpers',
'standalone_django_project', # For the template finder
'actionkit',
'actionkit_usersearch',
'actionkit_userdetail',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"standalone_django_project.context_processors.globals",
)
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
OAUTH_REDIRECT_URI_ENFORCE_PREFIX_ONLY = True
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
"djangohelpers.middleware.AuthRequirementMiddleware",
)
ANONYMOUS_PATHS = (
"/static/",
"/admin/",
"/accounts/",
)
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
if os.environ.get('DJANGO_DEBUG_TOOLBAR'):
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
# Debug-toolbar whitelist, parsed from the INTERNAL_IPS env var:
# unset -> empty list; "*" or "0.0.0.0" -> match every address;
# otherwise a whitespace-separated list of IPs.
INTERNAL_IPS = os.environ.get("INTERNAL_IPS")
if INTERNAL_IPS is None:
    INTERNAL_IPS = []
elif INTERNAL_IPS.strip() in ("*", "0.0.0.0"):
    # A list subclass whose membership test always succeeds, so any
    # client IP is treated as internal.
    class AllIPS(list):
        def __contains__(self, item):
            return True
    INTERNAL_IPS = AllIPS()
else:
    INTERNAL_IPS = [i.strip() for i in INTERNAL_IPS.split()]
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'collected_static')
|
[
"ethan.jucovy@gmail.com"
] |
ethan.jucovy@gmail.com
|
a2b165fab8d6e4f886c3ec1ffd2c2aa7e4488b98
|
56cce3fee2e3d69d60958eb2aacc4f65fc3d2230
|
/tests/test_directed_graph.py
|
c8ba0e69667c52f75f1566337eabb47ba0a6a063
|
[
"BSD-3-Clause"
] |
permissive
|
nokia/PyBGL
|
52c2f175d1dbccb15519f8a16de141845d0abaf3
|
707f2df32ede7d9a992ea217a4791da34f13e138
|
refs/heads/master
| 2023-08-08T04:46:24.931627
| 2023-08-03T16:31:35
| 2023-08-03T16:31:35
| 148,536,169
| 12
| 3
|
BSD-3-Clause
| 2023-08-03T16:31:36
| 2018-09-12T20:11:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,434
|
py
|
#!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
from pybgl.graph import *
(u, v, w) = (0, 1, 2)
def make_g1() -> DirectedGraph:
    """Build a directed graph with three isolated vertices (u, v, w)."""
    g1 = DirectedGraph()
    add_vertex(g1) # u
    add_vertex(g1) # v
    add_vertex(g1) # w
    return g1
def make_g2() -> DirectedGraph:
    """Build the fixture graph: g1 plus five edges, including a parallel
    (u, v) edge and a (w, w) self-loop."""
    g2 = make_g1()
    add_edge(u, v, g2)
    add_edge(u, v, g2) # parallel edge
    add_edge(u, w, g2)
    add_edge(v, w, g2)
    add_edge(w, w, g2)
    return g2
def test_directed_graph_num_vertices():
    """Three add_vertex calls yield num_vertices == 3."""
    g1 = make_g1()
    assert num_vertices(g1) == 3
def test_directed_graph_node_add_edge():
    """add_edge returns (descriptor, True); edge() finds it; counts and
    out_edges reflect each addition."""
    # Make graph
    g = make_g1()
    assert out_degree(u, g) == 0
    assert num_edges(g) == 0
    # Add e1
    (e1, added1) = add_edge(u, v, g)
    assert added1
    (e, found) = edge(u, v, g)
    assert found
    assert e == e1
    assert out_degree(u, g) == 1
    assert num_edges(g) == 1
    # No arc
    (e, found) = edge(u, w, g)
    assert not found
    assert {e for e in out_edges(u, g)} == {e1}
    # Add e2
    (e2, added2) = add_edge(u, w, g)
    assert added2
    assert {e for e in out_edges(u, g)} == {e1, e2}
    assert out_degree(u, g) == 2
    assert num_edges(g) == 2
def test_directed_graph_add_vertex():
    """Adding a vertex and an edge to the fixture updates the counts and
    the out_edges view of v."""
    g = make_g2()
    assert num_vertices(g) == 3
    assert num_edges(g) == 5
    # Add vertex x
    x = add_vertex(g)
    assert num_vertices(g) == 4
    # Add edge (v -> x)
    (e1, found) = edge(v, w, g)
    assert found
    (e2, added) = add_edge(v, x, g)
    assert num_edges(g) == 6
    assert {e for e in out_edges(v, g)} == {e1, e2}
def test_directed_graph_remove_edge():
    """remove_edge decrements num_edges, including for the self-loop."""
    g = make_g2()
    assert num_edges(g) == 5
    (e, found) = edge(v, w, g)
    remove_edge(e, g)
    assert num_edges(g) == 4
    (e, found) = edge(w, w, g)
    remove_edge(e, g)
    assert num_edges(g) == 3
def test_directed_graph_iterators():
    """vertices() and edges() yield exactly num_vertices / num_edges items."""
    g = make_g2()
    m = 0
    for _ in vertices(g):
        m += 1
    assert m == num_vertices(g)
    assert m == 3
    n = 0
    for _ in edges(g):
        n += 1
    assert n == num_edges(g)
    assert n == 5
def test_directed_graph_remove_vertex():
g = make_g2()
assert num_vertices(g) == 3
assert num_edges(g) == 5
remove_vertex(v, g)
assert num_vertices(g) == 2
assert num_edges(g) == 2
remove_vertex(w, g)
assert num_vertices(g) == 1
assert num_edges(g) == 0
remove_vertex(u, g)
assert num_vertices(g) == 0
assert num_edges(g) == 0
|
[
"marc-olivier.buob@nokia-bell-labs.com"
] |
marc-olivier.buob@nokia-bell-labs.com
|
c89449e9d8e482494c12bfe7bc8ea37ebb1327d9
|
cd1d5b7fc9e01f093d6c652876cab24aa8fe7ce6
|
/nodes/pub_and_sub_node.py
|
54a5df4c1707e7b49194d6ae34b6a4bac1cbb7e1
|
[] |
no_license
|
birlrobotics/gps_dnn_policy_training_and_testing_pkg
|
1dd2c4b241af4e8d432d61f4fcfa59c1a7318275
|
cba2b03e9cc096cb2b7133074640bb503a3e326c
|
refs/heads/master
| 2020-07-04T09:15:46.366874
| 2019-08-14T21:34:58
| 2019-08-14T21:34:58
| 202,237,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
#!/usr/bin/env python
import rospy
from gps_dnn_policy_training_and_testing_pkg.CONSTANT import training_request_topic, training_response_topic
from gps_dnn_policy_training_and_testing_pkg.dnn_policy import DnnPolicy
from std_msgs.msg import String
import pdb
import pickle
import tempfile
def cb(msg):
rospy.loginfo('received %s'%msg)
with open(msg.data, 'rb') as f:
req = pickle.load(f)
obs = req['obs']
tgt_mu = req['tgt_mu']
tgt_prc = req['tgt_prc']
tgt_wt = req['tgt_wt']
dU = tgt_mu.shape[1]
pol = DnnPolicy(dU)
f = tempfile.NamedTemporaryFile(delete=False, suffix='.pkl')
pickle.dump(pol, f)
f.close()
rospy.sleep(1)
pub.publish(String(data=f.name))
rospy.loginfo('sent %s'%f.name)
pass
if __name__ == '__main__':
rospy.init_node('pub_and_sub_node')
rospy.Subscriber(training_request_topic, String, cb)
pub = rospy.Publisher(training_response_topic, String)
rospy.spin()
|
[
"sk.law.lsq@gmail.com"
] |
sk.law.lsq@gmail.com
|
96a16b9351a209200123b2d892c8e48ed55f7fe9
|
78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227
|
/2569.py
|
85dab1e43ac70617d702eec9863e9e8dff8536ec
|
[] |
no_license
|
GenryEden/kpolyakovName
|
97db13ef93061a8c2afc6cc5acd91337f79063f1
|
c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9
|
refs/heads/master
| 2023-05-23T21:22:51.983756
| 2021-06-21T08:56:49
| 2021-06-21T08:56:49
| 350,466,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
def getDels(x):
for i in range(1, x+1):
if x % i == 0:
yield i
for x in range(180131, 180179):
dels = list(getDels(x))
if len(dels) == 6:
print(dels[-2], dels[-1])
|
[
"a926788@gmail.com"
] |
a926788@gmail.com
|
42b78dceab23e4ffb753bc7e07b1b91e276e9a59
|
8195e6ea99ee441ba2c23dd9dba7ceecfece37b7
|
/rev2/cifar10/generate_gs_pgd.py
|
076cd5ccb813b81cff16bd5dbf4928328bc94526
|
[] |
no_license
|
msglbqbqb/adv2
|
f2693576dd15c73c1b0322a0bf75972a75e97f70
|
e3472df42197fe6dbe035412d43a9205ede880c2
|
refs/heads/main
| 2023-06-03T05:31:02.820935
| 2021-06-17T16:06:59
| 2021-06-17T16:06:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,612
|
py
|
#!/usr/bin/env python
import argparse
import numpy as np
import torch
import torch.nn as nn
from rev2.cifar10.model_utils import resnet50, CIFAR10_RESNET50_CKPT_PATH
from rev2.gs.generate_gs import generate_gs
from rev2.cifar10.data_utils import cifar10_normalize
from rev2.cifar10.generate_gs_benign import cifar10_resize_postfn
def load_model(config):
model = resnet50()
nn.DataParallel(model).load_state_dict(
torch.load(CIFAR10_RESNET50_CKPT_PATH, lambda storage, location: storage)['net']
)
model.to(config.device)
model.train(False)
return model, cifar10_normalize
def main(config):
model_tup = load_model(config)
dobj = np.load(config.data_path)
adv_dobj = np.load(config.adv_data_path)
img_x, img_yt = adv_dobj['pgd_step_1500_adv_x'], dobj['img_yt']
pgd_gs = generate_gs(model_tup, img_x, img_yt, cifar10_resize_postfn, False, batch_size=50)
save_dobj = {'pgd_x': img_x, 'img_yt': img_yt, 'pgd_gs': pgd_gs}
np.savez(config.save_path, **save_dobj)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data_path')
parser.add_argument('adv_data_path')
parser.add_argument('save_path')
parser.add_argument('-d', '--device', dest='device', choices=['cpu', 'cuda'])
parser.add_argument('-b', '--batch-size', dest='batch_size', type=int, default=50)
config = parser.parse_args()
if config.device is None:
if torch.cuda.is_available():
config.device = 'cuda'
else:
config.device = 'cpu'
print('configuration:', config)
main(config)
|
[
"ting@tings-imac.lan"
] |
ting@tings-imac.lan
|
9d995f3d206d6831f1d5324f3cf2a42613c66e8c
|
8021f835426c5db8ed9b1763a2b71cb8f94a3357
|
/scripts/forage_tracer.py
|
bcec97892278a7afaa1faa49bde095f421852704
|
[
"BSD-3-Clause"
] |
permissive
|
natcap/rangeland_production
|
3859bcf3042bda0d7a64df426aceaaa0a5a8dfe1
|
89acd25cb90c2bd42f55973d7d22b294c80dfc1a
|
refs/heads/develop
| 2022-12-23T17:30:53.660595
| 2021-04-11T01:28:32
| 2021-04-11T01:28:32
| 223,495,475
| 7
| 8
|
NOASSERTION
| 2022-12-09T04:35:37
| 2019-11-22T22:16:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
"""Tracer code for Forage model development."""
import os
import natcap.invest.forage
import logging
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger('forage_tracer')
POSSIBLE_DROPBOX_LOCATIONS = [
r'D:\Dropbox',
r'C:\Users\Rich\Dropbox',
r'C:\Users\rpsharp\Dropbox',
r'E:\Dropbox']
LOGGER.info("checking dropbox locations")
for dropbox_path in POSSIBLE_DROPBOX_LOCATIONS:
print dropbox_path
if os.path.exists(dropbox_path):
BASE_DROPBOX_DIR = dropbox_path
break
LOGGER.info("found %s", BASE_DROPBOX_DIR)
def main():
"""Entry point."""
args = {
'workspace_dir': 'forage_tracer_workspace',
'starting_year': '1998',
'starting_month': '5',
'n_months': '29',
'aoi_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'soums_monitoring_area_dissolve.shp'),
'bulk_density_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'bldfie_sl3.tif'),
'clay_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'clyppt_sl3.tif'),
'silt_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'sltppt_sl3.tif'),
'sand_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'sndppt_sl3.tif'),
'monthly_precip_path_pattern': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'chirps-v2.0.<year>.<month>.tif'),
'monthly_temperature_path_pattern': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'wc2.0_30s_tmax_<month>.tif'),
'veg_spatial_composition_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'veg.tif'),
'animal_inputs_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs',
'sheep_units_density_2016_monitoring_area.shp')
}
LOGGER.info('launching forage model')
natcap.invest.forage.execute(args)
if __name__ == '__main__':
main()
|
[
"richpsharp@gmail.com"
] |
richpsharp@gmail.com
|
dada884103b980d1aff01dc194cce6f238446e3d
|
a9f97f77d30e35c6627f353e49fe2683bf7d51ed
|
/jiayuan/rnn_ner/rnn_ner/model.py
|
0d4a500c38ebfd76e76425dbed26e9babb1efab4
|
[
"MIT"
] |
permissive
|
breezedeus/char-rnn-tensorflow
|
4c3c5e27e21b4bfb077a399f6707c3ec256d2eac
|
0ef7bf9e5b108ae161011f9db3705993e1b0103e
|
refs/heads/master
| 2021-01-17T08:32:44.452317
| 2016-06-18T12:35:56
| 2016-06-18T12:35:56
| 52,412,436
| 0
| 0
| null | 2016-02-24T03:47:42
| 2016-02-24T03:47:42
| null |
UTF-8
|
Python
| false
| false
| 4,936
|
py
|
# coding=utf8
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
import numpy as np
class Model():
def __init__(self, args, infer=False):
self.args = args
if infer:
args.batch_size = 1
args.seq_length = 1
if args.model == 'rnn':
cell_fn = rnn_cell.BasicRNNCell
elif args.model == 'gru':
cell_fn = rnn_cell.GRUCell
elif args.model == 'lstm':
cell_fn = rnn_cell.BasicLSTMCell
else:
raise Exception("model type not supported: {}".format(args.model))
cell = cell_fn(args.rnn_size)
self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)
self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.initial_state = cell.zero_state(args.batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.y_vocab_size])
softmax_b = tf.get_variable("softmax_b", [args.y_vocab_size])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
# len(inputs)==args.seq_length, shape(inputs[0])==(args.batch_size, args.rnn_size)
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
def loop(prev, _):
prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
# len(outputs)==args.seq_length, shape(outputs[0])==(args.batch_size, args.rnn_size)
outputs, states = seq2seq.rnn_decoder(inputs, self.initial_state, cell, loop_function=loop if infer else None, scope='rnnlm')
output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
# shape(logits) = (batch_size*seq_length, vocab_size)
self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
self.probs = tf.nn.softmax(self.logits)
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([args.batch_size * args.seq_length])],
args.vocab_size)
self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
self.final_state = states
self.lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
def sample(self, sess, chars, vocab, num=200, prime='我 们'):
state = self.cell.zero_state(1, tf.float32).eval()
#prime = prime.decode('utf-8')
print('prime: ' + prime)
prime = prime.split(' ')
for char in prime[:-1]:
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[state] = sess.run([self.final_state], feed)
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
ret = ''.join(prime)
char = prime[-1]
for n in xrange(num):
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[probs, state] = sess.run([self.probs, self.final_state], feed)
p = probs[0]
# sample = int(np.random.choice(len(p), p=p))
sample = weighted_pick(p)
pred = chars[sample]
ret += pred
char = pred
return ret
def predict(self, x, max_length, sess, x_vocab, idx2classid):
state = self.cell.zero_state(1, tf.float32).eval()
x_list = x.split()
def pad_line(x_list, pad):
if len(x_list) >= max_length:
x_list = x_list[:max_length]
else:
x_list += [pad] * (max_length-len(x_list))
return x_list
x_list = pad_line(x_list=x_list, pad='<PAD>')
x = np.matrix([map(lambda x: x_vocab.get(x, 0), x_list)])
print(x[0])
feed = {self.input_data: x, self.initial_state: state}
[probs, _] = sess.run([self.probs, self.final_state], feed)
print(probs)
output = np.argmax(probs, axis=1)
idx2classid = np.array(idx2classid)
output = idx2classid[output]
#print(output)
ret = ' '.join(output)
return ret
|
[
"breezedeus@163.com"
] |
breezedeus@163.com
|
147c2d90ce5537ee9f661bf45932eeda21e86596
|
e233d3d5ad19bb17a7dce7ff8d96404a17b3b705
|
/src/programy/parser/template/nodes/vocabulary.py
|
ebaad3a280d6196ae7ad85d1cb65c2696d9876d8
|
[
"MIT"
] |
permissive
|
jaimecamacaro/program-y
|
2559fb0cb70150b147c090c611931f84fd276867
|
5f31608290faddf8da9a52587ec892b258ec11d4
|
refs/heads/master
| 2021-06-26T20:26:53.778763
| 2017-09-13T09:47:14
| 2017-09-13T09:47:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
"""
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
class TemplateVocabularyNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve_to_string(self, bot, clientid):
set_words = bot.brain.sets.count_words_in_sets()
pattern_words = bot.brain.aiml_parser.pattern_parser.count_words_in_patterns()
resolved = "%d" % (set_words + pattern_words)
if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug("[%s] resolved to [%s]", self.to_string(),
resolved)
return resolved
def resolve(self, bot, clientid):
try:
return self.resolve_to_string(bot, clientid)
except Exception as excep:
logging.exception(excep)
return ""
def to_string(self):
return "VOCABULARY"
def to_xml(self, bot, clientid):
xml = "<vocabulary>"
xml += self.children_to_xml(bot, clientid)
xml += "</vocabulary>"
return xml
#######################################################################################################
# <vocabulary/> |
def add_default_star(self):
return True
def parse_expression(self, graph, expression):
self._parse_node(graph, expression)
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
2904a73fe26296f364a3e698b6c66d370b6ebc3c
|
62f59fe1e0246b33c84412ee2a60e77938a05a15
|
/proj/my_lib/Common/img_hash.py
|
ac6c0aa4cb2dd704168d823abfde9bea4dd890fd
|
[] |
no_license
|
20113261/platform_service
|
02676d2654f5c7bde2c7eafdadbf55fe7253a7b0
|
bc903168bd7cbc499892f24c2b1cc82c38180c01
|
refs/heads/dev
| 2022-08-01T02:30:05.004852
| 2018-04-29T05:39:37
| 2018-04-29T05:39:37
| 131,576,306
| 1
| 0
| null | 2022-07-08T19:13:32
| 2018-04-30T09:14:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/31 下午7:34
# @Author : Hou Rong
# @Site :
# @File : img_hash.py
# @Software: PyCharm
import imagehash
from PIL import Image
from proj.my_lib.logger import get_logger, func_time_logger
logger = get_logger("img_hash")
@func_time_logger
def _img_p_hash(f_obj):
f_obj.seek(0)
try:
img_obj = Image.open(f_obj)
except Exception as exc:
logger.exception(msg="[error img]", exc_info=exc)
return None
try:
_hash = imagehash.phash(img_obj)
except Exception as exc:
logger.exception(msg="[could not calculate phash]", exc_info=exc)
return None
f_obj.seek(0)
return _hash
def img_p_hash(f_obj):
_retry_times = 4
while _retry_times:
_retry_times -= 1
_res = _img_p_hash(f_obj)
if _res:
return str(_res)
return None
if __name__ == '__main__':
f = open('/tmp/1/035211ab53d76b051376f9292ca9623d.jpg')
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
f = open('/tmp/1/b8c88852a915cf32e1eeed20ec7d3cc7.jpg')
print(img_p_hash(f))
|
[
"nmghr9@gmail.com"
] |
nmghr9@gmail.com
|
2ef775fa9ffa8db94d0c44a35f38777947ee452a
|
1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b
|
/2022/shift_2d_grid.py
|
6ddd3b4a793d02f3fddf9b911c48410e32e74f17
|
[] |
no_license
|
eronekogin/leetcode
|
ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c
|
edb870f83f0c4568cce0cacec04ee70cf6b545bf
|
refs/heads/master
| 2023-08-16T10:35:57.164176
| 2023-08-14T11:25:33
| 2023-08-14T11:25:33
| 163,679,450
| 0
| 0
| null | 2021-09-09T12:04:44
| 2018-12-31T15:33:06
|
Python
|
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""
https://leetcode.com/problems/shift-2d-grid/
"""
class Solution:
def shiftGrid(self, grid: list[list[int]], k: int) -> list[list[int]]:
R, C = len(grid), len(grid[0])
newGrid = [[0] * C for _ in range(R)]
for r, row in enumerate(grid):
for c, v in enumerate(row):
dr, nc = divmod(c + k, C)
nr = (r + dr) % R
newGrid[nr][nc] = v
return newGrid
print(Solution().shiftGrid([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
], 1))
|
[
"mengyu.jiang@gmail.com"
] |
mengyu.jiang@gmail.com
|
b59ec1cd512b6ef11af45128bfc21a60e6b82ece
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2655/60749/257037.py
|
d69187901e074eb96757b5e40eb320ae97fbe4d1
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
n=int(input())
res=[]
for _ in range(n):
res.append(int(input()))
def findcloset(n):
k=0
while n>=pow(2,k):
if n<pow(2,k+1):
if n==pow(2,k):
return pow(2,k)
else:
return pow(2,k+1)
k+=1
for t in res:
print(findcloset(t))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
db9c2d2a18762a017bc99282713b6486c15730a0
|
7be8a902f968ecd74fdf028d758f8777df6120c7
|
/daxuan/Taiwan/yahoo/yahoo_news.py
|
be5a2c9a07719bf638955a598131a9bea4d0b0c5
|
[
"Apache-2.0"
] |
permissive
|
BingquLee/spiders
|
51142f848d52a7f8a98563e17b5c582a7e18b46c
|
66e42b59aa692ab531e6ca347708d46b189c0047
|
refs/heads/master
| 2020-03-22T00:49:59.079429
| 2018-06-30T17:55:07
| 2018-06-30T17:55:07
| 139,268,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,797
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-11 14:45:12
# @Author : guangqiang_xu (981886190@qq.com)
# @Link : http://www.treenewbee.com/
# @Version : $Id$
import requests
from lxml import etree
from retry import retry
import time
import json
import hashlib
import re
import urllib, urllib2
from readability.readability import Document
from elasticsearch import Elasticsearch
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
es = Elasticsearch()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
def searchData(index, type ,body):
query_body = {"query": {"query_string": {"query": body}}}
results = es.search(index=index, doc_type=type, body=query_body)
date_list = results['hits']['hits']
return date_list
from langconv import *
def Traditional2Simplified(sentence):
sentence = Converter('zh-hans').convert(sentence)
return sentence
@retry(tries=3)
def get_content(lis, keyword, timest):
i = 1
for li in lis:
item = {}
source = "yahoo"
print li
news_url = li.xpath('./div/div[1]/h3/a/@href')
# news_url = li.xpath('./div/div[1]/h3/a/@href')[0]
print 11111111111111111111111, news_url, 1111111111111111111111111111111
title = ''.join(li.xpath('./div/div[1]/h3/a//text()'))
print title
summary = ''.join(li.xpath('./div/div[2]/p//text()'))
# user_name = li.xpath('./div/div[3]/p/span[1]/text()')[0]
# print user_name
date = li.xpath('./div/div[3]/p/span[2]/text()')[0]
print date
strdate = '2018-' + date.replace('AM','').replace('PM','').replace('月','-').replace('日','')
timeArray = time.strptime(strdate, "%Y-%m-%d %H:%M")
timestamp = int(time.mktime(timeArray))
if timestamp < timest:
continue
response1 = requests.get(news_url, timeout=10, headers=headers)
response1.coding = 'utf-8'
txt1 = response1.content
new_url = re.findall(r'URL=(.*?)">',txt1)[0].replace("'",'')
hash_md5 = hashlib.md5(new_url)
Id = hash_md5.hexdigest()
response = requests.get(new_url, timeout=10, headers=headers)
response.coding = 'utf-8'
txt = response.content
readable_article = Document(txt).summary()
html = etree.HTML(readable_article)
context = ''.join(html.xpath('//p//text()')).replace('\r','').replace('\n','').replace('\t','')
if context in "":
news_html = etree.HTML(txt)
context = ''.join(news_html.xpath('//p//text()'))
timesyear = time.localtime(timestamp).tm_year
stringDate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
images = ''
kname = urllib.quote(str(title))
try:
Imageurl = "https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1502779395291_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=" + kname
req = urllib2.urlopen(Imageurl, timeout=10)
html = req.read()
images = re.search(r'https://.*?\.jpg', html).group()
except:
pass
summary = Traditional2Simplified(summary.decode("utf-8"))
keyword = Traditional2Simplified(keyword.decode("utf-8"))
context = Traditional2Simplified(context.decode("utf-8"))
tittle = Traditional2Simplified(title.decode("utf-8"))
item['summary'] = summary
item['keyword'] = keyword
item['candidate'] = keyword
item['source'] = source
item['timestamps'] = timestamp
item['date'] = date
item['lang'] = 'cn'
item['images'] = images
item['context'] = context
item['timesyear'] = timesyear
item['time'] = stringDate
item['title'] = tittle
item['url'] = new_url
item['id'] = Id
with open('yahoo_news.json', 'a') as f:
f.write(json.dumps(item, ensure_ascii=False) + '\n')
def crawl_yahoo(keyword, strdate):
timeArray = time.strptime(strdate, "%Y-%m-%d")
timest = int(time.mktime(timeArray))
kname = urllib.quote(str(keyword))
page = 1
while 1:
url = "https://tw.search.yahoo.com/search;?fr2=sb-top-tw.search&p={}&b={}".format(kname, page)
# url = "https://tw.news.search.yahoo.com/search;_ylt=AwrtXGtDr81aAG4A2CVw1gt.;_ylu=X3oDMTEwOG1tc2p0BGNvbG8DBHBvcwMxBHZ0aWQDBHNlYwNwYWdpbmF0aW9u?p={}&ei=UTF-8&flt=ranking%3Adate%3B&fr=yfp-search-sb&b={}&pz=10&bct=0&xargs=0".format(kname, page)
print url
response = requests.get(url, headers=headers)
txt = response.text
html = etree.HTML(txt)
lis = html.xpath('//ol[@class="mb-15 reg searchCenterMiddle"]/li')
# lis = html.xpath('//div[@id="web"]/ol[2]/li')
# print "lis", lis
i = 1
for li in lis:
item = {}
source = "yahoo"
print li.xpath('./div/div[1]/h3/a/@href')[0]
print "*************"
title = ''.join(li.xpath('./div/div[1]/h3/a//text()'))
print title
print "+++++++++++++"
summary = ''.join(li.xpath('./div/div[2]/p//text()'))
print summary
print "------------"
date = li.xpath('./div/div[3]/p/span[2]/text()')[0]
print date
print 00000000000000
if len(lis) <= 0:
break
get_content(lis, keyword, timest)
page += 10
if page == 81:
break
if __name__ == '__main__':
crawl_yahoo('盧秀燕', '2018-01-01')
crawl_yahoo('林佳龍', '2018-01-01')
|
[
"bingqulee@gmail.com"
] |
bingqulee@gmail.com
|
4ea70871b269b1e8653582ef88c2497f5e928abc
|
96740c0a9ff1467f0897253c79a059b5ba6a1949
|
/test_webscoket.py
|
02c07101a77d1393e2894d4e4843fafdb61c1326
|
[] |
no_license
|
Cola1995/soho1
|
a876990cd3adfb9534eb3630e24a9bf90bdf8363
|
fad8f13d6c789e7c37eba5cfd94a9cb609c8db1d
|
refs/heads/master
| 2020-07-27T07:50:38.299692
| 2019-09-17T10:02:34
| 2019-09-17T10:02:34
| 209,020,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,594
|
py
|
import asyncio
import logging
from datetime import datetime
from aiowebsocket.converses import AioWebSocket
import json
async def startup(uri):
async with AioWebSocket(uri) as aws:
converse = aws.manipulator
# 客户端给服务端发送消息
await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_market"}}') # 监听所有市场
await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_market_bid_ask"}}') #
# 监听btc_usdt webscoket
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_ticker"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_eth-usdt"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_bqqq-usdt"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"auth":"5174598ab656e4da66dc:1c303fad7f188e3a9f130235ecffc1a2052da5bd9645d572b8b6020f1d154032","channel":"private-exchange==abbd73ed-2cde-416f-8ce1-3217e0472205"}}') # 监听所有市场
while True:
mes = await converse.receive()
print('{time}-Client receive: {rec}'
.format(time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'), rec=mes))
print(type(mes))
# 解包,获取想要的数据
# mes = json.loads(mes.decode("utf-8"))
# print(mes)
# if mes["data"]["marketPriceDto"]["marketSymbol"]=="NEO-BTC":
# print(mes["data"])
# m1 = json.loads(mes["data"])
# print(m1.get("message").get("marketPriceDto").get("volume"))
# print(m1)
# if m1.get("message")!=None:
# if m1["message"]["marketPriceDto"]["marketSymbol"]==market:
# print("{0}:市场:{1},chang24:{2}, percentageChange24:{3}".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),m1["message"]["marketPriceDto"]["marketSymbol"],m1["message"]["marketPriceDto"]["change24"],m1["message"]["marketPriceDto"]["percentageChange24"]))
if __name__ == '__main__':
# remote = 'wss://wssprod.bitsdaq.com/app/167bca97db7a84f1c98b?protocol=7&client=js&version=4.3.1&flash=false' # 线上环境
market = "ETH-BTC" # 配置需要监听的市场/币对
remote ="wss://wss-dev-15.bitsdaq.io/app/d4796efce047f9e6443a?protocol=7&client=js&version=4.4.0&flash=false" # dev环境通用
try:
asyncio.get_event_loop().run_until_complete(startup(remote))
except KeyboardInterrupt as exc:
logging.info('Quit.')
|
[
"991571566@qq.com"
] |
991571566@qq.com
|
f73e8cee4387922b60f25f6d68bcaedf74ab873d
|
de479d4a8af0e070b2bcae4186b15a8eb74971fb
|
/cn/iceknc/study/c_python_pygame/c_pygame_window.py
|
6ba43b45b4a1518b0fc99459cb90ef2ca9434385
|
[] |
no_license
|
iceknc/python_study_note
|
1d8f6e38be57e4dc41a661c0a84d6ee223c5a878
|
730a35890b77ecca3d267fc875a68e96febdaa85
|
refs/heads/master
| 2020-05-19T18:44:55.957392
| 2019-09-27T01:15:54
| 2019-09-27T01:15:54
| 185,160,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
import pygame
pygame.init()
# 创建游戏窗口
screen = pygame.display.set_mode((480, 700))
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
hero = pygame.image.load("./images/me1.png")
screen.blit(hero, (200, 500))
pygame.display.update()
pygame.quit()
|
[
"xzhipeng@lifecare.cn"
] |
xzhipeng@lifecare.cn
|
ca4b09083eb46a4afe2e3fcc2d2303319053a314
|
bbe447a740929eaee1955bd9c1517cf760dd5cb9
|
/keygrabber/adwords/adwords_api_python_14.2.1/build/lib.linux-x86_64-2.7/adspygoogle/adwords/zsi/v200909/CampaignCriterionService_services.py
|
efe456bdfc5fa3c46319a37ae0a627525a085719
|
[
"Apache-2.0"
] |
permissive
|
MujaahidSalie/aranciulla
|
f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893
|
34197dfbdb01479f288611a0cb700e925c4e56ce
|
refs/heads/master
| 2020-09-07T02:16:25.261598
| 2011-11-01T21:20:46
| 2011-11-01T21:20:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
##################################################
# CampaignCriterionService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from CampaignCriterionService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class CampaignCriterionServiceLocator:
CampaignCriterionServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v200909/CampaignCriterionService"
def getCampaignCriterionServiceInterfaceAddress(self):
return CampaignCriterionServiceLocator.CampaignCriterionServiceInterface_address
def getCampaignCriterionServiceInterface(self, url=None, **kw):
return CampaignCriterionServiceSoapBindingSOAP(url or CampaignCriterionServiceLocator.CampaignCriterionServiceInterface_address, **kw)
# Methods
class CampaignCriterionServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# get: getCampaignCriterion
def getCampaignCriterion(self, request):
if isinstance(request, getCampaignCriterionRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getCampaignCriterionResponse.typecode)
return response
# mutate: getCampaignCriterion
def mutateCampaignCriterion(self, request):
if isinstance(request, mutateCampaignCriterionRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(mutateCampaignCriterionResponse.typecode)
return response
getCampaignCriterionRequest = ns0.getCampaignCriterion_Dec().pyclass
getCampaignCriterionResponse = ns0.getCampaignCriterionResponse_Dec().pyclass
mutateCampaignCriterionRequest = ns0.mutateCampaignCriterion_Dec().pyclass
mutateCampaignCriterionResponse = ns0.mutateCampaignCriterionResponse_Dec().pyclass
|
[
"vincenzo.ampolo@gmail.com"
] |
vincenzo.ampolo@gmail.com
|
0d0a072bf4bc60c77f25558e40e4222f8ca8679c
|
496e05014492b4bbecf9f15c40ae416c21e27a46
|
/src/outpost/django/video/migrations/0009_epiphansource.py
|
1354f113997a6042355a8bf0539191a4f9fc69c3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
medunigraz/outpost_deprecated
|
b1ff802054c04cf989b3b660e132fa6a1c2a078c
|
bc88eaa3bb504d394fdf13f1131e40db27759c89
|
refs/heads/master
| 2022-01-23T15:46:34.859095
| 2019-05-21T08:38:11
| 2019-05-21T08:38:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-08 08:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import imagekit.models.fields
from ...base.utils import Uuid4Upload
class Migration(migrations.Migration):
dependencies = [
('video', '0008_zipstreamexport'),
]
operations = [
migrations.CreateModel(
name='EpiphanSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveSmallIntegerField()),
('preview', imagekit.models.fields.ProcessedImageField(upload_to=Uuid4Upload)),
('epiphan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='video.Epiphan')),
],
),
]
|
[
"michael@fladi.at"
] |
michael@fladi.at
|
09485a57811f74f6320ac2d4290643cdd57572c4
|
e96deed00dd14a1f6d1ed7825991f12ea8c6a384
|
/106. Construct Binary Tree from Inorder and Postor.py
|
65b1c9034c369dcb1878ffa66b112d2c2d6b2c93
|
[] |
no_license
|
borisachen/leetcode
|
70b5c320abea8ddfa299b2e81f886cfeb39345c1
|
15e36b472a5067d17482dbd0d357336d31b35ff4
|
refs/heads/master
| 2021-01-19T17:07:46.726320
| 2020-11-16T04:30:52
| 2020-11-16T04:30:52
| 88,306,634
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
106. Construct Binary Tree from Inorder and Postorder Traversal
Given inorder and postorder traversal of a tree, construct the binary tree.
Note:
You may assume that duplicates do not exist in the tree.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
postorder implies the last element is the root node
find that element in inorder, say its position j
the elements right of j in inorder will belong to the right node
the elements left of j in inorder will belong to the left node
if we process the right node first,
left node:
right node:
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
if not inorder or not postorder:
return None
root = TreeNode(postorder.pop())
j = inorder.index(root.val)
root.right = self.buildTree(inorder[j+1:], postorder)
root.left = self.buildTree(inorder[:j], postorder)
return root
|
[
"boris.chen@gmail.com"
] |
boris.chen@gmail.com
|
6e22abb1ceff8ee09df97b9ab40f2f1c3fc0ff35
|
32bbe94e77deced5e58de97eb19e7c6126b001df
|
/backend/src/carts/admin/carts.py
|
3c0c975c778d172c9434ee052c504b97c3071014
|
[] |
no_license
|
3asyPe/astudy
|
16d8adacc3bee9f2667c0a5f1be8228868440c6a
|
0643a33a294c410523738f59f95c8d205dd63dc5
|
refs/heads/master
| 2023-06-25T11:23:39.500361
| 2021-07-28T13:33:48
| 2021-07-28T13:33:48
| 336,819,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from django.contrib import admin
from app.admin import UserFilter
from carts.models import Cart
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
list_display = [
'id',
'user',
'total',
'active',
]
list_display_links = [
'id',
'user',
'total',
'active'
]
fields = [
"user",
"courses",
"subtotal",
"total",
"active",
]
readonly_fields = [
"subtotal",
"total",
]
list_filter = [
'active',
UserFilter
]
|
[
"alex.kvasha228@gmail.com"
] |
alex.kvasha228@gmail.com
|
808ad659be53616086cf7608e29444f522b05378
|
b723ecb64c86657751cafd21030de2b3c64886f7
|
/unchained/community/teacher/views.py
|
85d8abe0ce48b865972359f11ec8b82f08ac1969
|
[] |
no_license
|
mohinderps/community
|
56dffc11d56d704e8c8c6b1e052741da2eb6d1ce
|
1d4b5aa357d41c2e75768f359118103a58da43e1
|
refs/heads/master
| 2020-04-01T07:27:24.744768
| 2018-10-14T15:45:04
| 2018-10-14T15:45:04
| 152,990,736
| 0
| 0
| null | 2018-10-14T15:47:23
| 2018-10-14T15:47:23
| null |
UTF-8
|
Python
| false
| false
| 2,818
|
py
|
from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from rest_framework.response import Response
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import TeacherSerializer
from .models import Teacher
from rest_framework.exceptions import PermissionDenied
from community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile
from community.filters import applyUserFilters
class TeacherViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
Additionally we also provide an extra `highlight` action.
"""
queryset = Teacher.objects.all()
serializer_class = TeacherSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
authentication_classes = (CsrfExemptSessionAuthentication, )
def list(self, request, *args, **kwargs):
if not belongsToInstitution(request, getUserInstitution(request)):
raise PermissionDenied(detail='User does not belong to the institution', code=None)
if request.user.is_superuser:
self.queryset = applyUserFilters(request, Teacher)
else:
self.queryset = applyUserFilters(request, Teacher, institution=getUserInstitution(request))
return super(TeacherViewSet, self).list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
if not isInstitutionAdmin(request, getUserInstitution(request)):
raise PermissionDenied(detail='User is not an admin_user', code=None)
return super(TeacherViewSet, self).create(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
if not belongsToInstitution(request, self.get_object().institution):
raise PermissionDenied(detail='User does not belong to the institution', code=None)
return super(TeacherViewSet, self).retrieve(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
if not canUpdateProfile(request, self.get_object().institution, self.get_object()):
raise PermissionDenied(detail='User can not update other profiles', code=None)
return super(TeacherViewSet, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
if not isInstitutionAdmin(request, self.get_object().institution):
raise PermissionDenied(detail='User is not an admin_user', code=None)
return super(TeacherViewSet, self).destroy(request, *args, **kwargs)
|
[
"dhruv.life@hotmail.com"
] |
dhruv.life@hotmail.com
|
1541e195b8051b431436d8b87ef862ecd8ed011e
|
8d593cdc89bac4a993f776c9b11b9339f035744b
|
/PHYS613 A2 Exercise2.14 SquareWell.py
|
593a104bcfb57c763726d4aa6407b37e28574582
|
[] |
no_license
|
Global19-atlassian-net/ComputationalPhysics
|
21026c748801d07324620ca02dbc56b9a55a0abd
|
9c50c302706c5015b588ac12980c5f96a414575f
|
refs/heads/master
| 2021-05-30T00:50:58.746447
| 2015-11-27T15:31:20
| 2015-11-27T15:31:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
"""
Created on Fri Sep 06 21:03:27 2013
PHYS 613, Assignment 2
Nick Crump
"""
# Exercise 2.13
# Exercise 2.14
"""
From Computational Physics by Devries
"""
from math import sin,cos,exp,sqrt
import numpy as np
import matplotlib.pyplot as plt
# define function - even state solutions of 1D finite square well potential
#***********************************************************************
def evenFunc(a,m,V0,E):
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
alpha = (2.0*m*E/hbarSq)**0.5
beta = ((2.0*m*(V0-E))/hbarSq)**0.5
fEven = beta*cos(alpha*a) - alpha*sin(alpha*a)
return fEven
#***********************************************************************
# define function - odd state solutions of 1D finite square well potential
#***********************************************************************
def oddFunc(a,m,V0,E):
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
alpha = (2.0*m*E/hbarSq)**0.5
beta = ((2.0*m*(V0-E))/hbarSq)**0.5
fOdd = alpha*cos(alpha*a) + beta*sin(alpha*a)
return fOdd
#***********************************************************************
# enter root finding algorithm by Bisection method
#***********************************************************************
def rootBisection(f, xI, xF, Tol, nMax):
# initialize variables
error = 1
n = 1
xiMid = 0 # initial midpoint value to store the n-1 value
# loop until error is less than input tolerance
while error > Tol:
xMid = 0.5*(xI+xF)
# set up main Bisection method:
# make bracket interval smaller each iteration until root is found
# check conditions and update bracket points
if f(xI)*f(xMid) > 0:
xI = xMid
error = abs(xMid - xiMid) # calculate approx error
n = n + 1
xiMid = xMid # store the n-1 midpoint
elif f(xI)*f(xMid) < 0:
xF = xMid
error = abs(xMid - xiMid) # calculate approx error
n = n + 1
xiMid = xMid # store the n-1 midpoint
# output results to user
return round(xMid,5)
# end rootBisection function
#***********************************************************************
# main program that calls functions, finds roots and does plotting
#***********************************************************************
# setup root finder routine
#--------------------------
a = 0.3 # nm
m = 1.0 # Melectron
V0 = 10.0 # eV
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
sfEven = lambda E: ((2.0*m*(V0-E)/hbarSq)**0.5)*cos(((2.0*m*E/hbarSq)**0.5)*a) - ((2.0*m*E/hbarSq)**0.5)*sin(((2.0*m*E/hbarSq)**0.5)*a)
sfOdd = lambda E: ((2.0*m*E/hbarSq)**0.5)*cos(((2.0*m*E/hbarSq)**0.5)*a) + ((2.0*m*(V0-E)/hbarSq)**0.5)*sin(((2.0*m*E/hbarSq)**0.5)*a)
Eeven = rootBisection(sfEven, 0, 2.0, 10e-5, 30)
Eodd = rootBisection(sfOdd, 2.0, 4.0, 10e-5, 30)
print 'Eigenvalues = ', Eeven, Eodd
# setup plotting of allowed energy equation as function of energy
#--------------------------
E = np.arange(0,10.1,0.1)
evenF = []
oddF = []
for i in E:
fEven = evenFunc(0.3,1.0,10.0,i)
fOdd = oddFunc(0.3,1.0,10.0,i)
evenF.append(fEven)
oddF.append(fOdd)
plt.figure(1)
plt.plot(E,evenF,'b',label='Even States')
plt.plot(E,oddF,'r',label='Odd States')
plt.plot(Eeven,0,'bo',Eodd,0,'ro')
plt.xlabel('Energy (eV)')
plt.ylabel('$f\ (E)$')
plt.legend(loc=9)
# setup wavefunction plotting as function of distance & plot potential well
#--------------------------
# x arrays for regions around well
R1 = np.arange(-0.6,-0.29,0.01) # region 1 left of well
R2 = np.arange(-0.3,0.301,0.01) # region 2 inside well
R3 = np.arange(0.3,0.601,0.01) # region 3 right of well
# alpha & beta values for even states
alphEven = sqrt(2*m*Eeven/hbarSq)
betaEven = sqrt(2*m*(V0-Eeven)/hbarSq)
# even state wavefunctions for 3 regions (arbitrary normalization coefficients)
# wavefunctions shifted to make energy eigenvalues the zero baseline
psiR1even = [30*exp(betaEven*i)+Eeven for i in R1]
psiR2even = [cos(alphEven*i)+Eeven for i in R2]
psiR3even = [30*exp(-betaEven*i)+Eeven for i in R3]
# alpha & beta values for odd states
alphOdd = sqrt(2*m*Eodd/hbarSq)
betaOdd = sqrt(2*m*(V0-Eodd)/hbarSq)
# odd state wavefunctions for 3 regions (arbitrary normalization coefficients)
# wavefunctions shifted to make energy eigenvalues the zero baseline
psiR1odd = [-30*exp(betaOdd*i)+Eodd for i in R1]
psiR2odd = [sin(alphOdd*i)+Eodd for i in R2]
psiR3odd = [30*exp(-betaOdd*i)+Eodd for i in R3]
plt.figure(2)
# plot lines for potential V(x)
plt.plot([-0.6,-0.3],[10,10],'k',linewidth='4')
plt.plot([-0.3,-0.3],[10,0],'k',linewidth='4')
plt.plot([-0.3,0.3],[0,0], 'k',linewidth='4')
plt.plot([0.3,0.3], [0,10], 'k',linewidth='4')
plt.plot([0.3,0.6],[10,10], 'k',linewidth='4')
plt.xticks([-0.6,-0.4,-0.2,0,0.2,0.4,0.6])
plt.annotate('$V_0$',fontsize=16,xy=(0.23,0.82),xycoords='figure fraction')
# plot lines for energy eigenvalues
plt.plot([-0.6,0.6],[Eeven,Eeven],'g',linewidth='2',linestyle='--')
plt.plot([-0.6,0.6],[Eodd,Eodd],'g',linewidth='2',linestyle='--')
plt.annotate('Ground State Energies',fontsize=12,xy=(0.39,0.27),xycoords='figure fraction')
plt.annotate('$E_{even}=0.71545$',fontsize=12,xy=(0.75,0.20),xycoords='figure fraction')
plt.annotate('$E_{odd}=2.82139$',fontsize=12,xy=(0.755,0.40),xycoords='figure fraction')
# plot wavefunctions for each ground state energy
plt.plot(R1,psiR1even,'b',label='$\psi_{even}\ ({x})$')
plt.plot(R2,psiR2even,'b')
plt.plot(R3,psiR3even,'b')
plt.plot(R1,psiR1odd,'r',label='$\psi_{odd}\ ({x})$')
plt.plot(R2,psiR2odd,'r')
plt.plot(R3,psiR3odd,'r')
plt.annotate(r'$\psi_{1}=C\ \exp({\beta x})$',fontsize=12,xy=(0.15,0.625),xycoords='figure fraction')
plt.annotate(r'$\psi_{2odd}=A\ \sin({\alpha x})$',fontsize=12,xy=(0.42,0.65),xycoords='figure fraction')
plt.annotate(r'$\psi_{2even}=B\ \cos({\alpha x})$',fontsize=12,xy=(0.42,0.60),xycoords='figure fraction')
plt.annotate(r'$\psi_{3}=F\ \exp({-\beta x})$',fontsize=12,xy=(0.73,0.625),xycoords='figure fraction')
plt.yticks(range(-2,14,2))
# set titles
plt.xlabel('Distance (nm)')
plt.ylabel('Ground State Wavefunctions')
plt.legend(loc=9)
#***********************************************************************
|
[
"ncrump32@gmail.com"
] |
ncrump32@gmail.com
|
fc33d8e7379198696b815ebc07274d16e233a466
|
9c2ba4f1a2d75b1916e6f20fa95c5fb32d0497d9
|
/ScrapingWithPython2/code/crawler_script/userAgents.py
|
d8b7443d1f35f892e33a3ec4385bab0c16310377
|
[] |
no_license
|
PowerDG/DgCoreInit
|
abe4b15e38b730c25424f71e6927db982af27a72
|
84e6b7833ddc083b90fcc172c3812dd6f8b51e3d
|
refs/heads/master
| 2023-07-19T11:58:09.220460
| 2019-06-07T14:43:24
| 2019-06-07T14:43:24
| 163,091,619
| 0
| 1
| null | 2023-07-06T21:20:15
| 2018-12-25T14:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,255
|
py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
__author__ = 'hstking hst_king@hotmail.com'
pcUserAgent = {
"safari 5.1 – MAC":"User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"safari 5.1 – Windows":"User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"IE 9.0":"User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);",
"IE 8.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"IE 7.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"IE 6.0":"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Firefox 4.0.1 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Firefox 4.0.1 – Windows":"User-Agent:Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera 11.11 – MAC":"User-Agent:Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera 11.11 – Windows":"User-Agent:Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Chrome 17.0 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Maxthon":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Tencent TT":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"The World 2.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"The World 3.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"sogou 1.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"360":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Avant":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Green Browser":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
}
mobileUserAgent = {
"iOS 4.33 – iPhone":"User-Agent:Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"iOS 4.33 – iPod Touch":"User-Agent:Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"iOS 4.33 – iPad":"User-Agent:Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Android N1":"User-Agent: Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Android QQ":"User-Agent: MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Android Opera ":"User-Agent: Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
"Android Pad Moto Xoom":"User-Agent: Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"BlackBerry":"User-Agent: Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
"WebOS HP Touchpad":"User-Agent: Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
"Nokia N97":"User-Agent: Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
"Windows Phone Mango":"User-Agent: Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
"UC":"User-Agent: UCWEB7.0.2.37/28/999",
"UC standard":"User-Agent: NOKIA5700/ UCWEB7.0.2.37/28/999",
"UCOpenwave":"User-Agent: Openwave/ UCWEB7.0.2.37/28/999",
"UC Opera":"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999"
}
|
[
"1049365046@qq.com"
] |
1049365046@qq.com
|
fb363a89cd15293a0bed822eb4c5966d9e1ac713
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02697/s900893925.py
|
cfd061b2464153e0333019f32cf31aa1b124ef34
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
n, m = map(int, input().split())
i = 0
c = 0
while (i + 1) + i < (n - i - (i + 1)) and c < m:
print(i + 1, n - i)
c += 1
i += 1
a = n // 2 + n % 2
for i in range(m - c):
print(a - i - 1, a + i + 1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
04489e971a9cf6a6d19f42d7c96e28cf0b5067a7
|
4e1e7c9d3848e4eed4111be11f22436ef3143e6d
|
/python/p146.py
|
3c8f6e11949b34bf6a7404c4066e639241fd4cb1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
rsafarov/Project-Euler-solutions
|
d2e3bc7ed2bb05e935b1f0e9404eec4a2dcecacd
|
e5061b8358ddbe9f6563c32ef82e135c233257fe
|
refs/heads/master
| 2021-01-12T06:44:12.461955
| 2016-12-26T22:55:11
| 2016-12-26T22:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
#
# Solution to Project Euler problem 146
# by Project Nayuki
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import eulerlib
# Right off the bat, we can exclude 90% of the candidates by the following observations:
# - If n = 1 mod 2, then n^2 + 1 = 0 mod 2 which is composite.
# - Thus we require n = 0 mod 2.
# - If n = 1 mod 5, then n^2 + 9 = 0 mod 5 which is composite.
# - If n = 2 mod 5, then n^2 + 1 = 0 mod 5 which is composite.
# - If n = 3 mod 5, then n^2 + 1 = 0 mod 5 which is composite.
# - If n = 4 mod 5, then n^2 + 9 = 0 mod 5 which is composite.
# - Thus we require n = 0 mod 5.
# - Taking these two together and using the Chinese remainder theorem (CRT), we require n = 0 mod 10.
#
# For each value of n, after we generate the set {n^2 + 1, n^2 + 3, ..., n^2 + 27}, it's more efficient to take each
# prime number and test whether it divides any number, rather than take each number and test it against all prime numbers.
# This is because some numbers in this set are prime so the latter method tests some numbers against all the primes;
# the former method will bail out early as soon as ~any~ number in the set has a small prime factor.
#
# The rest of the algorithm is implemented straightforwardly.
def compute():
LIMIT = 150000000
INCREMENTS = [1, 3, 7, 9, 13, 27] # Must be in non-decreasing order
NON_INCREMENTS = set(i for i in range(INCREMENTS[-1]) if i not in INCREMENTS)
maxnumber = LIMIT**2 + INCREMENTS[-1]
primes = eulerlib.list_primes(eulerlib.sqrt(maxnumber))
def has_consecutive_primes(n):
# Generate the set of numbers to test for primality
n2 = n**2
temp = [(n2 + k) for k in INCREMENTS]
# Test that each number is prime.
# Note: The nesting of the loops can be reversed, but this way is much faster.
if any((x != p and x % p == 0)
for p in primes
for x in temp):
return False
# Test that each number that is not an increment is composite.
# This checks that the prime numbers we found are in fact consecutive.
return all((not is_prime(n2 + k)) for k in NON_INCREMENTS)
def is_prime(n):
end = eulerlib.sqrt(n)
for p in primes:
if p > end:
break
if n % p == 0:
return False
return True
ans = sum(n for n in range(0, LIMIT, 10) if has_consecutive_primes(n))
return str(ans)
if __name__ == "__main__":
print(compute())
|
[
"me@nayuki.io"
] |
me@nayuki.io
|
57f4fd86ef61862a8603a69e948aeba72ff1531f
|
13d3724f5e2de71cd41177e73ea331bb02b2c6fe
|
/network.py
|
c63b259bcea27a068b7ffc7cadc7e322fb8bee07
|
[] |
no_license
|
chengyang317/deep_encode_decode
|
db87a2a5f1b6d0f86fbb4ff93812ceff2394b3cf
|
b2d09e3768b26f9a831b0d738f4e03feed80471a
|
refs/heads/master
| 2021-01-01T04:33:53.003522
| 2016-05-19T01:01:25
| 2016-05-19T01:01:25
| 59,162,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
import tensorflow as tf
import prettytensor as pt
import numpy as np
class NetWork(object):
"""
Net work for the encode_decode architechture.
"""
def __init__(self, batch_size):
input_tensor = tf.placeholder(tf.float32, shape=(batch_size, DATA_SIZE))
label_tensor = tf.placeholder(tf.float32, shape=(BATCH_SIZE, CLASSES))
pretty_input = pt.wrap(input_tensor)
|
[
"chengyang317@gmail.com"
] |
chengyang317@gmail.com
|
379642818204d5baebc8e7103b88c69cdf947053
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/S4uZaKhcDa7pJ33nu_24.py
|
ca56f99a3e15d892f27398e2d0cc7c9148315d09
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from datetime import datetime as dt, timedelta as td
def week_after(d):
return (dt.strptime(d, '%d/%m/%Y') + td(days=7)).strftime('%d/%m/%Y')
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fc28baeac41627dff3871aeae768c4e62954d2aa
|
b7b243902150a1aa5b774523ac01d7016de13477
|
/cyc/DP/stock/123.py
|
116e4f9d9415a2a4c8e82fe5322822c75151375a
|
[] |
no_license
|
Veraph/LeetCode_Practice
|
7e97a93464911a1f33b3133043d96c88cd54016a
|
eafadd711f6ec1b60d78442280f1c44b6296209d
|
refs/heads/master
| 2023-03-23T11:49:19.046474
| 2021-03-18T02:22:50
| 2021-03-18T02:22:50
| 273,317,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
# 123.py -- Best time to buy and sell stock III
'''
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: prices = [3,3,5,0,0,3,1,4]
Output: 6
Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3.
Example 2:
Input: prices = [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: prices = [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
Example 4:
Input: prices = [1]
Output: 0
'''
def maxProfit(prices):
'''
four states.
the b1 and b2 mean the cost we currently have for buying when we buy the first and second stock
b2 will be negative when you have on-hand profit(this profit include the cost you pay for the second stock)
the s1 and s2 mean the profit we get after selling first and second stock
'''
b1 = b2 = float('inf')
s1 = s2 = 0
for price in prices:
if b1 > price:
b1 = price
if s1 < price - b1:
s1 = price - b1
if b2 > price - s1:
b2 = price - s1
if s2 < price - b2:
s2 = price - b2
return s2
maxProfit([3,3,5,0,0,3,1,4])
|
[
"jmw3531@live.com"
] |
jmw3531@live.com
|
982036613e2e749e78f5d113fca143718d25414f
|
3a1fea0fdd27baa6b63941f71b29eb04061678c6
|
/src/ch08/rtda/heap/Method.py
|
eaa6ddab3eefc513c2349d4e34ad7e703e56d71d
|
[] |
no_license
|
sumerzhang/JVMByPython
|
56a7a896e43b7a5020559c0740ebe61d608a9f2a
|
1554cf62f47a2c6eb10fe09c7216518416bb65bc
|
refs/heads/master
| 2022-12-02T17:21:11.020486
| 2020-08-18T06:57:10
| 2020-08-18T06:57:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: Method.py
@time: 2019/9/16 16:55
@desc: 方法信息
"""
from ch08.classfile.MemberInfo import MemberInfo
from ch08.rtda.heap import AccessFlags
from ch08.rtda.heap.ClassMember import ClassMember
from ch08.rtda.heap.MethodDescriptorParser import MethodDescriptorParser
class Method(ClassMember):
def __init__(self):
super(Method, self).__init__()
# 操作数栈
self.max_stack = 0
# 局部变量表大小
self.max_locals = 0
# 存放方法字节码
self.code = []
self.arg_slot_count = 0
# 根据class文件中的方法信息创建Method表
@staticmethod
def new_methods(clazz, cfMethods):
methods = []
for cfMethod in cfMethods:
method = Method()
method.set_class(clazz)
method.copy_member_info(cfMethod)
method.copy_attributes(cfMethod)
method.calc_arg_slot_count()
methods.append(method)
return methods
# 从method_info结构中提取max_stack、max_locals、code信息
def copy_attributes(self, cfMethod: MemberInfo):
code_attr = cfMethod.code_attribute
if code_attr is not None:
self.max_stack = code_attr.max_stack
self.max_locals = code_attr.max_locals
self.code = code_attr.code
# 计算参数在局部变量表中占用多少位置
def calc_arg_slot_count(self):
parsed_descriptor = MethodDescriptorParser.parse_method_descriptor(self.descriptor)
for _ in parsed_descriptor.parameter_types:
self.arg_slot_count += 1
if not self.is_static():
self.arg_slot_count += 1
def is_synchronized(self):
return 0 != self.access_flags & AccessFlags.ACC_SYNCHRONIZED
def is_bridge(self):
return 0 != self.access_flags & AccessFlags.ACC_BRIDGE
def is_varargs(self):
return 0 != self.access_flags & AccessFlags.ACC_VARARGS
def is_native(self):
return 0 != self.access_flags & AccessFlags.ACC_NATIVE
def is_abstract(self):
return 0 != self.access_flags & AccessFlags.ACC_ABSTRACT
def is_strict(self):
return 0 != self.access_flags & AccessFlags.ACC_STRICT
|
[
"huruifeng1202@163.com"
] |
huruifeng1202@163.com
|
168f0c35ff34bedb374f39dccf96153f2d189166
|
fef8f43025cff430d9aea080885173d9c22b3cb6
|
/etalia/library/migrations/0011_auto_20170616_0411.py
|
57504ddd33b4be67c994de2dbe976ccc55c6ca32
|
[] |
no_license
|
GemmaAA1/etalia-open
|
30a083141330e227ac1de9855894bfb6e476e3cc
|
260ce54d2da53c943d8b82fa9d40bb0c0df918a6
|
refs/heads/master
| 2023-03-28T03:33:13.771987
| 2017-10-30T00:55:27
| 2017-10-30T00:55:27
| 351,120,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0010_journal_is_in_fixture'),
]
operations = [
migrations.AlterField(
model_name='paper',
name='date_fs',
field=models.DateField(db_index=True, null=True, blank=True),
),
]
|
[
"nicolas.pannetier@gmail.com"
] |
nicolas.pannetier@gmail.com
|
c22f3e4a7b31155d6afa2f033d9ea480cfd488d3
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/hjZTbJNzKiSxTtbik_22.py
|
5f6c69157466d046cc2ece0ea8c5c597bfbd928b
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
def sort_by_string(lst, txt):
sorts = list(txt)
letters = []
for word in lst:
letters.append(word[0])
for char in sorts:
if char not in letters:
sorts.remove(char)
newlst = []
for char in sorts:
for word in lst:
if word[0] == char:
newlst.append(word)
return newlst
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.