blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5c9772366bf445c954968e4b507dc52dcb29c60e
|
28deae4b6f2ef4c83116d8a7e08061b2ac47bb71
|
/Spider/commentbox/spider/encrypt.py
|
71a3b95bbdf67d38fd3744475f576aec28cdd9eb
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Danceiny/HackGirlfriend
|
9cc796c733be7055799efb1c51f1e5ecb3d12d81
|
d64f43c5cfb48d30ed812e34fb19bc7b90ba01f8
|
refs/heads/master
| 2023-01-04T16:09:55.205094
| 2017-07-22T16:48:59
| 2017-07-22T16:48:59
| 93,874,976
| 2
| 1
|
Apache-2.0
| 2022-12-26T20:14:57
| 2017-06-09T15:57:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# coding=utf-8
import os
import base64
import platform
import json
# On macOS the PyCrypto/PyCryptodome package is sometimes installed under the
# lowercase name 'crypto'; alias it so the 'from Crypto...' import below still
# resolves.
if platform.system() == 'Darwin':
    try:
        import crypto
        import sys
        sys.modules['Crypto'] = crypto
    except ImportError:
        # Fall through: the properly-cased 'Crypto' package may still import.
        pass
from Crypto.Cipher import AES
# Protocol reference (NetEase Cloud Music web API analysis, in Chinese):
# https://github.com/darknessomi/musicbox/wiki/网易云音乐新版WebAPI分析
def aes_encrypt(text, secKey):
    """AES-CBC-encrypt *text* with *secKey* and return it base64-encoded.

    The text is PKCS#7-padded to a 16-byte boundary; the IV is the fixed
    string used by the NetEase web client.
    """
    pad = 16 - len(text) % 16
    text = text + pad * chr(pad)
    # AES.MODE_CBC instead of the magic number 2, and str -> bytes encoding
    # for Python 3 compatibility (the cipher requires bytes).
    encryptor = AES.new(secKey.encode(), AES.MODE_CBC, b'0102030405060708')
    ciphertext = encryptor.encrypt(text.encode())
    # Decode so the result is text and can be re-encrypted or posted as-is.
    return base64.b64encode(ciphertext).decode()
def rsa_encrypt(text, pubKey, modulus):
    """Textbook-RSA-encrypt *text* (no padding) to a 256-char hex string.

    The plaintext is reversed (a NetEase protocol quirk), read as a
    big-endian integer, then raised to pubKey modulo modulus.
    """
    text = text[::-1]
    # int.from_bytes replaces the Python-2-only str.encode('hex'); the
    # three-argument pow() performs modular exponentiation directly instead
    # of materialising the astronomically large text**pubKey intermediate.
    rs = pow(int.from_bytes(text.encode(), 'big'),
             int(pubKey, 16), int(modulus, 16))
    return format(rs, 'x').zfill(256)
def create_secretKey(size):
    """Return a 16-character lowercase-hex key derived from *size* random bytes.

    Mirrors the original Python-2 logic exactly: each byte is rendered at
    hex()'s minimal width (no zero padding), so bytes < 0x10 contribute a
    single character before the result is truncated to 16 chars.
    """
    # os.urandom() yields ints when iterated under Python 3, so format(b, 'x')
    # replaces the Python-2-only hex(ord(b))[2:].
    return ''.join(format(byte, 'x') for byte in os.urandom(size))[0:16]
def gen_data():
    """Assemble the encrypted login form data: double-AES-encrypted JSON
    credentials plus the RSA-encrypted random AES key, as the NetEase web
    API expects."""
    credentials = {
        'username': '邮箱',
        'password': '密码',
        'rememberLogin': 'true'
    }
    modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
    nonce = '0CoJUm6Qyw8W8jud'
    pubKey = '010001'
    secKey = create_secretKey(16)
    # Encrypt first with the fixed nonce, then with the fresh random key.
    encText = aes_encrypt(aes_encrypt(json.dumps(credentials), nonce), secKey)
    encSecKey = rsa_encrypt(secKey, pubKey, modulus)
    return {
        'params': encText,
        'encSecKey': encSecKey
    }
|
[
"danceiny@gmail.com"
] |
danceiny@gmail.com
|
bae8eb019762143945ce74fa7330120d0ad3a8b3
|
e7e536df0263ae2a7ac44ef30f19110f891213a9
|
/src/tests/api/test_api_reviews.py
|
8af79849d889a34659681f05b76196ee11d9e8d8
|
[
"Apache-2.0"
] |
permissive
|
pretalx/pretalx
|
b3b3808266f4810dfc8445dc1ed33ba398e7a9c2
|
269dce90a6fb1ce0064008c40ce5dd4dad61e2e3
|
refs/heads/main
| 2023-09-05T11:09:23.538325
| 2023-09-04T19:57:47
| 2023-09-04T19:57:47
| 83,081,285
| 563
| 195
|
Apache-2.0
| 2023-09-13T19:12:28
| 2017-02-24T20:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
import json
import pytest
from django_scopes import scope
from pretalx.api.serializers.review import ReviewSerializer
@pytest.mark.django_db
def test_review_serializer(review):
    """A serialised review exposes exactly the documented fields."""
    with scope(event=review.event):
        serialised = ReviewSerializer(review).data
        expected_fields = {
            "id",
            "answers",
            "submission",
            "user",
            "text",
            "score",
            "created",
            "updated",
        }
        assert set(serialised.keys()) == expected_fields
        assert serialised["submission"] == review.submission.code
        assert serialised["user"] == review.user.name
        assert serialised["answers"] == []
@pytest.mark.django_db
def test_anon_cannot_see_reviews(client, event, review):
    """Anonymous API users get an empty review list."""
    resp = client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 0, payload
@pytest.mark.django_db
def test_orga_can_see_reviews(orga_client, event, review):
    """Organisers can see the event's reviews."""
    resp = orga_client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 1
@pytest.mark.django_db
def test_orga_cannot_see_reviews_of_deleted_submission(orga_client, event, review):
    """Reviews of deleted submissions are hidden even from organisers."""
    review.submission.state = "deleted"
    review.submission.save()
    resp = orga_client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 0
@pytest.mark.django_db
def test_reviewer_can_see_reviews(review_client, event, review, other_review):
    """A reviewer sees all reviews they are entitled to."""
    resp = review_client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 2, payload
@pytest.mark.django_db
def test_reviewer_can_see_reviews_by_track(
    review_client, review_user, event, review, other_review, track, other_track
):
    """A reviewer whose team is limited to one track only sees that track's reviews."""
    review.submission.track = track
    review.submission.save()
    other_review.submission.track = other_track
    other_review.submission.save()
    review_user.teams.filter(is_reviewer=True).first().limit_tracks.add(track)
    resp = review_client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 1, payload
@pytest.mark.django_db
def test_reviewer_can_filter_by_submission(review_client, event, review, other_review):
    """The review list endpoint can be filtered down to one submission code."""
    url = event.api_urls.reviews + f"?submission__code={review.submission.code}"
    resp = review_client.get(url, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 1, payload
@pytest.mark.django_db
def test_reviewer_cannot_see_review_to_own_talk(
    review_user, review_client, event, review, other_review
):
    """Reviews of a talk the reviewer speaks in are excluded from their list."""
    other_review.submission.speakers.add(review_user)
    resp = review_client.get(event.api_urls.reviews, follow=True)
    payload = json.loads(resp.content.decode())
    assert resp.status_code == 200
    assert len(payload["results"]) == 1, payload
|
[
"r@rixx.de"
] |
r@rixx.de
|
ae0818619246b3bb7d794ba9dc5038f83db79eed
|
f89d70fc8bf370ef4e2aa54c7ee0de3b4a053624
|
/scripts/patches/codepipeline.py
|
20b98f8526ecd6469e0a36ff5f6078f7b847e0da
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yks0000/troposphere
|
a7622bff01c31f10dcb296d2ca353144e1d7f793
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
refs/heads/main
| 2022-04-28T03:51:42.770881
| 2022-04-15T15:15:01
| 2022-04-15T15:15:01
| 482,753,190
| 1
| 0
|
BSD-2-Clause
| 2022-04-18T07:20:42
| 2022-04-18T07:20:42
| null |
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
# JSON-Patch operations applied to the AWS CloudFormation resource
# specification before code generation.  Each move/replace pair restores a
# legacy (plural) troposphere class name for an AWS::CodePipeline property
# type and re-points the referencing ItemType at the renamed class, keeping
# backward compatibility for existing user code.
patches = [
    # backward compatibility
    # StageTransition -> DisableInboundStageTransitions
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.StageTransition",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.DisableInboundStageTransitions",
    },
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::CodePipeline::Pipeline/Properties/DisableInboundStageTransitions/ItemType",
        "value": "DisableInboundStageTransitions",
    },
    # StageDeclaration -> Stages
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.StageDeclaration",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages",
    },
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::CodePipeline::Pipeline/Properties/Stages/ItemType",
        "value": "Stages",
    },
    # InputArtifact -> InputArtifacts
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.InputArtifact",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.InputArtifacts",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration/Properties/InputArtifacts/ItemType",
        "value": "InputArtifacts",
    },
    # OutputArtifact -> OutputArtifacts
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.OutputArtifact",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.OutputArtifacts",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration/Properties/OutputArtifacts/ItemType",
        "value": "OutputArtifacts",
    },
    # ActionDeclaration -> Actions
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.ActionDeclaration",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Actions",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages/Properties/Actions/ItemType",
        "value": "Actions",
    },
    # BlockerDeclaration -> Blockers
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::CodePipeline::Pipeline.BlockerDeclaration",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Blockers",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::CodePipeline::Pipeline.Stages/Properties/Blockers/ItemType",
        "value": "Blockers",
    },
]
|
[
"mark@peek.org"
] |
mark@peek.org
|
2f47b872200e92c1dd739ecfba7b29d356bbc5c9
|
ae8590dc2dd0dd6530868ccd52702d06e5d96fa1
|
/set.py
|
db1f8968df305dbd882d4f03ee932777ea1fa60b
|
[] |
no_license
|
abhisek08/Python-Basics-Part-1-
|
e3bec8e4d7f9e484c4bcade7763842334c93f4b0
|
3687dd6ebb01f2289b3fa226cea28b564894a68f
|
refs/heads/master
| 2022-09-08T11:42:28.871012
| 2020-05-25T07:58:01
| 2020-05-25T07:58:01
| 266,717,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
'''
Print the set of colours that appear in color_list_1 but are absent from
color_list_2.

Test Data:
color_list_1 = set(["White", "Black", "Red"])
color_list_2 = set(["Red", "Green"])
Expected Output:
{'Black', 'White'}
'''
color_list_1 = set(["White", "Black", "Red"])
color_list_2 = set(["Red", "Green"])
# Set comprehension replaces the explicit loop-and-add of the original.
set3 = {colour for colour in color_list_1 if colour not in color_list_2}
print(set3)
|
[
"abhisek.bhunia08@gmail.com"
] |
abhisek.bhunia08@gmail.com
|
61795a374265bfd7628a5a4f8567cea6a4871501
|
41de4210af23a8a8a3ca7dd090bb51faecf4a0c8
|
/lib/python3.5/site-packages/statsmodels/tsa/statespace/tests/test_pickle.py
|
e4143eb73bf4dbaa785ecf20c15a8bc067c18aaf
|
[
"Python-2.0"
] |
permissive
|
randybrown-github/ziplineMacOS
|
42a0c2bfca2a54baa03d2803dc41317647811285
|
eb5872c0903d653e19f259f0800fb7aecee0ee5c
|
refs/heads/master
| 2022-11-07T15:51:39.808092
| 2020-06-18T20:06:42
| 2020-06-18T20:06:42
| 272,631,387
| 0
| 1
| null | 2022-11-02T03:21:45
| 2020-06-16T06:48:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,445
|
py
|
"""
Tests for python wrapper of state space representation and filtering
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
from statsmodels.compat.testing import SkipTest
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from numpy.testing import assert_equal, assert_allclose
from statsmodels.compat import cPickle
from statsmodels.tsa.statespace import sarimax
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.structural import UnobservedComponents
from .results import results_kalman_filter
# Skip copy test on older NumPy since copy does not preserve order
# LooseVersion(...).version[:2] yields the [major, minor] components.
NP_LT_18 = LooseVersion(np.__version__).version[:2] < [1, 8]
if NP_LT_18:
    raise SkipTest("Old NumPy doesn't preserve matrix order when copying")

# Kim & Nelson (1999) US GDP reference data shared by all tests below.
true = results_kalman_filter.uc_uni
data = pd.DataFrame(
    true['data'],
    index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
    columns=['GDP']
)
# Log-GDP series that the SARIMAX and Kalman filter tests fit against.
data['lgdp'] = np.log(data['GDP'])
def test_pickle_fit_sarimax():
    """Fitting a pickled SARIMAX model reproduces the original model's fit."""
    # Fit an ARIMA(1,1,0) to log GDP
    mod = sarimax.SARIMAX(data['lgdp'], order=(1, 1, 0))
    pkl_mod = cPickle.loads(cPickle.dumps(mod))

    res = mod.fit(disp=-1)
    pkl_res = pkl_mod.fit(disp=-1)

    assert_allclose(res.llf_obs, pkl_res.llf_obs)
    assert_allclose(res.tvalues, pkl_res.tvalues)
    assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
    assert_allclose(res.resid.values, pkl_res.resid.values)
    # Bug fix: the original compared res.impulse_responses(10) against
    # itself, which could never fail; compare against the pickled fit.
    assert_allclose(res.impulse_responses(10), pkl_res.impulse_responses(10))
def test_unobserved_components_pickle():
    """Pickled UnobservedComponents models fit identically to the originals."""
    # Tests for missing data
    nobs = 20
    k_endog = 1
    np.random.seed(1208)
    endog = np.random.normal(size=(nobs, k_endog))
    endog[:4, 0] = np.nan  # leading missing observations
    exog2 = np.random.normal(size=(nobs, 2))
    index = pd.date_range('1970-01-01', freq='QS', periods=nobs)
    endog_pd = pd.DataFrame(endog, index=index)
    exog2_pd = pd.DataFrame(exog2, index=index)

    # Same model specified from ndarray and from pandas inputs.
    models = [
        UnobservedComponents(endog, 'llevel', exog=exog2),
        UnobservedComponents(endog_pd, 'llevel', exog=exog2_pd),
    ]
    for mod in models:
        # Smoke tests
        pkl_mod = cPickle.loads(cPickle.dumps(mod))
        assert_equal(mod.start_params, pkl_mod.start_params)
        res = mod.fit(disp=False)
        pkl_res = pkl_mod.fit(disp=False)
        assert_allclose(res.llf_obs, pkl_res.llf_obs)
        assert_allclose(res.tvalues, pkl_res.tvalues)
        assert_allclose(res.smoothed_state, pkl_res.smoothed_state)
        assert_allclose(res.resid, pkl_res.resid)
        # Bug fix: the original compared res.impulse_responses(10) against
        # itself; compare against the pickled model's fit instead.
        assert_allclose(res.impulse_responses(10), pkl_res.impulse_responses(10))
def test_kalman_filter_pickle():
    """A pickle round-trip of a hand-built KalmanFilter yields identical filtering results."""
    # Construct the statespace representation
    k_states = 4
    model = KalmanFilter(k_endog=1, k_states=k_states)
    model.bind(data['lgdp'].values)

    model.design[:, :, 0] = [1, 1, 0, 0]
    # Fixed ones/zeros of the transition matrix written via fancy indexing:
    # (row, col, t) index triples into the time-invariant (t=0) slice.
    model.transition[([0, 0, 1, 1, 2, 3],
                      [0, 3, 1, 2, 1, 3],
                      [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
    model.selection = np.eye(model.k_states)

    # Update matrices with given parameters
    (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
        true['parameters']
    )
    model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    # Diagonal state covariance; the appended zeros array selects t=0.
    model.state_cov[
        np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
        sigma_v ** 2, sigma_e ** 2, 0, sigma_w ** 2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states) * 100

    # Initialization: modification
    # Rotate the initial covariance through the transition matrix once.
    initial_state_cov = np.dot(
        np.dot(model.transition[:, :, 0], initial_state_cov),
        model.transition[:, :, 0].T
    )
    model.initialize_known(initial_state, initial_state_cov)
    pkl_mod = cPickle.loads(cPickle.dumps(model))

    results = model.filter()
    pkl_results = pkl_mod.filter()

    # Compare log-likelihood and selected filtered states after the burn-in
    # index recorded in the reference results.
    assert_allclose(results.llf_obs[true['start']:].sum(),
                    pkl_results.llf_obs[true['start']:].sum())
    assert_allclose(results.filtered_state[0][true['start']:],
                    pkl_results.filtered_state[0][true['start']:])
    assert_allclose(results.filtered_state[1][true['start']:],
                    pkl_results.filtered_state[1][true['start']:])
    assert_allclose(results.filtered_state[3][true['start']:],
                    pkl_results.filtered_state[3][true['start']:])
def test_representation_pickle():
    """A pickle round-trip of a bare Representation preserves its matrices."""
    nobs, k_endog = 10, 2
    endog = np.asfortranarray(np.arange(nobs * k_endog).reshape(k_endog, nobs) * 1.)
    rep = Representation(endog, k_states=2)
    rep_copy = cPickle.loads(cPickle.dumps(rep))
    assert_equal(rep.nobs, rep_copy.nobs)
    assert_equal(rep.k_endog, rep_copy.k_endog)
    rep._initialize_representation()
    rep_copy._initialize_representation()
    assert_equal(rep.design, rep_copy.design)
    assert_equal(rep.obs_intercept, rep_copy.obs_intercept)
    assert_equal(rep.initial_variance, rep_copy.initial_variance)
|
[
"randybrown18@me.com"
] |
randybrown18@me.com
|
62c27b0cf0a5f8a1a68a8aedafbea9941629ddf5
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_available_resource_group_delegations_operations.py
|
e40effbf3eedfc272c4a8dabfc86149ff292a448
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 5,696
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class — hand edits are lost when the
# SDK is regenerated, so only comments are added here.
class AvailableResourceGroupDelegationsOperations(object):
    """AvailableResourceGroupDelegationsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        location,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.AvailableDelegationsResult"]
        """Gets all of the available subnet delegations for this resource group in this region.

        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AvailableDelegationsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.AvailableDelegationsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailableDelegationsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service returns a full nextLink URL,
                # so no extra query parameters are appended.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for ItemPaged.
            deserialized = self._deserialize('AvailableDelegationsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availableDelegations'}  # type: ignore
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
1721d823737b9758f72ff95546378340fdbe225f
|
b73a66c9593b7aa326c26d4f148606ca100f541e
|
/corehq/apps/indicators/urls.py
|
e7409ae140feb38f5a5bb6f54c8f4055d3c30201
|
[] |
no_license
|
SEL-Columbia/commcare-hq
|
c995a921de6d076e777ca2d5d2baed6a8bcd5d7b
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
refs/heads/master
| 2021-01-14T14:37:34.391473
| 2014-09-15T21:01:54
| 2014-09-15T21:01:54
| 17,970,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
from django.conf.urls.defaults import patterns, url
from corehq import IndicatorAdminInterfaceDispatcher
from corehq.apps.indicators.views import IndicatorAdminCRUDFormView, BulkCopyIndicatorsView
# Legacy Django URLconf: patterns() and string view names (resolved against
# the 'corehq.apps.indicators.views' prefix) were removed in Django 1.10.
urlpatterns = patterns('corehq.apps.indicators.views',
    url(r'^$', 'default_admin', name="default_indicator_admin"),
    url(r'^copy/(?P<indicator_type>[\w_]+)/$', BulkCopyIndicatorsView.as_view(), name="indicator_bulk_copy"),
    # NOTE(review): [(update)|(new)|(delete)]+ is a character class, so it
    # matches any run of those letters, parens and '|', not just the three
    # action words — presumably (update|new|delete) was intended; left
    # unchanged here.
    url(r'^form/(?P<form_type>[\w_]+)/(?P<action>[(update)|(new)|(delete)]+)/((?P<item_id>[\w_]+)/)?$',
        IndicatorAdminCRUDFormView.as_view(), name="indicator_def_form"),
    IndicatorAdminInterfaceDispatcher.url_pattern(),
)
|
[
"biyeun@dimagi.com"
] |
biyeun@dimagi.com
|
0cbb6f41c16ebe936880049ad757b009d9c9d15c
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc004/B/4553322.py
|
708d9d250a600dde43d899a86623332b0cf0c4bf
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
import numpy as np

# AGC004 B: for each number of roll operations, take the elementwise minimum
# over all rotations applied so far and add the cost of the rolls.
N, x = map(int, input().split())
a = np.array(list(map(int, input().split())))
b = np.copy(a)
ans = float('inf')
for shift in range(N):
    b = np.minimum(b, np.roll(a, shift))
    ans = min(ans, sum(b) + shift * x)
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
0910059b9001607f6889bee415cd0046879e7eba
|
57dccf7b8da26753b66a9eecb9eb6cd1ae5584b5
|
/yolov5/backup/yolov5_2.py
|
0db2a2eb1f6be065758b4c99caec163f748bed1f
|
[] |
no_license
|
vbvg2008/benchmarks
|
4b743d6b19a4d0b41fa78b8db2a3f3a3f4e86018
|
29e2e445e6701529e048e8ffa283b5b071295566
|
refs/heads/master
| 2022-12-12T21:50:51.082085
| 2022-12-06T22:09:26
| 2022-12-06T22:09:26
| 187,144,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
import pdb
import numpy as np
import torch
from PIL import Image

# Model
# Fetches the yolov5s architecture definition from the ultralytics hub at
# runtime (random weights — pretrained=False, so no weight download).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=False)

# Images
# img1 = Image.open('zidane.jpg')
# inputs = np.array(img1)
# inputs = np.transpose(inputs, [2, 0, 1])
# pdb.set_trace()
# # Inference
# result = model([inputs])

# Run a random CHW tensor through the model, then drop into the debugger to
# inspect the prediction structure interactively.
inputs = torch.rand(3, 720, 1280)
pred = model([inputs])
pdb.set_trace()
|
[
"shawnmengdong@gmail.com"
] |
shawnmengdong@gmail.com
|
321fa041bc8aa7599fc821cd44dae64b4deb545b
|
5c883c87f337be7ffd52f49f0a4e6c72bbd58932
|
/apps/seguimiento/migrations/0012_auto_20161009_1256.py
|
53fe40acab70a7d988dc02796c1d764cf8059d45
|
[] |
no_license
|
DARKDEYMON/Tesis-2-Vidaurre-J.C.
|
f1b0d8e8a593a9d4a585bdd14b21d4809d55ce9f
|
4299cea2e990ee798b02724849d747bfd558b97d
|
refs/heads/master
| 2021-06-20T09:25:53.273225
| 2017-05-25T22:20:31
| 2017-05-25T22:20:31
| 65,408,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 16:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename proyecto.plaso_previsto to plazo_previsto (spelling fix;
    'plazo' is the correct Spanish word for term/deadline)."""

    dependencies = [
        ('seguimiento', '0011_requerimiento_maq_he_requerimientopersonal'),
    ]

    operations = [
        migrations.RenameField(
            model_name='proyecto',
            old_name='plaso_previsto',
            new_name='plazo_previsto',
        ),
    ]
|
[
"darkdeymon04@gmail.com"
] |
darkdeymon04@gmail.com
|
8403f2004c7f764c1701a784cd86927f379d97bd
|
85373d45a83e4096affafa4f4e5b400787413e57
|
/test/programytest/parser/template/node_tests/richmedia_tests/test_list.py
|
282b8606c75b00ec4a823c41783ef19aa46ed8ab
|
[
"MIT"
] |
permissive
|
keiffster/program-y
|
a02bb9d8278835547cc875f4f9cd668d5b1f44da
|
fc7b0a3afa4fa6ed683e0c817a9aa89f9543bb20
|
refs/heads/master
| 2023-08-23T13:55:39.255535
| 2022-12-13T09:51:57
| 2022-12-13T09:51:57
| 74,462,571
| 379
| 173
|
NOASSERTION
| 2023-05-23T00:51:21
| 2016-11-22T10:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 969
|
py
|
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.list import TemplateListNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateListNodeTests(ParserTestsBaseClass):

    def test_list_node(self):
        """A list node with two word items resolves to nested <list>/<item> XML."""
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)

        # Renamed from 'list' so the builtin is not shadowed.
        list_node = TemplateListNode()
        list_node._items.append(TemplateWordNode("Item1"))
        list_node._items.append(TemplateWordNode("Item2"))
        root.append(list_node)

        resolved = root.resolve(self._client_context)
        self.assertIsNotNone(resolved)
        self.assertEqual("<list><item>Item1</item><item>Item2</item></list>", resolved)
        self.assertEqual("<list><item>Item1</item><item>Item2</item></list>", root.to_xml(self._client_context))
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
e86764ade6955c0e9d01a19dd792a7783ffab002
|
230ccae62e975f7bfde062edd32e5a54db888a04
|
/programmers/[Level-4]/fail/스티커모으기.py
|
e07a5aef7d58ef6b2d9cbd566e5f8fe54890943f
|
[] |
no_license
|
seung-woo-ryu/AlgorithmTest
|
6f56ec762dc2c863218c529299a3874ad9fd6c53
|
2b735535dbd447f873650bfb649616b78de34343
|
refs/heads/master
| 2023-02-03T08:00:19.929711
| 2020-12-12T10:04:07
| 2020-12-12T10:04:07
| 285,925,867
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
# Failed attempt at the "sticker collecting" tree problem (kept in a 'fail'
# folder upstream).  Documented as-is; see review notes below.
vi = []       # per-node counters, bumped while walking ancestors toward node 0
li2 = []      # adjacency matrix (module-level name is shadowed inside s())
answer = 0

def s(n, edges):
    global answer
    global vi
    # NOTE(review): this local li2 shadows the module-level list, so the
    # module-level li2 is never actually used.
    li2 = [[0 for _ in range(n)] for _ in range(n)]
    vi = [0] * n

    def re(idx, li2):
        # Walk from idx toward node 0, incrementing vi for the first
        # lower-numbered neighbour at each step.  NOTE(review): the name
        # shadows the stdlib 're' module, and the recursive call uses the
        # loop variable i after the loop — presumably intentional here,
        # but fragile.
        global vi
        if idx != 0:
            for i in range(0, idx):
                if li2[idx][i] == 1:
                    vi[i] += 1
                    break
            re(i, li2)

    for x, y in edges:
        li2[x][y] = 1
        li2[y][x] = 1
    for x, y in edges:
        if x != 0:
            vi[x] += 1
            re(x, li2)

    queue = []
    temp = set()
    temp.add(0)
    answer = 0
    while temp:
        # Expand the frontier to all higher-numbered neighbours.
        for x in list(temp):
            for i in range(x + 1, n):
                if li2[x][i] == 1:
                    queue.append(i)
        max_index = -1
        max_value = -1
        # Greedily pick the frontier node whose vi score most exceeds the
        # best score among later nodes, and drop it from the next frontier.
        for x in queue:
            max_temp = 0
            for k in range(x + 1, n):
                max_temp = max(max_temp, vi[k])
            if vi[x] - max_temp > max_value:
                max_index = x
                max_value = vi[x] - max_temp
        temp = set(queue) - set([max_index])
    # NOTE(review): 'answer' is never updated inside the loop, so this
    # always returns n - 0 — consistent with this being a failed solution.
    return n - answer

print(s(19, [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5], [2, 6], [3, 7], [3, 8], [3, 9], [4, 10], [4, 11], [5, 12], [5, 13], [6, 14], [6, 15], [6, 16], [8, 17], [8, 18]]))
|
[
"tmddn645@naver.com"
] |
tmddn645@naver.com
|
d1325713c07c1c46518100d38aa60e1e84a7af95
|
9bc17bffce835eb8e27422e39438bf7bd1af2282
|
/pnc_cli/swagger_client/models/page.py
|
2ba471e49073d2f40d51ae98c7e9566888ed8e25
|
[
"Apache-2.0"
] |
permissive
|
pgier/pnc-cli
|
c3e7d61c3bce4c1a48b29e5f980b6b72cded3e31
|
4d29a8a7ec749c8843c6e32adb7c9c969e6cc24a
|
refs/heads/master
| 2021-01-15T23:59:08.874319
| 2016-05-02T20:59:48
| 2016-05-02T20:59:48
| 57,930,193
| 0
| 0
| null | 2016-05-03T00:36:34
| 2016-05-03T00:36:33
| null |
UTF-8
|
Python
| false
| false
| 4,465
|
py
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class Page(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Page - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'page_index': 'int',
'page_size': 'int',
'total_pages': 'int',
'content': 'list[PageContent]'
}
self.attribute_map = {
'page_index': 'pageIndex',
'page_size': 'pageSize',
'total_pages': 'totalPages',
'content': 'content'
}
self._page_index = None
self._page_size = None
self._total_pages = None
self._content = None
@property
def page_index(self):
"""
Gets the page_index of this Page.
:return: The page_index of this Page.
:rtype: int
"""
return self._page_index
@page_index.setter
def page_index(self, page_index):
"""
Sets the page_index of this Page.
:param page_index: The page_index of this Page.
:type: int
"""
self._page_index = page_index
@property
def page_size(self):
"""
Gets the page_size of this Page.
:return: The page_size of this Page.
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""
Sets the page_size of this Page.
:param page_size: The page_size of this Page.
:type: int
"""
self._page_size = page_size
@property
def total_pages(self):
"""
Gets the total_pages of this Page.
:return: The total_pages of this Page.
:rtype: int
"""
return self._total_pages
@total_pages.setter
def total_pages(self, total_pages):
"""
Sets the total_pages of this Page.
:param total_pages: The total_pages of this Page.
:type: int
"""
self._total_pages = total_pages
@property
def content(self):
"""
Gets the content of this Page.
:return: The content of this Page.
:rtype: list[PageContent]
"""
return self._content
@content.setter
def content(self, content):
"""
Sets the content of this Page.
:param content: The content of this Page.
:type: list[PageContent]
"""
self._content = content
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, datetime):
result[attr] = str(value.date())
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
        """
        For `print` and `pprint` — delegates to to_str().
        """
        return self.to_str()
|
[
"thauser@redhat.com"
] |
thauser@redhat.com
|
39f9ab976f2acb071d2f4cc6d0b3c49a985bcd32
|
2d4af29250dca8c72b74e190e74d92f1467120a0
|
/TaobaoSdk/Request/UmpToolsGetRequest.py
|
34245bfd22984b2009657c88f859c074ebb7ee59
|
[] |
no_license
|
maimiaolmc/TaobaoOpenPythonSDK
|
2c671be93c40cf487c0d7d644479ba7e1043004c
|
d349aa8ed6229ce6d76a09f279a0896a0f8075b3
|
refs/heads/master
| 2020-04-06T03:52:46.585927
| 2014-06-09T08:58:27
| 2014-06-09T08:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 查询工具列表
# @author wuliang@maimiaotech.com
# @date 2013-09-22 16:52:38
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    """Return the absolute, normalized directory containing this module."""
    this_file = os.path.realpath(__file__)
    return os.path.normpath(os.path.join(this_file, os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
# Make the SDK package importable when this module is run directly;
# the membership check keeps sys.path free of duplicates on re-import.
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">查询工具列表</SPAN>
# <UL>
# </UL>
class UmpToolsGetRequest(object):
    """Request object for the ``taobao.ump.tools.get`` API (query tool list)."""
    def __init__(self):
        # FIX: the original called super(self.__class__, self).__init__(),
        # which recurses infinitely if this class is ever subclassed;
        # name the class explicitly instead.
        super(UmpToolsGetRequest, self).__init__()
        # API method name (str).
        self.method = "taobao.ump.tools.get"
        # Request timestamp (int, seconds since the epoch). Defaults to
        # "now"; used at send time if not overridden.
        self.timestamp = int(time.time())
        # Tool code to query (str, optional per the API spec).
        self.tool_code = None
|
[
"chenke@maimiaotech.com"
] |
chenke@maimiaotech.com
|
cc50b631b320114baf420e8a9698000c87c7eaca
|
0b802a3b3572ae4e9be55cb1c116ebcf06cceb4d
|
/tests/pipupgrade/cli/test_cli__init__.py
|
04031a9628d0715815b484f5dbf878b575837d64
|
[
"MIT"
] |
permissive
|
todun/pipupgrade
|
fc8b1315a9b432a75dd78c1783f85cd0147e631b
|
2f2e04d77c7e276e4b6172d42b5bdeaae11075fb
|
refs/heads/master
| 2020-06-25T00:43:26.995923
| 2019-06-10T18:46:22
| 2019-06-10T18:46:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
# imports - compatibility imports
from pipupgrade._compat import iteritems, iterkeys
# imports - module imports
from pipupgrade import cli
from pipupgrade.cli import get_args
from pipupgrade.util.types import merge_dict
def test_command():
    """Exercise the @cli.command decorator: explicit overrides must win
    over parsed CLI arguments, while `initial` pins pre-merge values."""
    # NOTE(review): the mutable dict() defaults are shared across calls but
    # appear to be read-only here — confirm they are never mutated upstream.
    def _assert_command(values, override = dict(), initial = dict()):
        @cli.command
        def foobar(*args, **kwargs):
            # get_args() returns the parsed arguments; merge_dict gives
            # `override` precedence over them.
            args = get_args()
            params = merge_dict(args, override)
            for k, v in iteritems(values):
                assert params[k] == v
            if initial:
                # `initial` asserts the value *before* the override merge.
                for k in iterkeys(initial):
                    assert initial[k] == args[k]
        foobar()
    _assert_command(dict(yes = False))
    _assert_command(dict(latest = True), dict(latest = True), dict(latest = False))
|
[
"achillesrasquinha@gmail.com"
] |
achillesrasquinha@gmail.com
|
378ba1016f60d57bd7f16d42e2c06e05626ec211
|
42b3c0d4691df8cfe60177abe7c33d01575f2d9a
|
/multiThreads/多进程拷贝代码.py
|
507e05be7f3bbfd237f703ce6c7499b1ad3191d0
|
[] |
no_license
|
richard-ql/pythonNotes
|
68d592bdf9f81ea1569b1a5f9a12f5897b98f922
|
27919b2c95cf9ca7443d218488a6edefdb846129
|
refs/heads/master
| 2021-07-13T23:00:29.126607
| 2021-06-27T16:16:26
| 2021-06-27T16:16:26
| 227,252,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
import os
# Demo of os.fork() (POSIX-only; unavailable on Windows).
print("hello world")
# fork() duplicates the process at this point: everything after this
# line executes in both the parent and the child.
pid = os.fork()
print("多进程会拷贝os.fork之后的代码")
# fork() returns 0 in the child and the child's PID in the parent.
print(pid)
if pid == 0:
    print("son process")
else:
    print("father process")
|
[
"richard_ql@hotmail.com"
] |
richard_ql@hotmail.com
|
f135315217f58fe37e1088ccf0c094c7fd1d9606
|
09c18cf1d9dc443e43357383030be9b3ce9e2756
|
/QUANTAXIS/QAData/__init__.py
|
4a2cfb9ae8dfa14a8a5228a58a67d7843913348e
|
[
"MIT"
] |
permissive
|
zhouji0212/QUANTAXIS
|
ed47f78be7d78d2888faf01ba5cfe75dca463e06
|
54b2a0c3445d77c7fcd4858100e8bebe6656e940
|
refs/heads/master
| 2020-04-07T16:56:56.332211
| 2018-12-23T14:44:30
| 2018-12-23T14:44:30
| 141,289,835
| 0
| 0
|
MIT
| 2018-11-21T12:19:41
| 2018-07-17T12:56:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from QUANTAXIS.QAData.QASeriesStruct import QA_DataStruct_Series
from QUANTAXIS.QAData.data_fq import QA_data_stock_to_fq
from QUANTAXIS.QAData.data_marketvalue import QA_data_calc_marketvalue, QA_data_marketvalue
from QUANTAXIS.QAData.data_resample import QA_data_tick_resample, QA_data_min_resample, QA_data_day_resample
from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Index_day,
QA_DataStruct_Index_min,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min,
QA_DataStruct_Future_day,
QA_DataStruct_Future_min,
QA_DataStruct_Stock_realtime,
QA_DataStruct_Stock_transaction)
from QUANTAXIS.QAData.QABlockStruct import QA_DataStruct_Stock_block
from QUANTAXIS.QAData.QAFinancialStruct import QA_DataStruct_Financial
from QUANTAXIS.QAData.QAIndicatorStruct import QA_DataStruct_Indicators
from QUANTAXIS.QAData.dsmethods import QDS_StockDayWarpper, QDS_StockMinWarpper, QDS_IndexDayWarpper, QDS_IndexMinWarpper, from_tushare, concat
|
[
"yutiansut@qq.com"
] |
yutiansut@qq.com
|
05fae7a028b9f848821d1fb01887ec4165b34f20
|
e2d5b42941f6bd5a5adace442feab1c446f4a997
|
/dp-knight-chess-movement.py
|
19e4d662b82190f8979d3e4651678f59e7e6ba2b
|
[] |
no_license
|
yongxuUSTC/challenges
|
21601f8f47beed3ef2c733caaf512b39ce00bc69
|
00ece128923511f29c207d42cbf060cae6bafa01
|
refs/heads/master
| 2021-06-26T01:15:03.436234
| 2020-08-17T19:17:33
| 2020-08-17T19:17:33
| 97,131,133
| 2
| 1
| null | 2017-07-13T14:22:36
| 2017-07-13T14:22:36
| null |
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
'''
How many different 10-digit numbers can be formed starting from 1?
The constraint is that the movement from 1 digit to the next is similar to the movement of the Knight in a chess game.
Reference: http://stackoverflow.com/questions/2893470/generate-10-digit-number-using-a-phone-keypad
'''
def initialize():
    """Build the 4x3 phone-keypad table; None marks the blank keys."""
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, None, 0, None]
    # Reshape the flat key list into 4 rows of 3 columns.
    return [values[row * 3:(row + 1) * 3] for row in range(4)]
# Locate a key on the keypad.
def getCoordinates(value, table):
    """Return [row, col] of *value* in *table* (None if absent)."""
    for row_idx, row in enumerate(table):
        for col_idx, cell in enumerate(row):
            if cell == value:
                return [row_idx, col_idx]
# Keypad digits reachable from `value` by a chess knight's move.
def nextKnightMove(value, table):
    """Return the keys a knight could jump to from *value* on *table*."""
    i, j = getCoordinates(value, table)
    rows, cols = len(table), len(table[0])
    # The eight knight offsets, in the same order the original's
    # hand-written bounds checks evaluated them.
    offsets = ((1, 2), (1, -2), (-1, 2), (-1, -2),
               (2, 1), (2, -1), (-2, 1), (-2, -1))
    reachable = []
    for di, dj in offsets:
        r, c = i + di, j + dj
        if 0 <= r < rows and 0 <= c < cols and table[r][c] is not None:
            reachable.append(table[r][c])
    return reachable
#http://stackoverflow.com/questions/2893470/generate-10-digit-number-using-a-phone-keypad
def generateTableM(table,mtable,digits,start):
    # Count the `digits`-long dial sequences starting at key `start`,
    # memoized in mtable[digits][start]. 0 doubles as the "unset" marker,
    # so keys with no knight moves (e.g. 5) are recomputed each visit —
    # harmless, since their true count is 0.
    # NOTE(review): `next` shadows the builtin; rename when convenient.
    if digits == 1:
        return 1
    if (mtable[digits][start] == 0):
        for next in nextKnightMove(start,table):
            mtable[digits][start] += generateTableM(table,mtable,digits-1,next)
    #else:
        #print("found ...",digits,start)
    return mtable[digits][start]
table = initialize()
#memoization table
# mtable[d][k]: number of d-digit sequences starting at key k
# (rows index digit counts 0..10, columns index keys 0..9).
mtable = [[0 for i in range(10)] for j in range(11)]
print(generateTableM(table,mtable,10,1)) #mtable[10][1] = 1424
|
[
"harishvc@gmail.com"
] |
harishvc@gmail.com
|
911c126a0f974f911bf5b66ca8c23e2cfd9747a3
|
1bf9f6b0ef85b6ccad8cb029703f89039f74cedc
|
/src/spring/azext_spring/vendored_sdks/appplatform/v2021_06_01_preview/aio/_app_platform_management_client.py
|
27281616bb20c88913df623e391e6632539d2a69
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
VSChina/azure-cli-extensions
|
a1f4bf2ea4dc1b507618617e299263ad45213add
|
10b7bfef62cb080c74b1d59aadc4286bd9406841
|
refs/heads/master
| 2022-11-14T03:40:26.009692
| 2022-11-09T01:09:53
| 2022-11-09T01:09:53
| 199,810,654
| 4
| 2
|
MIT
| 2020-07-13T05:51:27
| 2019-07-31T08:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,399
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import AppPlatformManagementClientConfiguration
from .operations import (
AppsOperations,
BindingsOperations,
CertificatesOperations,
ConfigServersOperations,
CustomDomainsOperations,
DeploymentsOperations,
MonitoringSettingsOperations,
Operations,
RuntimeVersionsOperations,
ServicesOperations,
SkusOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClient:  # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
    """REST API for Azure Spring Cloud.
    :ivar services: ServicesOperations operations
    :vartype services: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ServicesOperations
    :ivar config_servers: ConfigServersOperations operations
    :vartype config_servers:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.ConfigServersOperations
    :ivar monitoring_settings: MonitoringSettingsOperations operations
    :vartype monitoring_settings:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.MonitoringSettingsOperations
    :ivar apps: AppsOperations operations
    :vartype apps: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.AppsOperations
    :ivar bindings: BindingsOperations operations
    :vartype bindings: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.BindingsOperations
    :ivar certificates: CertificatesOperations operations
    :vartype certificates:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CertificatesOperations
    :ivar custom_domains: CustomDomainsOperations operations
    :vartype custom_domains:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.CustomDomainsOperations
    :ivar deployments: DeploymentsOperations operations
    :vartype deployments:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.DeploymentsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.Operations
    :ivar runtime_versions: RuntimeVersionsOperations operations
    :vartype runtime_versions:
     azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.RuntimeVersionsOperations
    :ivar skus: SkusOperations operations
    :vartype skus: azure.mgmt.appplatform.v2021_06_01_preview.aio.operations.SkusOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2021-06-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """
    # NOTE: generated by AutoRest (see file header) — manual edits are
    # lost when the client is regenerated.
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = AppPlatformManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        # Single shared ARM pipeline reused by every operation group below.
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # (De)serializers are built over every model class the package declares.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        self.services = ServicesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.config_servers = ConfigServersOperations(self._client, self._config, self._serialize, self._deserialize)
        self.monitoring_settings = MonitoringSettingsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.apps = AppsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.bindings = BindingsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.deployments = DeploymentsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.runtime_versions = RuntimeVersionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.skus = SkusOperations(self._client, self._config, self._serialize, self._deserialize)
    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.
        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>
        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy first so the caller's request object is never mutated.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)
    async def close(self) -> None:
        await self._client.close()
    async def __aenter__(self) -> "AppPlatformManagementClient":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
VSChina.noreply@github.com
|
68b882f21b82ee98e1f7d0034f05ab3e7456ca93
|
2fc4ccffe5c557602302f087ae296fd31c0c1c2e
|
/apps/backups/serializers.py
|
8dc1c7418ea730901f3e574b5a9e84ba57ccd033
|
[] |
no_license
|
Duyshg/syncano-platform
|
7cfee3f877f761deaa5fb2e70f89deba4f90cb05
|
ea645f998edb80d5e1c6eca5ae9f7beb37d4e711
|
refs/heads/master
| 2020-04-25T20:47:32.717475
| 2019-02-14T17:49:06
| 2019-02-14T17:49:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,496
|
py
|
# coding=UTF8
from django.conf import settings
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField
from rest_framework.serializers import ModelSerializer, ValidationError
from apps.admins.serializers import AdminFullSerializer
from apps.core.exceptions import PermissionDenied
from apps.core.field_serializers import DisplayedChoiceField, JSONField
from apps.core.mixins.serializers import HyperlinkedMixin, MetadataMixin
from .models import Backup, Restore
from .site import default_site
class BackupSerializer(MetadataMixin, ModelSerializer):
    """Base serializer for Backup: everything except description, label,
    query_args and metadata is exposed read-only."""
    # The owning instance is referenced by name (slug), not primary key.
    instance = SlugRelatedField(slug_field='name',
                                required=False,
                                read_only=True,
                                allow_null=False)
    status = DisplayedChoiceField(Backup.STATUSES.as_choices(), read_only=True)
    # `author` serializes the model's `owner` relation.
    author = AdminFullSerializer(read_only=True, source="owner")
    details = JSONField(read_only=True)
    class Meta:
        model = Backup
        read_only_fields = ('id', 'instance', 'created_at', 'updated_at',
                            'archive', 'size', 'status', 'status_info', 'author', 'details')
        fields = read_only_fields + ('description', 'label', 'query_args', 'metadata')
        extra_kwargs = {'description': {'required': False}, 'label': {'required': False}}
class FullBackupSerializer(HyperlinkedMixin, BackupSerializer):
    """Full-backup variant: adds a `self` hyperlink and exposes a field
    list without query_args."""
    hyperlinks = (
        ('self', 'full_backups-toplevel-detail', ('id',)),
    )
    class Meta(BackupSerializer.Meta):
        fields = ('id', 'instance', 'created_at', 'updated_at', 'size',
                  'status', 'status_info', 'description', 'label', 'author', 'details', 'metadata')
class PartialBackupSerializer(HyperlinkedMixin, BackupSerializer):
    """Partial-backup variant: query_args becomes required, write-only,
    and is validated against the backup site's JSON schema."""
    hyperlinks = (
        ('self', 'partial_backups-toplevel-detail', ('id',)),
    )
    # The lambda defers schema evaluation until it is actually needed.
    query_args = JSONField(required=True, validators=[default_site.validate_query_args], write_only=True,
                           schema=lambda: default_site.jsonschema)
class RestoreSerializer(HyperlinkedMixin, ModelSerializer):
    """Serializer for Restore requests: exactly one of `backup` or
    `archive` must be supplied; raw archives are staff-only."""
    hyperlinks = (
        ('self', 'restores-detail', ('instance.name', 'id')),
    )
    # The real queryset is injected per-request in get_fields(); the empty
    # .none() default just prevents unscoped access.
    backup = PrimaryKeyRelatedField(required=False, allow_null=True,
                                    queryset=Backup.objects.none())
    status = DisplayedChoiceField(Backup.STATUSES.as_choices(), read_only=True)
    author = AdminFullSerializer(read_only=True, source="owner")
    class Meta:
        model = Restore
        fields = ('id', 'backup', 'created_at', 'updated_at', 'status', 'archive', 'status_info', 'author')
        read_only_fields = ('created_at', 'id', 'status', 'status_info', 'author')
    def get_fields(self):
        # Scope selectable backups to the requester's own successful
        # backups in the current deployment location.
        fields = super().get_fields()
        if 'request' in self.context:
            fields['backup'].queryset = Backup.objects.filter(
                owner=self.context['view'].request.user,
                status=Backup.STATUSES.SUCCESS,
                location=settings.LOCATION,
            )
        return fields
    def validate(self, attrs):
        # XOR check: reject "both set" as well as "neither set"
        # (`and` binds tighter than `or`, so the expression groups as
        # (both) or (neither)).
        has_archive = bool(attrs.get('archive', False))
        has_backup = bool(attrs.get('backup', False))
        if has_backup and has_archive or (not has_backup and not has_archive):
            raise ValidationError('You have to provide either backup or archive.')
        # Raw archive upload bypasses ownership checks, hence staff-only.
        if has_archive and not self.context['request'].user.is_staff:
            raise PermissionDenied()
        return super().validate(attrs)
|
[
"rk@23doors.com"
] |
rk@23doors.com
|
7c7705efd2928f8d5566e1d078bd5e130c52912c
|
5aad0901bba97bdec3e8ad576abdcb780cc7f99e
|
/experiment/surprise/prediction_algorithms/item_rel_tags.py
|
41ae0831820f49417af314ac8db066d57b73a2c7
|
[] |
no_license
|
HelloYym/Cross-TTCF
|
544f2322d25855586bf517bb769e94ffd112e847
|
d4504af02a7d0dcc1b5c59aba33ba9bc897e381d
|
refs/heads/master
| 2021-06-19T01:04:32.401074
| 2017-06-07T05:53:29
| 2017-06-07T05:53:29
| 86,427,595
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,272
|
py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from six.moves import range
import copy
from .algo_base import AlgoBase
class ItemRelTags(AlgoBase):
    """Latent-factor rating model in which each item's factors are
    augmented by the frequency-weighted mean of its tags' factors
    (structurally akin to SVD++, with tags in the implicit-feedback role).
    Trained by plain SGD over all observed ratings."""
    def __init__(self, n_factors=100, n_epochs=20, biased=True, lr_all=.005,
                 reg_all=.02, lr_bu=None, lr_bi=None, lr_pu=None, lr_qi=None,
                 reg_bu=None, reg_bi=None, reg_pu=None, reg_qi=None,
                 confidence = 0.95,
                 verbose=False):
        self.n_factors = n_factors
        self.n_epochs = n_epochs
        self.biased = biased
        # Per-parameter learning rates / regularizations fall back to the
        # *_all values when not given explicitly.
        self.lr_all = lr_all
        self.lr_bu = lr_bu if lr_bu is not None else lr_all
        self.lr_bi = lr_bi if lr_bi is not None else lr_all
        self.lr_pu = lr_pu if lr_pu is not None else lr_all
        self.lr_qi = lr_qi if lr_qi is not None else lr_all
        self.reg_all = reg_all
        self.reg_bu = reg_bu if reg_bu is not None else reg_all
        self.reg_bi = reg_bi if reg_bi is not None else reg_all
        self.reg_pu = reg_pu if reg_pu is not None else reg_all
        self.reg_qi = reg_qi if reg_qi is not None else reg_all
        # Confidence level passed to the trainset's rank-sum test in train().
        self.confidence = confidence
        self.verbose = verbose
        AlgoBase.__init__(self)
        self.estimate_with_tags = True
    def train(self, trainset):
        # Filter/prepare tags via the trainset's rank-sum test, then fit.
        trainset.rank_sum_test(confidence=self.confidence)
        trainset.construct()
        AlgoBase.train(self, trainset)
        self.sgd(trainset)
    def sgd(self, trainset):
        # user biases
        bu = np.zeros(trainset.n_users, np.double)
        # item biases
        bi = np.zeros(trainset.n_items, np.double)
        # user factors
        pu = np.random.random((trainset.n_users, self.n_factors)
                              ) / np.sqrt(self.n_factors)
        # item factors
        qi = np.random.random((trainset.n_items, self.n_factors)
                              ) / np.sqrt(self.n_factors)
        # tag factors
        yt = np.zeros((trainset.n_tags,
                       self.n_factors), np.double)
        lr_all = self.lr_all
        lr_bu = self.lr_bu
        lr_bi = self.lr_bi
        lr_pu = self.lr_pu
        lr_qi = self.lr_qi
        reg_all = self.reg_all
        reg_bu = self.reg_bu
        reg_bi = self.reg_bi
        reg_pu = self.reg_pu
        reg_qi = self.reg_qi
        global_mean = trainset.global_mean
        for current_epoch in range(self.n_epochs):
            if self.verbose:
                print("Processing epoch {}".format(current_epoch))
            for u, i, r in trainset.all_ratings():
                item_tags = trainset.get_item_tags(i)
                # max(1, ...) guards the division for tag-less items.
                n_tags = max(1, sum(item_tags.values()))
                # Tag representation of item i: frequency-weighted mean
                # of its tags' factor vectors.
                sum_yt = np.sum(
                    [yt[tid] * freq for tid, freq in item_tags.items()], axis=0) / n_tags
                # compute current error
                dot = np.dot((qi[i] + sum_yt), pu[u])
                err = r - (global_mean + bu[u] + bi[i] + dot)
                # update biases
                if self.biased:
                    bu[u] += lr_bu * (err - reg_bu * bu[u])
                    bi[i] += lr_bi * (err - reg_bi * bi[i])
                # update factors
                pu[u] += lr_pu * (err * (qi[i] + sum_yt) - reg_pu * pu[u])
                qi[i] += lr_qi * (err * pu[u] - reg_qi * qi[i])
                # Each tag factor moves proportionally to its frequency
                # share of the item's tag mass.
                for t, freq in item_tags.items():
                    yt[t] += lr_all * \
                        (pu[u] * freq * (err / n_tags) - reg_all * yt[t])
        self.bu = bu
        self.bi = bi
        self.pu = pu
        self.qi = qi
        self.yt = yt
    def estimate(self, u, i, tags):
        # Prediction mirrors the training objective:
        # global mean + biases + <qi + tag-mean, pu>.
        est = self.trainset.global_mean
        if self.trainset.knows_user(u):
            est += self.bu[u]
        if self.trainset.knows_item(i):
            est += self.bi[i]
        if self.trainset.knows_user(u) and self.trainset.knows_item(i):
            # deepcopy so the trainset's tag dict cannot be mutated here
            # — NOTE(review): confirm the copy is actually necessary.
            item_tags = copy.deepcopy(self.trainset.get_item_tags(i))
            yt_cnt = max(sum(item_tags.values()), 1)
            yt_sum = np.sum([self.yt[tid] * freq for tid,
                             freq in item_tags.items()], axis=0) / yt_cnt
            est += np.dot((self.qi[i] + yt_sum), self.pu[u])
        return est
|
[
"yangym@zju.edu.cn"
] |
yangym@zju.edu.cn
|
896785f9a67cae451dd0cc416ffc28e3f1afa9a3
|
456a87fc1d6c6ea29063b542a4ae3d636577a56d
|
/06_Python_Fonksiyonlar/04_function-demos.py
|
d9240c2dede075c4739ac482a7520918bb307646
|
[] |
no_license
|
dyedefRa/python_bastan_sona_sadik_turan
|
baca8a8e05321e21bcd9d0c2bd97504d93ae8c33
|
a289501b408a26c4036d68968001e2b4a6a57da7
|
refs/heads/master
| 2021-03-04T12:28:48.481785
| 2020-02-26T12:07:35
| 2020-02-26T12:07:35
| 246,033,399
| 1
| 0
| null | 2020-03-09T12:45:54
| 2020-03-09T12:45:54
| null |
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
# 1- Gönderilen bir kelimeyi belirtilen kez ekranda gösteren fonksiyonu yazın.
'''
word = input('word : ')
count = int(input('count : '))
def yazdir(word,count):
for n in range(0,count):
print(word)
yazdir(word,count)
def yazdir2(word,count):
print(word*count)
yazdir2(word+'\n',count)
'''
# 2- Kendine gönderilen sınırsız sayıdaki parametreyi bir listeye çeviren fonksiyonu yazınız.
'''
def listeyeCevir(*params):
liste = params
return liste
print(listeyeCevir('meti','oguzhan',1986,'metinoguzhan@gmail.com'))
def listeyeCevir2(*params):
liste = []
for n in params:
liste.append(n)
return liste
print(listeyeCevir2(10,20,30,40,50,60,'Merhaba'))
'''
# 3- Gönderilen 2 sayı arasındaki tüm asal sayıları bulun.
'''
def asalSayiBulma(baslangic, bitis):
asalSayiListesi = []
isPrime = True
for n in range(baslangic, bitis+1):
if(baslangic>1):
for bolen in range(2, n):
if n % bolen == 0:
isPrime = False
break
else:
isPrime = True
else:
isPrime = True
if isPrime:
asalSayiListesi.append(n)
return asalSayiListesi
print(asalSayiBulma(2, 19))
def asalSayilariBul(sayi1, sayi2):
for sayi in range(sayi1, sayi2+1):
if(sayi1>1):
for i in range(2,sayi):
if sayi % i == 0:
break
else:
print(sayi)
sayi1 = int(input('sayı 1 : '))
sayi2 = int(input('sayı 2 : '))
asalSayilariBul(sayi1,sayi2)
'''
# 4- Kendisine gönderilen bir sayının tam bölenlerini bir liste şeklinde döndürünüz.
def tamBolenListesi(sayi):
    """Print every exact divisor of *sayi* (from 1 to sayi) and return
    them as a list.

    The exercise (comment above) asks for the divisors to be *returned*
    as a list; the original only printed them. Printing is kept for
    backward compatibility, and the list is returned as well (previous
    callers ignored the old None return, so this is compatible).
    """
    bolenler = []
    for n in range(1, sayi + 1):
        if sayi % n == 0:
            print(n)
            bolenler.append(n)
    return bolenler
# Interactive driver: read an integer and print its divisors
# (the Turkish prompt is runtime output and is kept as-is).
sayi = int(input('sayı : '))
tamBolenListesi(sayi)
def tamBolenleriBul(sayi):
    """Return the proper divisors of *sayi* strictly between 1 and *sayi*."""
    return [bolen for bolen in range(2, sayi) if sayi % bolen == 0]
print(tamBolenleriBul(90))
|
[
"metinoguzhann@gmail.com"
] |
metinoguzhann@gmail.com
|
3ae8ac2cf1fd31a817682334a42b0a5be16ee6b1
|
d267ec32822b24092f617e88da919d1709549394
|
/wproject1m/ecommerce2/One_GB_Mobiles/models.py
|
da5e919880576434f6e016c4f8a762046b6dfaa9
|
[] |
no_license
|
sam-student/Evaluation
|
42fcccae54358fbb6a8bef8c5f9d80a7bc075864
|
3ba7842a15e431d30618c28819ea9b64c618ef2a
|
refs/heads/master
| 2020-05-17T04:23:08.997952
| 2019-04-25T21:05:46
| 2019-04-25T21:05:46
| 183,507,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,768
|
py
|
import random
import os
from django.db import models
from django.urls import reverse
# Create your models here.
from django.db.models.signals import pre_save, post_save
from ecommerce.utils import unique_slug_generator
def get_filename_ext(filename):
    """Split *filename* into (bare name, extension).

    The extension keeps its leading dot. FIX: the original computed the
    basename but then split the *full path*, so the returned name still
    contained directory components; split the basename instead. (The
    visible caller, upload_image_path, only uses the extension.)
    """
    base_name = os.path.basename(filename)
    name, ext = os.path.splitext(base_name)
    return name, ext
def upload_image_path(instance, filename):
    """Build the storage path for an uploaded image.

    A random numeric filename avoids collisions. FIX: dropped the two
    leftover debug print() calls; path construction is unchanged.
    """
    new_filename = random.randint(1, 39321457854)
    name, ext = get_filename_ext(filename)
    final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
    # NOTE(review): the directory component uses the *original* filename
    # (extension included), not the random name — confirm this is intended.
    return "One_GB_Mobiles/{new_filename}/{final_filename}".format(new_filename=filename, final_filename=final_filename)
class ProductQuerySet(models.query.QuerySet):
    """Custom queryset; active()/featured() are placeholders that
    currently apply no filtering (empty filter())."""
    def active(self):
        return self.filter()
    def featured(self):
        return self.filter()
class ProductManager(models.Manager):
    """Manager exposing ProductQuerySet plus an id lookup helper."""
    def get_queryset(self):
        return ProductQuerySet(self.model, using=self._db)
    def get_by_id(self, id):
        """Return the product with primary key *id*, or None if absent.

        FIX: `id` is the primary key and therefore unique, so .first()
        alone is equivalent to the original count()==1 check while
        avoiding the extra COUNT query it issued before fetching.
        """
        return self.get_queryset().filter(id=id).first()
class One_GB_Mobile(models.Model):
    """Catalog entry for a mobile phone in the 1 GB-RAM category.

    Nearly every spec is a TextField with a sample default value; `slug`
    is filled automatically via a pre_save signal registered in this
    module. NOTE: changing any field here requires a Django migration.
    """
    title = models.CharField(max_length=120)
    slug = models.SlugField(blank=True, unique=True)
    price = models.DecimalField(decimal_places=2, max_digits=20, default=39.99)
    # --- feature / connectivity specs ---
    Charging = models.TextField(default="good speakers")
    Torch = models.TextField(default="Yes")
    Games = models.TextField(default="built-in + downloadable")
    Messaging = models.TextField(default=", SMS (threaded view), MMS, Email, Push Email")
    Browser = models.TextField(default="HTML5")
    Audio = models.TextField(default="3.5mm audio jack, MP4/WMV/H.264 player")
    Data = models.TextField(default="GPRS, Edge, 3G (HSPA 42.2/5.76 Mbps), 4G (LTE-A (2CA) Cat6 300/50 Mbps")
    NFC = models.TextField(default="Yes")
    USB = models.TextField(default="microUSB 2.0")
    GPS = models.TextField(default="Yes + A-GPS support & Glonass, BDS, GALILEO")
    Bluetooth = models.TextField(default="None")
    Wifi = models.TextField(default="Wi-Fi 802.11 a/b/g/n/ac, dual-band, hotspot")
    # --- camera / storage / display ---
    Front = models.TextField(default="13 MP, f/1.9, LED flash")
    Main = models.TextField(default="8MP")
    card = models.TextField(default="Yes")
    BuiltIn = models.TextField(default="16GB Built-in")
    Features = models.TextField(default="None")
    Protection = models.TextField(default="Yes")
    Resolution = models.TextField(default="720 x 1280 Pixels (~282 PPI) ")
    Size = models.TextField(default="5.5 inches")
    Technology = models.TextField(default="None")
    # --- chipset / radio / physical ---
    GPU = models.TextField(default="Mali-T830MP2 ")
    Chipset = models.TextField(default="None")
    CPU = models.TextField(default="None")
    FourGBand = models.TextField(default="LTE")
    ThreeGBand = models.TextField(default="HSDPA 850 / 900 / 1700(AWS) / 1900 / 2100 ")
    TwoGBand = models.TextField(default="SIM1: GSM 850 / 900 / 1800 / 1900 SIM2: GSM 850 / 900 / 1800 / 1900 ")
    Color = models.TextField(default="Silver, Space Gray, Gold")
    SIM = models.TextField(default="Single SIM (Nano-SIM) ")
    Weight = models.TextField(default="148g")
    Dimension = models.TextField(default="146.2 x 71.3 x 8 mm")
    UIBuild = models.TextField(default="TouchWiz UI")
    OperatingSystem = models.TextField(default="Android v7.1 Nougat")
    # --- media / reviews ---
    image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
    image1 = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
    image2 = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
    Review_count = models.TextField(default="90")
    Average_Rating = models.TextField(default=" 4")
    Reviews = models.TextField(default="None")
    Ram = models.TextField(default="2GB")
    # description = models.TextField()
    # featured = models.BooleanField(default=False)
    # active = models.BooleanField(default=True)
    # timestamp = models.DateTimeField(auto_now_add=True)
    objects = ProductManager()
    def get_absolute_url(self):
        #return "/products/{slug}".format(slug=self.slug)
        return reverse("One_GB_Mobiles:detail", kwargs={"slug": self.slug})
    def __str__(self):
        return self.title
    def __unicode__(self):
        return self.title
    def name(self):
        # Alias for templates/admin that expect a `name` accessor.
        return self.title
def product_pre_save_receiver(sender, instance , *args,**kwargs):
    """Populate a unique slug on One_GB_Mobile rows that don't have one yet."""
    if instance.slug:
        return
    instance.slug = unique_slug_generator(instance)
pre_save.connect(product_pre_save_receiver, sender=One_GB_Mobile)
|
[
"abc@gmail.com"
] |
abc@gmail.com
|
2469c0eba172dd50239c61a100a2e4db476432c2
|
5733fb1a6746146889ac0941258ef5716ea17e7e
|
/snippets/migrations/0003_auto_20171127_0352.py
|
fa74dc12b146cc8cb2888b9c339cc997394a825c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
teraoka-hiroshi/django-auth-example
|
b401df8877c3fc9ca61cf1cdb7d7541ef8e19820
|
675492aeb5f42dc04f9ba5de7f8f528120ddceea
|
refs/heads/master
| 2022-01-13T08:23:25.879459
| 2018-05-21T17:06:22
| 2018-05-21T17:06:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
# Generated by Django 2.0rc1 on 2017-11-26 18:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Renames Snippet.posted_by -> created_by and replaces the single
    # created_date column with auto-managed created_at/updated_at fields.
    dependencies = [
        ('snippets', '0002_auto_20171127_0329'),
    ]
    operations = [
        migrations.RenameField(
            model_name='snippet',
            old_name='posted_by',
            new_name='created_by',
        ),
        migrations.RemoveField(
            model_name='snippet',
            name='created_date',
        ),
        migrations.AddField(
            model_name='snippet',
            name='created_at',
            # auto_now_add requires a one-off default to fill existing rows.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='投稿日'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='snippet',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, verbose_name='更新日'),
        ),
    ]
|
[
"contact@c-bata.link"
] |
contact@c-bata.link
|
680ef4bff1f4d131a6765303e1123e2525fa7bb0
|
d7f2df4896898b9c30ce58507ecc72d83c34f07c
|
/classification.py
|
a079ad32e31b4e0e1fbb10af9cea8d01280bd65b
|
[] |
no_license
|
candlewill/Vecamend
|
a12e6f74f22325cd7993c41661816780d2f3e868
|
7b73678cd4eb4aba926d4cbe752c91c7fa10ebc3
|
refs/heads/master
| 2021-01-10T13:27:10.847329
| 2016-01-18T16:09:00
| 2016-01-18T16:09:00
| 46,402,087
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
import numpy as np
from load_data import load_pickle
from sklearn.cross_validation import ShuffleSplit
from sklearn.linear_model import LogisticRegression
def build_data():
    """Load the amended positive/negative vectors and build (X, Y) arrays.

    Positive examples are labeled 1, negative examples 0, in that order.
    """
    positive_data = load_pickle('./tmp/amended_pos.p')
    negative_data = load_pickle('./tmp/amended_neg.p')
    X = [positive_data[k] for k in positive_data.keys()]
    X += [negative_data[k] for k in negative_data.keys()]
    Y = [1] * len(positive_data) + [0] * len(negative_data)
    return np.array(X), np.array(Y)
def train_model(X, Y):
    """Evaluate logistic regression over 20 random 80/20 shuffle splits.

    Prints per-split accuracy and the mean accuracy over all splits.
    NOTE(review): sklearn.cross_validation and the ShuffleSplit(n=...,
    indices=...) signature were removed in modern scikit-learn; this file
    requires an old (pre-0.18) release -- confirm the pinned version.
    """
    nub_iter = 20  # number of random train/test splits
    rs = ShuffleSplit(n=len(X), n_iter=nub_iter, test_size=0.2, indices=True, random_state=0)
    accuracy = []
    for train_index, test_index in rs:
        X_test, Y_test = X[test_index], Y[test_index]
        X_train, Y_train = X[train_index], Y[train_index]
        classifier = LogisticRegression()
        classifier.fit(X_train, Y_train)
        acc = classifier.score(X_test, Y_test)
        accuracy.append(acc)
        print('准确率Accuracy: %s.'%acc)
    print('平均准确率: %s.' % np.mean(np.array(accuracy)))
if __name__ == '__main__':
    # Entry point: build the dataset from the pickles, then evaluate.
    X, Y = build_data()
    train_model(X, Y)
|
[
"yunchaohe@gmail.com"
] |
yunchaohe@gmail.com
|
ba451ddd52423d13c07f5377076fc5316f56263b
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/web/v20190801/list_static_site_function_app_settings.py
|
6388c3e6d80456c0f9b063f72414997bc774ce73
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,459
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListStaticSiteFunctionAppSettingsResult',
'AwaitableListStaticSiteFunctionAppSettingsResult',
'list_static_site_function_app_settings',
'list_static_site_function_app_settings_output',
]
@pulumi.output_type
class ListStaticSiteFunctionAppSettingsResult:
    """
    String dictionary resource.
    """
    # Generated by the Pulumi SDK Generator: each constructor argument is
    # type-checked, stored via pulumi.set, and exposed read-only below.
    def __init__(__self__, id=None, kind=None, name=None, properties=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> Mapping[str, str]:
        """
        Settings.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListStaticSiteFunctionAppSettingsResult(ListStaticSiteFunctionAppSettingsResult):
    # Awaitable wrapper: __await__ yields nothing and immediately returns a
    # plain copy, so callers can 'await' the result uniformly.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListStaticSiteFunctionAppSettingsResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            properties=self.properties,
            type=self.type)
def list_static_site_function_app_settings(name: Optional[str] = None,
                                           resource_group_name: Optional[str] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStaticSiteFunctionAppSettingsResult:
    """
    String dictionary resource.
    :param str name: Name of the static site.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK version bundled with this provider package.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20190801:listStaticSiteFunctionAppSettings', __args__, opts=opts, typ=ListStaticSiteFunctionAppSettingsResult).value
    return AwaitableListStaticSiteFunctionAppSettingsResult(
        id=__ret__.id,
        kind=__ret__.kind,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
@_utilities.lift_output_func(list_static_site_function_app_settings)
def list_static_site_function_app_settings_output(name: Optional[pulumi.Input[str]] = None,
                                                  resource_group_name: Optional[pulumi.Input[str]] = None,
                                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListStaticSiteFunctionAppSettingsResult]:
    """
    String dictionary resource.
    :param str name: Name of the static site.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    # Body intentionally empty: lift_output_func delegates to the plain
    # invoke function above and lifts it into the Pulumi Output world.
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
2e7c3ca2251c8b4024a5b4bf215a578d51f2c361
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_upgr2_20180607105940.py
|
e76935186c51e8439262854cc75ecf84734205af
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,220
|
py
|
#row1 = [0,0,0,0,0,0,0,0,0]
#row2 = [0,0,0,5,0,6,0,0,0]
#row3 = [0,0,1,0,0,0,0,3,0]
#row4 = [0,9,5,0,0,0,2,0,0]
#row5 = [0,0,0,0,0,1,6,0,7]
#row6 = [1,0,6,0,0,9,0,0,5]
#row7 = [7,0,0,8,0,3,9,0,0]
#row8 = [0,3,8,9,0,0,0,2,0]
#row9 = [0,5,0,0,2,0,7,0,0]
# Column header labels and the board state. Rows 1-8 are pre-filled;
# only row9 still contains zeros (blanks) for the player to complete.
columns = [1,2,3,4,5,6,7,8,9]
row1 = [9,8,7,4,3,2,5,6,1]
row2 = [2,4,3,5,1,6,8,7,9]
row3 = [5,6,1,7,9,8,4,3,2]
row4 = [3,9,5,6,4,7,2,1,8]
row5 = [8,2,4,3,5,1,6,9,7]
row6 = [1,7,6,2,8,9,3,4,5]
row7 = [7,1,2,8,6,3,9,5,4]
row8 = [4,3,8,9,7,5,1,2,6]
row9 = [0,5,0,0,2,0,7,0,0]
def print_sudoku():
    """Draw the current board: column header, then 9 rows in 3x3 boxes.

    NOTE(review): very repetitive; a loop over [row1..row9] would shrink
    this a lot, but any refactor must reproduce the exact spacing.
    """
    print(' ', columns[0],columns[1],columns[2], sep=' ', end="   ")
    print(columns[3],columns[4],columns[5], sep=' ', end="   ")
    print(columns[6],columns[7],columns[8], sep=' ')
    print(" -------------------------------------" )
    print('1 |', row1[0],row1[1],row1[2], sep=' ', end=" | ")
    print(row1[3],row1[4],row1[5], sep=' ', end=" | ")
    print(row1[6],row1[7],row1[8], "|", sep=' ')
    print("  |                                   |")
    print('2 |', row2[0],row2[1],row2[2], sep=' ', end=" | ")
    print(row2[3],row2[4],row2[5], sep=' ', end=" | ")
    print(row2[6],row2[7],row2[8], "|", sep=' ')
    print("  |                                   |")
    print('3 |', row3[0],row3[1],row3[2], sep=' ', end=" | ")
    print(row3[3],row3[4],row3[5], sep=' ', end=" | ")
    print(row3[6],row3[7],row3[8], "|", sep=' ')
    print("  |-----------------------------------|" )
    print('4 |', row4[0],row4[1],row4[2], sep=' ', end=" | ")
    print(row4[3],row4[4],row4[5], sep=' ', end=" | ")
    print(row4[6],row4[7],row4[8], "|", sep=' ')
    print("  |                                   |")
    print('5 |', row5[0],row5[1],row5[2], sep=' ', end=" | ")
    print(row5[3],row5[4],row5[5], sep=' ', end=" | ")
    print(row5[6],row5[7],row5[8], "|", sep=' ')
    print("  |                                   |")
    print('6 |', row6[0],row6[1],row6[2], sep=' ', end=" | ")
    print(row6[3],row6[4],row6[5], sep=' ', end=" | ")
    print(row6[6],row6[7],row6[8], "|", sep=' ')
    print("  |-----------------------------------|" )
    print('7 |', row7[0],row7[1],row7[2], sep=' ', end=" | ")
    print(row7[3],row7[4],row7[5], sep=' ', end=" | ")
    print(row7[6],row7[7],row7[8], "|", sep=' ')
    print("  |                                   |")
    print('8 |', row8[0],row8[1],row8[2], sep=' ', end=" | ")
    print(row8[3],row8[4],row8[5], sep=' ', end=" | ")
    print(row8[6],row8[7],row8[8], "|", sep=' ')
    print("  |                                   |")
    print('9 |', row9[0],row9[1],row9[2], sep=' ', end=" | ")
    print(row9[3],row9[4],row9[5], sep=' ', end=" | ")
    print(row9[6],row9[7],row9[8], "|", sep=' ')
    print("  |-----------------------------------|" )
print("Your sudoku to solve:")
print_sudoku()
# Main game loop: read "row column value", validate the raw input format,
# write the value into the chosen row, redraw, then test for completion.
while True:
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print("a - row number")
    print("b - column number ")
    print("c - value \n ")
    x = input("Input a b c: ")
    print("")
    numbers= "0123456789"
    # Format check: exactly five characters "d d d".
    # NOTE(review): accepts 0 for row/column/value even though valid input
    # is 1-9, and x[3] (the second separator) is never checked.
    if len(x) != 5:
        print("BŁĄD - niepoprawny format!\n ")
        continue
    if (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (str(x[4]) not in numbers) or (str(x[1]) != " "):
        print("BŁĄD - niepoprawny format!\n ")
        continue
    if int(x[0])==1:
        row1[int(x[2])-1]=int(x[4])
    elif int(x[0])==2:
        row2[int(x[2])-1]=int(x[4])
    elif int(x[0])==3:
        row3[int(x[2])-1]=int(x[4])
    elif int(x[0])==4:
        row4[int(x[2])-1]=int(x[4])
    elif int(x[0])==5:
        row5[int(x[2])-1]=int(x[4])
    elif int(x[0])==6:
        row6[int(x[2])-1]=int(x[4])
    elif int(x[0])==7:
        row7[int(x[2])-1]=int(x[4])
    elif int(x[0])==8:
        row8[int(x[2])-1]=int(x[4])
    elif int(x[0])==9:
        row9[int(x[2])-1]=int(x[4])
    print_sudoku()
    # NOTE(review): a row summing to 45 does not prove a valid solution
    # (duplicates can also sum to 45); column and box checks are missing.
    if sum(row1) == 45 and sum(row2) == 45 and sum(row3) == 45 and sum(row4) == 45 and sum(row5) == 45 and sum(row6) == 45 and sum(row7) == 45 and sum(row8) == 45 and sum(row9) == 45:
        print("YOU WIN !! Master teach me!")
        break
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
40f2eac079d40bc274d3a0b07534b141a26c2887
|
6d9ebbee5dd515ff8d1e039b28ebcdbe185f6275
|
/info/modules/uic/ex_loaduitype.py
|
9d512b9e7c59a341e0e9d153b8620f8823240bcd
|
[] |
no_license
|
volitilov/PyQt5_learn
|
50bc378798609d98db2bd7fabe4b13ad1257e308
|
f5270173d62bb61b374593cb22c4f9905a61d404
|
refs/heads/master
| 2021-09-08T14:12:58.387721
| 2018-03-10T10:03:06
| 2018-03-10T10:03:06
| 115,354,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
from PyQt5 import QtWidgets, uic
import sys
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# class MyWidget(QtWidgets.QWidget):
# def __init__(self, parent=None):
# QtWidgets.QWidget.__init__(self, parent)
# Form, _ = uic.loadUiType('MyForm.ui')
# self.ui = Form()
# self.ui.setupUi(self)
# self.ui.button.clicked.connect(QtWidgets.qApp.quit)
# Build the UI base class from the Qt Designer file at import time;
# loadUiType returns (form_class, qt_base_class).
Form, _ = uic.loadUiType('MyForm.ui')
class MyWidget(QtWidgets.QWidget, Form):
    # Widget assembled from the .ui file; Form supplies setupUi().
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        # Designer-defined "button" quits the application when clicked.
        self.button.clicked.connect(QtWidgets.qApp.quit)
if __name__ == '__main__':
    # Standard Qt bootstrap: application object, window, event loop.
    # (Removed a redundant local 'import sys' -- sys is already imported
    # at the top of this file.)
    app = QtWidgets.QApplication(sys.argv)
    window = MyWidget()
    window.show()
    sys.exit(app.exec_())
|
[
"volitilov@gmail.com"
] |
volitilov@gmail.com
|
5a2acdcac93d580ba844a41f6be3e5618201d90b
|
a5b8dc5566567a8d23fc061b98ea2aa55e8f6361
|
/tests/test_endpoints_sync_methods.py
|
d2411b2d1594f498fcd609e6a5d0fea369d5b703
|
[
"MIT"
] |
permissive
|
vltr/sanic-jwt
|
5f512f91e89121c55498c88669c44dce441fdac8
|
19df69f78db121404325417f71d7bef2d1d4738d
|
refs/heads/master
| 2021-05-11T06:31:34.497767
| 2018-02-06T19:00:50
| 2018-02-06T19:01:42
| 117,989,778
| 0
| 0
| null | 2018-01-18T13:57:27
| 2018-01-18T13:57:27
| null |
UTF-8
|
Python
| false
| false
| 5,020
|
py
|
import binascii
import os
from sanic import Sanic
from sanic.response import json
import pytest
from sanic_jwt import initialize
from sanic_jwt import exceptions
from sanic_jwt.decorators import protected
@pytest.yield_fixture
def app_with_sync_methods(users):
    # Fixture: a Sanic app whose sanic-jwt callbacks are all *synchronous*
    # functions, verifying the library also accepts non-coroutine handlers.
    cache = {}  # in-memory stand-in for refresh-token storage
    def authenticate(request, *args, **kwargs):
        # Validate username/password from the JSON body against `users`.
        username = request.json.get('username', None)
        password = request.json.get('password', None)
        if not username or not password:
            raise exceptions.AuthenticationFailed(
                "Missing username or password.")
        user = None
        for u in users:
            if u.username == username:
                user = u
                break
        if user is None:
            raise exceptions.AuthenticationFailed("User not found.")
        if password != user.password:
            raise exceptions.AuthenticationFailed("Password is incorrect.")
        return user
    def store_refresh_token(user_id, refresh_token, *args, **kwargs):
        # Persist the refresh token keyed by user id.
        key = 'refresh_token_{user_id}'.format(user_id=user_id)
        cache[key] = refresh_token
    def retrieve_refresh_token(user_id, *args, **kwargs):
        key = 'refresh_token_{user_id}'.format(user_id=user_id)
        return cache.get(key, None)
    def retrieve_user(request, payload, *args, **kwargs):
        # Resolve the user from the JWT payload's user_id claim;
        # returns None when there is no payload.
        if payload:
            user_id = payload.get('user_id', None)
            if user_id is not None:
                for u in users:
                    if u.user_id == user_id:
                        return u
        else:
            return None
    sanic_app = Sanic()
    initialize(
        sanic_app,
        authenticate=authenticate,
        store_refresh_token=store_refresh_token,
        retrieve_refresh_token=retrieve_refresh_token,
        retrieve_user=retrieve_user)
    sanic_app.config.SANIC_JWT_REFRESH_TOKEN_ENABLED = True
    # Fresh random signing secret for every test run.
    sanic_app.config.SANIC_JWT_SECRET = str(
        binascii.hexlify(os.urandom(32)), 'utf-8')
    @sanic_app.route("/")
    async def helloworld(request):
        return json({"hello": "world"})
    @sanic_app.route("/protected")
    @protected()
    async def protected_request(request):
        return json({"protected": True})
    yield sanic_app
class TestEndpointsSync(object):
    """Exercise sanic-jwt endpoints when the user-supplied callbacks are
    plain synchronous functions (see the app_with_sync_methods fixture)."""
    @pytest.yield_fixture
    def authenticated_response(self, app_with_sync_methods):
        """Log in as user1 and yield the successful /auth response."""
        _, response = app_with_sync_methods.test_client.post(
            '/auth', json={
                'username': 'user1',
                'password': 'abcxyz'
            })
        assert response.status == 200
        yield response
    def test_root_endpoint(self, app_with_sync_methods):
        """The unprotected root route works without a token."""
        _, response = app_with_sync_methods.test_client.get('/')
        assert response.status == 200
        assert response.json.get('hello') == 'world'
    def test_protected_endpoint(self, app_with_sync_methods,
                                authenticated_response):
        """A valid Bearer token grants access to the protected route."""
        access_token = authenticated_response.json.get(
            app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
        _, response = app_with_sync_methods.test_client.get(
            '/protected',
            headers={
                'Authorization': 'Bearer {}'.format(access_token)
            })
        assert response.status == 200
        assert response.json.get('protected') is True
    def test_me_endpoint(self, app_with_sync_methods,
                         authenticated_response):
        """/auth/me answers for a valid token."""
        access_token = authenticated_response.json.get(
            app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
        _, response = app_with_sync_methods.test_client.get(
            '/auth/me',
            headers={
                'Authorization': 'Bearer {}'.format(access_token)
            })
        assert response.status == 200
    def test_refresh_token_sync(self, app_with_sync_methods,
                                authenticated_response):
        """Refreshing yields a new access token but no new refresh token.

        (Renamed from the original typo 'test_refresh_token_sunc'.)
        """
        access_token = authenticated_response.json.get(
            app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
        refresh_token = authenticated_response.json.get(
            app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME, None)
        _, response = app_with_sync_methods.test_client.post(
            '/auth/refresh',
            headers={'Authorization': 'Bearer {}'.format(access_token)},
            json={
                app_with_sync_methods.config.
                SANIC_JWT_REFRESH_TOKEN_NAME:
                    refresh_token
            })
        new_access_token = response.json.get(
            app_with_sync_methods.config.SANIC_JWT_ACCESS_TOKEN_NAME, None)
        assert response.status == 200
        assert new_access_token is not None
        assert response.json.get(
            app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME,
            None) is None  # there is no new refresh token
        assert \
            app_with_sync_methods.config.SANIC_JWT_REFRESH_TOKEN_NAME \
            not in response.json
|
[
"rkuesters@gmail.com"
] |
rkuesters@gmail.com
|
5244256cfaf82bd7735b6e8a555dc572ce428f38
|
d8d8fce19c88edc68f295c3ea0756ffe8576f982
|
/bin/reportPatches.py
|
2e8e58a6693daec37fd9b438f3cc2d819371309a
|
[] |
no_license
|
mjuric/lsst-pipe_tasks
|
4f178efd11b930d4c6bf3ed4ebce896ad8402537
|
585fa1b78ea99306edc9f89f98f0ce6618400240
|
refs/heads/master
| 2021-01-01T05:31:53.037714
| 2013-02-13T18:05:35
| 2013-02-13T18:05:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,206
|
py
|
#!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008, 2009, 2010, 2011, 2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""Select images and report which tracts and patches they are in
@warning: this is a very basic start. Misfeatures include:
- Only reports the best tract and patch containing the center of each image;
a proper implementation will report all tracts and patches that overlap each image
- One must specify a patch and tract even though those arguments are ignored.
"""
import numpy
import lsst.pex.config as pexConfig
import lsst.afw.coord as afwCoord
import lsst.afw.geom as afwGeom
import lsst.pipe.base as pipeBase
from lsst.pipe.tasks.makeSkyMap import MakeSkyMapTask
__all__ = ["ReportPatchesTask", "ReportPatchesArgumentParser"]
class ReportPatchesConfig(pexConfig.Config):
    """Config for ReportPatchesTask
    """
    coaddName = pexConfig.Field(
        doc = "coadd name: one of deep or goodSeeing",
        dtype = str,
        default = "deep",
    )
    # Bounding box corners; order is (min RA, min Dec, max RA, max Dec).
    raDecRange = pexConfig.ListField(
        doc = "min RA, min Dec, max RA, max Dec (ICRS, deg)",
        dtype = float,
        length = 4,
    )
class ReportPatchesTask(pipeBase.CmdLineTask):
    """Report which tracts and patches are needed for coaddition
    """
    ConfigClass = ReportPatchesConfig
    _DefaultName = "reportPatches"
    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
    @pipeBase.timeMethod
    def run(self, dataRef):
        """Report tracts and patches that are within a given region of a skymap
        @param dataRef: data reference for sky map.
        @return: a pipeBase.Struct with fields:
        - ccdInfoSetDict: a dict of (tractId, patchIndex): set of CcdExposureInfo
        """
        # NOTE(review): despite the docstring, this method returns None; it
        # only prints the tract/patch ids (Python 2 'print' statement below).
        skyMap = dataRef.get(self.config.coaddName + "Coadd_skyMap")
        # make coords in the correct order to form an enclosed space
        raRange = (self.config.raDecRange[0], self.config.raDecRange[2])
        decRange = (self.config.raDecRange[1], self.config.raDecRange[3])
        raDecList = [
            (raRange[0], decRange[0]),
            (raRange[1], decRange[0]),
            (raRange[1], decRange[1]),
            (raRange[0], decRange[1]),
        ]
        # The four corners of the RA/Dec box as ICRS coords in degrees.
        coordList = [
            afwCoord.IcrsCoord(afwGeom.Angle(ra, afwGeom.degrees), afwGeom.Angle(dec, afwGeom.degrees))
            for ra, dec in raDecList]
        tractPatchList = skyMap.findTractPatchList(coordList)
        for tractInfo, patchInfoList in tractPatchList:
            for patchInfo in patchInfoList:
                patchIndex = patchInfo.getIndex()
                print "tract=%d patch=%d,%d" % (tractInfo.getId(), patchIndex[0], patchIndex[1])
    @classmethod
    def _makeArgumentParser(cls):
        """Create an argument parser
        Use datasetType="deepCoadd" to get the right keys (even chi-squared coadds
        need filter information for this particular task).
        """
        return ReportPatchesArgumentParser(name=cls._DefaultName, datasetType="deepCoadd")
    def _getConfigName(self):
        """Don't persist config, so return None
        """
        return None
    def _getMetadataName(self):
        """Don't persist metadata, so return None
        """
        return None
class ReportPatchesArgumentParser(pipeBase.ArgumentParser):
    """A version of lsst.pipe.base.ArgumentParser specialized for reporting images.
    Required because there is no dataset type that is has exactly the right keys for this task.
    datasetType = namespace.config.coaddName + "Coadd" comes closest, but includes "patch" and "tract",
    which are irrelevant to the task, but required to make a data reference of this dataset type.
    Also required because butler.subset cannot handle this dataset type.
    """
    def _makeDataRefList(self, namespace):
        """Make namespace.dataRefList from namespace.dataIdList
        """
        datasetType = namespace.config.coaddName + "Coadd"
        namespace.dataRefList = []
        for dataId in namespace.dataIdList:
            # Dummy patch/tract values so a data reference can be built.
            # NOTE(review): the usual skymap convention is tract=int and
            # patch=(x,y); patch=0 / tract=(0,0) here looks swapped -- confirm.
            expandedDataId = dict(patch=0, tract=(0,0))
            expandedDataId.update(dataId)
            dataRef = namespace.butler.dataRef(
                datasetType = datasetType,
                dataId = expandedDataId,
            )
            namespace.dataRefList.append(dataRef)
if __name__ == "__main__":
    # Standard CmdLineTask entry point: parse command-line args and run.
    ReportPatchesTask.parseAndRun()
|
[
"rowen@uw.edu"
] |
rowen@uw.edu
|
902e6ddb8c5ff647d175b814fc0a296e4e136f3e
|
1315e1c8357f1bae712db6e3ebd3e76902173959
|
/src/app/agents/authorize.py
|
ddd0fa9a459158d404a713525161a2f8c71b4033
|
[] |
no_license
|
jldupont/musync
|
e2e68d85db40c9eb4f0369c25a4b73426b1d54c0
|
b52908b263ec7e18d1433dc27fa75e092fa415aa
|
refs/heads/master
| 2021-01-23T21:37:51.762597
| 2010-08-27T01:29:12
| 2010-08-27T01:29:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,442
|
py
|
"""
Authorization Agent
Responsible for handling the authorization process
with the user
Messages Processed:
- "start_authorize"
- "start_verify"
- "oauth_error"
- "oauth?"
Messages Generated:
- "error_requesttoken"
- "error_webbrowser"
- "error_accesstoken"
Created on 2010-08-15
@author: jldupont
"""
__all__=["AuthorizeAgent"]
import oauth.oauth as oauth
import httplib
import webbrowser
from app.system.base import AgentThreadedBase
from app.system.state import StateManager
class OauthClient(object):
    """Thin HTTP client for a provider's three OAuth 1.0 endpoints."""
    gREQUEST_TOKEN_URL = 'OAuthGetRequestToken'
    gACCESS_TOKEN_URL = 'OAuthGetAccessToken'
    gAUTHORIZATION_URL = 'OAuthAuthorizeToken'
    def __init__(self, server, port, base):
        # 'base' is the URL path prefix the endpoint names are appended to.
        self.server=server
        self.port=port
        self.base=base
        self.request_token_url=self.base+self.gREQUEST_TOKEN_URL
        self.access_token_url=self.base+self.gACCESS_TOKEN_URL
        self.authorize_token_url=self.base+self.gAUTHORIZATION_URL
        # One persistent plain-HTTP connection reused for all calls.
        self.connection = httplib.HTTPConnection("%s:%d" % (self.server, self.port))
    def fetch_request_token(self, oauth_request):
        # OAuth parameters travel in the Authorization header.
        self.connection.request(oauth_request.http_method, self.request_token_url, headers=oauth_request.to_header())
        response = self.connection.getresponse()
        return oauth.OAuthToken.from_string(response.read())
    def fetch_access_token(self, oauth_request):
        self.connection.request(oauth_request.http_method, self.access_token_url, headers=oauth_request.to_header())
        response = self.connection.getresponse()
        return oauth.OAuthToken.from_string(response.read())
    def authorize_token(self, oauth_request):
        # Here the OAuth parameters are encoded into the URL instead.
        self.connection.request(oauth_request.http_method, oauth_request.to_url())
        response = self.connection.getresponse()
        return response.read()
class AuthorizeAgent(AgentThreadedBase):
CALLBACK_URL = "oob"
REQUEST_TOKEN="oauth_request_token"
ACCESS_TOKEN_KEY="oauth_access_token_key"
ACCESS_TOKEN_SECRET="oauth_access_token_secret"
VERIFICATION_CODE="oauth_verification_code"
def __init__(self, app_name, server, port, consumer_key, consumer_secret, base):
"""
@param interval: interval in seconds
"""
AgentThreadedBase.__init__(self)
self.server=server
self.port=port
self.base=base
self.consumer_key=consumer_key
self.consumer_secret=consumer_secret
self.app_name=app_name
self.client=OauthClient(server, port, base)
self.consumer=None
self.signature_method_plaintext = oauth.OAuthSignatureMethod_PLAINTEXT()
self.signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.token=None
self.sm=StateManager(self.app_name)
def h_start_authorize(self, *_):
try:
self.token=None
self.consumer = oauth.OAuthConsumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer,
callback=self.CALLBACK_URL,
http_url=self.client.request_token_url)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, None)
self.token = self.client.fetch_request_token(oauth_request)
oauth_request = oauth.OAuthRequest.from_token_and_callback(token=self.token,
http_url=self.client.authorize_token_url)
url= oauth_request.to_url()
self.sm.save(self.REQUEST_TOKEN, self.token)
except Exception,e:
self.pub("error_requesttoken", e)
self.pub("log", "warning", "Authorization: 'RequestToken' failed: "+str(e))
return
self.pub("log", "getting authorization from url: "+url)
try:
webbrowser.open(url)
print url
except Exception,e:
self.pub("log", "error", "Opening url(%s)" % url)
def h_start_verify(self, verificationCode):
"""
Got verification code from user
Attempting to retrieve "access token"
"""
try:
self.consumer = oauth.OAuthConsumer(self.consumer_key, self.consumer_secret)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, token=self.token,
verifier=verificationCode,
http_url=self.client.access_token_url)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, self.token)
self.atoken = self.client.fetch_access_token(oauth_request)
except Exception,e:
self.atoken=None
self.sm.save(self.ACCESS_TOKEN_KEY, "")
self.sm.save(self.ACCESS_TOKEN_SECRET, "")
self.pub("oauth", None, None)
self.pub("error_accesstoken", e)
self.pub("log", "warning", "Verification: 'AccessToken' failed: "+str(e))
return
finally:
self.sm.save(self.VERIFICATION_CODE, verificationCode)
try:
key=self.atoken.key
secret=self.atoken.secret
self.pub("oauth", key, secret)
self.pub("log", "oauth: key: %s secret: %s" % (key, secret))
self.sm.save(self.ACCESS_TOKEN_KEY, key)
self.sm.save(self.ACCESS_TOKEN_SECRET, secret)
except:
self.sm.save(self.ACCESS_TOKEN_KEY, "")
self.sm.save(self.ACCESS_TOKEN_SECRET, "")
self.pub("log", "warning", "Verification: 'AccessToken' failed: "+str(e))
def h_oauth_error(self, *_):
"""
An oauth level error occured - reset access token
"""
self.sm.save(self.ACCESS_TOKEN, "")
self.sm.save(self.VERIFICATION_CODE, "")
def hq_oauth(self):
key=self.sm.retrieve(self.ACCESS_TOKEN_KEY)
secret=self.sm.retrieve(self.ACCESS_TOKEN_SECRET)
self.pub("oauth", key, secret)
"""
_=AuthorizeAgent()
_.start()
"""
|
[
"github@jldupont.com"
] |
github@jldupont.com
|
2c2198547b61fdbeb366057c6b3ffc9759df27f8
|
5963c12367490ffc01c9905c028d1d5480078dec
|
/tests/components/met/test_init.py
|
64323af56ce222c79f5d0d50a696796b676ae555
|
[
"Apache-2.0"
] |
permissive
|
BenWoodford/home-assistant
|
eb03f73165d11935e8d6a9756272014267d7d66a
|
2fee32fce03bc49e86cf2e7b741a15621a97cce5
|
refs/heads/dev
| 2023-03-05T06:13:30.354545
| 2021-07-18T09:51:53
| 2021-07-18T09:51:53
| 117,122,037
| 11
| 6
|
Apache-2.0
| 2023-02-22T06:16:51
| 2018-01-11T16:10:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
"""Test the Met integration init."""
from homeassistant.components.met.const import (
DEFAULT_HOME_LATITUDE,
DEFAULT_HOME_LONGITUDE,
DOMAIN,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.config_entries import ConfigEntryState
from . import init_integration
async def test_unload_entry(hass):
    """Test successful unload of entry."""
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.NOT_LOADED
    # Unloading must also drop the integration's stored runtime data.
    assert not hass.data.get(DOMAIN)
async def test_fail_default_home_entry(hass, caplog):
    """Test abort setup of default home location."""
    # Presumably these coordinates equal DEFAULT_HOME_LATITUDE/LONGITUDE
    # (the asserts below rely on it) -- confirm against met's const module.
    await async_process_ha_core_config(
        hass,
        {"latitude": 52.3731339, "longitude": 4.8903147},
    )
    assert hass.config.latitude == DEFAULT_HOME_LATITUDE
    assert hass.config.longitude == DEFAULT_HOME_LONGITUDE
    entry = await init_integration(hass, track_home=True)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.SETUP_ERROR
    assert (
        "Skip setting up met.no integration; No Home location has been set"
        in caplog.text
    )
|
[
"noreply@github.com"
] |
BenWoodford.noreply@github.com
|
7a0f7b03eaaf6ee2ded1913f70ceb02941f42851
|
ad080bd1612b980490ef2d1b61647cbc6beddf5d
|
/my_game/diplomacy/send_mail.py
|
89f7c3c35034326843b74ea6aeb2f25eeefc6298
|
[] |
no_license
|
rokealva83/my_game
|
8f915076986144234950aa4443e8bc51ad019664
|
76ecc1dbf60c7f93621ddca66d62d5fea2826d0e
|
refs/heads/master
| 2020-12-24T17:54:59.491881
| 2016-05-10T20:06:53
| 2016-05-10T20:06:53
| 29,264,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.shortcuts import render
from my_game.models import MyUser, UserCity, Warehouse
from my_game import function
from my_game.models import Mail
def send_mail(request):
    """Handle the diplomacy "send message" form.

    Validates that the addressee exists, stores a new Mail row, then
    re-renders the diplomacy page with the player's mailbox and warehouses.
    """
    if "live" not in request.session:
        # Not logged in: back to the landing page.
        return render(request, "index.html", {})
    else:
        session_user = MyUser.objects.filter(id=int(request.session['user'])).first()
        session_user_city = UserCity.objects.filter(id=int(request.session['user_city'])).first()
        function.check_all_queues(session_user)
        target = request.POST.get('message_target')
        target_name = MyUser.objects.filter(user_name=target).first()
        message = ''
        if target_name is None:
            message = 'Нет такого пользователя'
        else:
            title = request.POST.get('title')
            mail = request.POST.get('message')
            user = MyUser.objects.filter(user_id=session_user).first()
            user_name = user.user_name
            # NOTE(review): 'user' receives the addressee while 'recipient'
            # receives the sender, and login_recipient stores the *sender's*
            # name -- confirm these Mail field semantics against the model.
            new_mail = Mail(
                user=target_name.user_id,
                recipient=session_user,
                time=datetime.now(),  # naive local time; consider django.utils.timezone.now()
                status=1,
                category=1,
                login_recipient=user_name,
                title=title,
                message=mail
            )
            new_mail.save()
        mails = Mail.objects.filter(user=session_user).order_by('category', '-time')
        warehouses = Warehouse.objects.filter(user=session_user, user_city=session_user_city).order_by('resource_id')
        user_citys = UserCity.objects.filter(user=session_user)
        request.session['user'] = session_user.id
        request.session['user_city'] = session_user_city.id
        request.session['live'] = True
        output = {'user': session_user, 'warehouses': warehouses, 'user_city': session_user_city,
                  'user_citys': user_citys, 'mails': mails, 'message': message}
        return render(request, "diplomacy.html", output)
|
[
"tolik20002@bigmir.net"
] |
tolik20002@bigmir.net
|
a97607aa70412fb502d24b6319285ac72592a6b5
|
f662bd04d2f29ef25bbfd7e768b1e57dfbba4d9f
|
/apps/plmejoras/migrations/0002_plan_mejoras_activo.py
|
2d3ce174039be499a21756157156df72a31334f2
|
[] |
no_license
|
DARKDEYMON/sisevadoc
|
f59b193688f7eca7c140a03ee414f5d20ada78c7
|
9fc0943200986824a2aab2134fdba5c9f3315798
|
refs/heads/master
| 2020-03-19T03:27:07.907125
| 2019-12-11T13:30:43
| 2019-12-11T13:30:43
| 135,729,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# Generated by Django 2.0.8 on 2019-02-13 15:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plmejoras', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='plan_mejoras',
name='activo',
field=models.BooleanField(default=True),
),
]
|
[
"darkdeymon04@gmail.com"
] |
darkdeymon04@gmail.com
|
4f2e66526c5ab51faf1f6d381c56f55f00e4bf5d
|
fa76868608739eb514c7bf9cb3be6ca1a0283409
|
/l3-patterns+descriptors+metaclasses/lesson/abstract_classes.py
|
f4ac7bd0eb21302ca449c521709e5df6c6f295a9
|
[] |
no_license
|
k1r91/course2
|
efa4b200f19798275251d1b737613cf4560e3f47
|
a4b0413030e17d37406feb8f58314356e3ab15e3
|
refs/heads/master
| 2021-08-16T04:04:26.796036
| 2018-10-23T17:04:54
| 2018-10-23T17:04:54
| 135,111,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
from abc import ABCMeta, abstractmethod, abstractproperty
class Foo(metaclass=ABCMeta):
@abstractmethod
def spam(self, a, b):
pass
@property
@abstractmethod
def name(self, asd):
pass
class Grok:
pass
Foo.register(Grok)
g = Grok()
print(isinstance(g, Foo))
|
[
"cherkasov.kirill@gmail.com"
] |
cherkasov.kirill@gmail.com
|
a351715c2f009f811b5f12fe749143736ea6a79e
|
9269bbcf34563ba16602b693858cae2908c8505c
|
/Python/racy/plugins/libext/sconsbuilders/mkdir.py
|
10013540ef0215f405a79ce0f9b3c4578f6cb368
|
[
"BSD-3-Clause"
] |
permissive
|
cfobel/sconspiracy
|
4bfe4066731ecbfb781d17d3014c5b4bdb201396
|
478876b2b033d313085a33ac0f7647da18a8439a
|
refs/heads/master
| 2021-01-04T22:32:52.809083
| 2012-02-21T15:11:35
| 2012-02-21T15:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# -*- coding: UTF8 -*-
import os
import SCons.Node
import utils
def MkdirArgs(target, source, env):
args = []
args.extend(env.get('ARGS',[]))
args = map(env.subst, args)
return args
@utils.marker_decorator
def Mkdir(target, source, env):
for d in MkdirArgs(target, source, env):
env.Execute(SCons.Script.Mkdir(env.Dir(d)))
return None
def MkdirString(target, source, env):
""" Information string for Mkdir """
args = MkdirArgs(target, source, env)
return env.subst('[${CURRENT_PROJECT}]: mkdir ') + ' '.join(args)
def generate(env):
action = SCons.Action.Action(Mkdir, MkdirString)
builder = env.Builder( action = action )
env.Append(BUILDERS = {'Mkdir' : builder})
|
[
"none@none"
] |
none@none
|
91da0924f0be6bd28259ad79770de110838e7057
|
8f5aa55a8860a33290692a3455b75bc512a369bb
|
/controller/report.py
|
d485cbb30f9896976f82b80d90c8db5039b42b09
|
[] |
no_license
|
Trafire/PurchaseReports
|
c683072712988f50154f6bf301e0e82b8ef92d4e
|
71f2ae13b366d186fef9c524cd443b78c46cdb6f
|
refs/heads/master
| 2023-02-20T12:16:32.207351
| 2021-01-17T17:57:32
| 2021-01-17T17:57:32
| 330,453,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,294
|
py
|
from autof2.dailytasks import purchaselist
from openpyxl import Workbook
from openpyxl.worksheet.table import Table
from openpyxl.worksheet.table import TableStyleInfo
from openpyxl import load_workbook
import openpyxl
import datetime, time
from datetime import date
from autof2.interface import send_data
from autof2.navigation import navigation
import os
import os.path
def get_date_sunday(year, week):
week -= 1
d = str(year) + '-W' + str(week)
return datetime.datetime.strptime(d + '-1', "%Y-W%W-%w") - datetime.timedelta(days=1)
def get_today():
year = datetime.date.today().strftime("%Y")
week = datetime.date.today().strftime("%W")
day = datetime.date.today().strftime("%w")
d = str(year) + '-W' + str(week)
return datetime.datetime.strptime(d + '-' + day, "%Y-W%W-%w")
def get_current_week(add=0):
date = (datetime.datetime.now() + datetime.timedelta(days=add * 7)).isocalendar()
if date[2] == 7:
d = datetime.datetime.now() + datetime.timedelta(days=7)
date = d.isocalendar()
week = date[1]
year = date[0]
return (year, week)
def get_order_week(year, week):
current = get_date_sunday(year, week)
product = []
print("\nstarting Week %i:" % week)
for i in range(7):
str_date = current.strftime('%d/%m/%y')
if current >= get_today():
print("\tprocessing day - %s" % current.strftime('%d/%m/%y'), end=" ")
try:
new_product = purchaselist.run_all_purchase_list_report(str_date, str_date)
except:
new_product = purchaselist.run_all_purchase_list_report(str_date, str_date)
for p in new_product:
p.date = str_date
print(" lines found = %i" % len(new_product))
product.extend(new_product)
## print(current.strftime('%d/%m/%y'))
send = send_data.SendData()
send.send('{LEFT}')
current += datetime.timedelta(days=1)
print("Week %i total lines = %i" % (week, len(product)))
return product
def make_order_sheet(wb, product, year, week):
ws = wb.active
rows = 1
ws.append(product[0].excel_heading())
for line in product:
ws.append(line.excel_data())
rows += 1
right_corner = chr(64 + len(product[0].excel_heading())) + str(rows)
# define a table style
mediumStyle = openpyxl.worksheet.table.TableStyleInfo(name='TableStyleMedium2',
showRowStripes=True)
# create a table
table = openpyxl.worksheet.table.Table(ref='A1:' + right_corner,
displayName='orders',
tableStyleInfo=mediumStyle)
# add the table to the worksheet
ws.title = "orders"
ws.add_table(table)
def go_to_puchase_list():
for i in range(10):
if navigation.to_purchase_list():
return True
return False
def get_filename(directory, year, week):
directory += '\\%s\\week %s' % (year, week)
if not os.path.exists(directory):
os.makedirs(directory)
# save the workbook file
return directory + '\\week ' + str(week) + ' orders' + '.xlsx'
def create_report(year, week, directory = os.getcwd()):
if go_to_puchase_list():
print(year,week)
product = get_order_week(year, week)
product.sort()
if product:
wb = Workbook()
make_order_sheet(wb, product, year, week)
# create directory
filename = get_filename(directory, year, week)
## filename = "test2.xlsx"
bought = {}
if os.path.isfile(filename):
wb2 = load_workbook(filename)
a = (wb2['purchases'])
index = 0
for row in a.rows:
if index == 0:
index += 1
categories_order = row
else:
p = {}
for i in range(len(categories_order)):
p[categories_order[i].value] = row[i].value
if p['PurchaseID'] in bought:
bought[p['PurchaseID']]['Confirmed'] += p['Confirmed']
else:
bought[p['PurchaseID']] = p
bought[p['PurchaseID']]['Ordered'] = 0
for p in product:
if p.key not in bought:
bought[p.key] = p.excel_order_dict_vers()
else:
bought[p.key]['Ordered'] += p.quantity
product_list = []
for b in bought:
product_list.append(bought[b])
## wb = Workbook()
ws2 = wb.create_sheet()
rows = 1
headings = (
"PurchaseID", "f2_supplier", "Category", "Variety", "Colour", "Grade", "Supplier", "Price", "Ordered",
"Confirmed")
ws2.append(headings + ("Total",))
for line in bought:
l = []
for h in headings:
l.append(bought[line][h])
l.append("=J%s - I%s" % (rows + 1, rows + 1))
ws2.append(l)
rows += 1
right_corner = chr(64 + 1 + len(product[0].excel_order_headings())) + str(rows)
# define a table style
mediumStyle = openpyxl.worksheet.table.TableStyleInfo(name='TableStyleMedium2',
showRowStripes=True)
# create a table
table = openpyxl.worksheet.table.Table(ref='A1:' + right_corner,
displayName='purchases',
tableStyleInfo=mediumStyle)
# add the table to the worksheet
ws2.title = "purchases"
ws2.add_table(table)
# save the workbook file
## wb.save('test_1'.replace(':','-').replace('.','-') + '.xlsx')
##
try:
wb.save(filename)
except:
print("did not save")
|
[
"antoinewood@gmail.com"
] |
antoinewood@gmail.com
|
be877c7774e1bb701cf61046bdf12f27d5bf2d0f
|
5b3090dece7d3d276922f53bfba18fdff3a5ba12
|
/app/base/config.py
|
4fa810801a20b3d033452130877cc6a43e3b5644
|
[
"MIT"
] |
permissive
|
HsOjo/PyJSONEditor
|
338978b36a545982bec7285ba1de9aa5704f39b0
|
c2cf5398fa569ba0575048f3deebbf23028a61a1
|
refs/heads/master
| 2020-06-30T00:35:40.215143
| 2019-10-15T11:27:01
| 2019-10-15T11:27:01
| 200,668,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
import base64
import json
import os
import platform
import sys
from app.res.const import Const
from app.util import object_convert
from app.util.log import Log
sys_type = platform.system()
if sys_type == 'Darwin':
CONFIG_NAME = ('com.%s.%s' % (Const.author, Const.app_name)).lower()
CONFIG_PATH = os.path.expanduser('~/Library/Application Support/%s' % CONFIG_NAME)
else:
CONFIG_NAME = '%s.cfg' % Const.app_name
CONFIG_PATH = '%s/%s' % (os.path.dirname(sys.executable), CONFIG_NAME)
class ConfigBase:
_protect_fields = []
_config_path = CONFIG_PATH
language = 'en'
def load(self):
try:
if os.path.exists(self._config_path):
with open(self._config_path, 'r') as io:
config = json.load(io)
for f in self._protect_fields:
config[f] = base64.b64decode(config[f][::-1].encode()).decode()
object_convert.dict_to_object(config, self, new_fields=False)
replaces = dict([(getattr(self, f), Const.protector) for f in self._protect_fields])
Log.set_replaces(replaces)
Log.append('config_load', 'Info', object_convert.object_to_dict(self))
except:
self.save()
def save(self):
with open(self._config_path, 'w') as io:
config = object_convert.object_to_dict(self)
for f in self._protect_fields:
config[f] = base64.b64encode(config[f].encode()).decode()[::-1]
json.dump(config, io, indent=' ')
Log.append('config_save', 'Info', object_convert.object_to_dict(self))
def clear(self):
if os.path.exists(self._config_path):
os.unlink(self._config_path)
|
[
"1134031392@qq.com"
] |
1134031392@qq.com
|
d60f887276a626cc23bd15b52d1b2af930c4090c
|
badf813b23670f38233a2f66031df33b12d6685c
|
/tests/test_plotting.py
|
28ec689c4c5b8ed7e4ea260c1a73f340c0a70458
|
[
"MIT"
] |
permissive
|
healthonrails/annolid
|
6ef2de72bc666e247ae51ae1a5df3d75337fc28c
|
730f7dff2239ef716841390311b5b9250149acaf
|
refs/heads/main
| 2023-09-01T20:52:14.857248
| 2023-09-01T14:34:34
| 2023-09-01T14:34:34
| 290,017,987
| 25
| 8
|
MIT
| 2022-05-03T14:36:21
| 2020-08-24T19:14:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 843
|
py
|
import os
import numpy as np
import pandas as pd
from annolid.postprocessing.plotting import plot_trajactory
def test_plot_trajactory():
tracking_csv = '/tmp/tracking.csv'
cx = np.random.randint(0, 100, size=100)
cy = np.random.randint(0, 100, size=100)
instance_name = ['mouse'] * 100
df = pd.DataFrame({'cx': cx,
'cy': cy,
'instance_name': instance_name})
df.to_csv(tracking_csv, index=False)
plot_trajactory(tracking_csv, instance_name="mouse",
title="Trajectory",
xlabel="X position for instance centroid",
ylabel="Y position for instance centroid",
save_path='/tmp/trajectory.png',
trajactory_color_style='b-')
assert os.path.isfile('/tmp/trajectory.png')
|
[
"healthonrails@gmail.com"
] |
healthonrails@gmail.com
|
23e636f36c7413ef55ccef2d4ace1aa86d27543e
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Python wow/models/items/loot_table.py
|
c59db565ea77b475438d958eda7505fb8b440c3c
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:71adc2c7aa6f6a777843ec5491e3b43f745ce5167f6810246ac3946d4ec95a0b
size 6943
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
6f94a4b8651fca9e5f6dfe42b57d55cebbf1eaab
|
3851a5f2233aa68ae98aa4cd813e0a6dcbda464e
|
/spider/jiaoben/anjvkexzl (1).py
|
62d19c7f400f3ca1116d5840a905ace18a722522
|
[] |
no_license
|
scmsqhn/yunying
|
976a2c9fff98613361d4b28719080d9e4d8112dc
|
3c30b6985ac974bc75d50e8abe0b69174fb46700
|
refs/heads/master
| 2021-01-19T21:06:21.778902
| 2017-04-25T09:14:00
| 2017-04-25T09:14:00
| 88,607,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import re
import time
from lxml import etree
from scrapy import log
import random
import requests
from myscrapy.items import MyscrapyItemcd
import logging
'''
1. logging.CRITICAL - for critical errors (highest severity) 致命错误
2. logging.ERROR - for regular errors 一般错误
3. logging.WARNING - for warning messages 警告+错误
4. logging.INFO - for informational messages 消息+警告+错误
5. logging.DEBUG - for debugging messages (lowest severity) 低级别
'''
logging.warning("This is a warning")
logging.log(logging.WARNING,"This is a warning")
from myscrapy.middlewares import agents
from myscrapy.middlewares import proxys
class AnjvkexzlSpider(scrapy.Spider):
name = "anjvkexzl"
allowed_domains = ["cd.xzl.anjuke.com"]
start_urls = 'http://cd.xzl.anjuke.com/'
handle_httpstatus_list = [111, 404, 500]
def dum(self):
time.sleep(random.randint(1, 3))
def start_requests(self):
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 \
Safari/537.36 SE 2.X MetaSr 1.0'
headers = {'User-Agent': user_agent}
yield scrapy.Request(url=self.start_urls, headers=headers, method='GET', callback=self.parse)
def parse(self, response):
if response.status in self.handle_httpstatus_list:
proxy = random.choice(proxys)
agent = random.choice(agents)
headers.setdefault('User-Agent', agent)
metas = "http://%s" % proxy['ip_port']
self.logger.info('=================START_parse_404===================')
self.logger.info('Spider opened: %s, %s' % (spider.name,request.meta['proxy']))
self.logger.info('request: %s, %s' % (request.url, exception))
self.logger.info('=================END_parse_404===================')
yield scrapy.Request(url=response.url, callback=self.parse, meta={"proxy":metas}, method='GET', dont_filter=True )
else:
scrapy.log.msg()
lists = response.body.decode('utf-8')
selector = etree.HTML(lists)
area_list = selector.xpath('/html/body/div[5]/div[2]/div/div[1]/div/a')
for area in range[2:len(area_list)]:
area_url = iselector.xpath('//*[@id="list-content"]/div[%d]' % area)
print(area_url)
self.log(('Parse function called on %s', response.url),level=log.INFO)
self.log(('Parse function called on %s', response.url),level=log.INFO)
yield scrapy.Request(url=area_url, callback=self.detail_url, dont_filter=True )
#'http://cd.lianjia.com/ershoufang/dongcheng/pg2/'
def detail_url(self,response):
for i in range(1, 101):
self.dum()
contents = etree.HTML(response.body.decode('utf-8'))
col1 = contents.xpath('//*[@id="fy_info"]/ul[1]')
col2 = contents.xpath('//*[@id="fy_info"]/ul[2]')
cols[2] = [col1, col2]
self.dum();
for col in cols:
for i in col:
item = AnjvkexzlItem()
item['zizujin'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['yuezujin'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['loupan'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['dizhi'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['ditie'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['jzmianji'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['louceng'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['gongweishu'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['wuye'] = house.xpath('//*[@id="fy_info"]/ul[%d]/li[%d]/span[2]',(col, i)).pop()
item['pingjia'] = house.xpath('//*[@id="xzl_desc"]/div/text()')
self.logger.info('item is %s' % item)
latitude = contents.xpath('/html/body/script[11]/text()').pop()
relat = '''lat: ".*'''
relng = '''lng: ".*'''
j = re.search(relat, latitude)
w = re.search(relng, latitude)
j = j.split(':')[1]
w = w.split(':')[1]
item['jwd'] = jwd[j,w]
yield item
|
[
"haining.qin@changhong.com"
] |
haining.qin@changhong.com
|
5cf0ddbe1552569d850eeea5f21edb458b930f1b
|
7f73b32886f69e34dcef53b6593727effdc2fdf5
|
/sentence_transformers/models/WordEmbeddings.py
|
a235b3af1000f3ad8951d66c310cefcb3f74575c
|
[
"Apache-2.0"
] |
permissive
|
gabbage/sentence-transformers
|
bac116f35b5ba61bc64f35149a1963db851e5552
|
4a5308479bbb0bac7c0f60a3b2f6a01ebdfa2aa0
|
refs/heads/master
| 2020-07-07T01:46:56.442696
| 2019-08-19T16:24:56
| 2019-08-19T16:24:56
| 203,205,790
| 0
| 0
| null | 2019-08-19T16:03:58
| 2019-08-19T16:03:58
| null |
UTF-8
|
Python
| false
| false
| 5,836
|
py
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
import logging
import gzip
from tqdm import tqdm
import numpy as np
import os
import json
from ..util import import_from_string, fullname, http_get
from .tokenizer import WordTokenizer, WhitespaceTokenizer
class WordEmbeddings(nn.Module):
def __init__(self, tokenizer: WordTokenizer, embedding_weights, update_embeddings: bool = False, max_seq_length: int = 1000000):
nn.Module.__init__(self)
if isinstance(embedding_weights, list):
embedding_weights = np.asarray(embedding_weights)
if isinstance(embedding_weights, np.ndarray):
embedding_weights = torch.from_numpy(embedding_weights)
num_embeddings, embeddings_dimension = embedding_weights.size()
self.embeddings_dimension = embeddings_dimension
self.emb_layer = nn.Embedding(num_embeddings, embeddings_dimension)
self.emb_layer.load_state_dict({'weight': embedding_weights})
self.emb_layer.weight.requires_grad = update_embeddings
self.tokenizer = tokenizer
self.update_embeddings = update_embeddings
self.max_seq_length = max_seq_length
def forward(self, features):
token_embeddings = self.emb_layer(features['input_ids'])
cls_tokens = None
features.update({'token_embeddings': token_embeddings, 'cls_token_embeddings': cls_tokens, 'input_mask': features['input_mask']})
return features
def get_sentence_features(self, tokens: List[str], pad_seq_length: int):
pad_seq_length = min(pad_seq_length, self.max_seq_length)
tokens = tokens[0:pad_seq_length] #Truncate tokens if needed
input_ids = tokens
sentence_length = len(input_ids)
input_mask = [1] * len(input_ids)
padding = [0] * (pad_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
assert len(input_ids) == pad_seq_length
assert len(input_mask) == pad_seq_length
return {'input_ids': input_ids, 'input_mask': input_mask, 'sentence_lengths': sentence_length}
return {'input_ids': np.asarray(input_ids, dtype=np.int),
'input_mask': np.asarray(input_mask, dtype=np.int),
'sentence_lengths': np.asarray(sentence_length, dtype=np.int)}
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str) -> List[str]:
return self.tokenizer.tokenize(text)
def save(self, output_path: str):
with open(os.path.join(output_path, 'wordembedding_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
self.tokenizer.save(output_path)
def get_config_dict(self):
return {'tokenizer_class': fullname(self.tokenizer), 'update_embeddings': self.update_embeddings, 'max_seq_length': self.max_seq_length}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'wordembedding_config.json'), 'r') as fIn:
config = json.load(fIn)
tokenizer_class = import_from_string(config['tokenizer_class'])
tokenizer = tokenizer_class.load(input_path)
weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
embedding_weights = weights['emb_layer.weight']
model = WordEmbeddings(tokenizer=tokenizer, embedding_weights=embedding_weights, update_embeddings=config['update_embeddings'])
return model
@staticmethod
def from_text_file(embeddings_file_path: str, update_embeddings: bool = False, item_separator: str = " ", tokenizer=WhitespaceTokenizer(), max_vocab_size: int = None):
logging.info("Read in embeddings file {}".format(embeddings_file_path))
if not os.path.exists(embeddings_file_path):
logging.info("{} does not exist, try to download from server".format(embeddings_file_path))
if '/' in embeddings_file_path or '\\' in embeddings_file_path:
raise ValueError("Embeddings file not found: ".format(embeddings_file_path))
url = "https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+embeddings_file_path
http_get(url, embeddings_file_path)
embeddings_dimension = None
vocab = []
embeddings = []
with gzip.open(embeddings_file_path, "rt", encoding="utf8") if embeddings_file_path.endswith('.gz') else open(embeddings_file_path, encoding="utf8") as fIn:
iterator = tqdm(fIn, desc="Load Word Embeddings", unit="Embeddings")
for line in iterator:
split = line.rstrip().split(item_separator)
word = split[0]
if embeddings_dimension == None:
embeddings_dimension = len(split) - 1
vocab.append("PADDING_TOKEN")
embeddings.append(np.zeros(embeddings_dimension))
if (len(split) - 1) != embeddings_dimension: # Assure that all lines in the embeddings file are of the same length
logging.error("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")
continue
vector = np.array([float(num) for num in split[1:]])
embeddings.append(vector)
vocab.append(word)
if max_vocab_size is not None and max_vocab_size > 0 and len(vocab) > max_vocab_size:
break
embeddings = np.asarray(embeddings)
tokenizer.set_vocab(vocab)
return WordEmbeddings(tokenizer=tokenizer, embedding_weights=embeddings, update_embeddings=update_embeddings)
|
[
"rnils@web.de"
] |
rnils@web.de
|
882cb593206aad566ecbacb6fa9144344bd399b9
|
4111ca5a73a22174f189361bef654c3f91c3b7ed
|
/Lintcode/Ladder_11_15_A/134. LRU Cache.py
|
abb130048a64c5c6df4397b67d81b496d32268fd
|
[
"MIT"
] |
permissive
|
ctc316/algorithm-python
|
58b541b654509ecf4e9eb8deebfcbdf785699cc4
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
refs/heads/master
| 2020-03-16T06:09:50.130146
| 2019-08-02T02:50:49
| 2019-08-02T02:50:49
| 132,548,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
class Node:
def __init__(self, key="", val=-1, prev=None, next=None):
self.key = key
self.val = val
self.prev = prev
self.next = next
class LRUCache:
"""
@param: capacity: An integer
"""
def __init__(self, capacity):
self.capacity = capacity
self.mapping = {}
self.head = None
self.tail = None
"""
@param: key: An integer
@return: An integer
"""
def get(self, key):
if key not in self.mapping:
return -1
node = self.mapping[key]
self.__moveToHead(node)
return node.val
"""
@param: key: An integer
@param: value: An integer
@return: nothing
"""
def set(self, key, value):
if key not in self.mapping:
if len(self.mapping) >= self.capacity:
self.removeTail()
new_node = Node(key, value, None, self.head)
self.mapping[key] = new_node
if self.head:
self.head.prev = new_node
self.head = new_node
if self.tail is None:
self.tail = self.head
else:
node = self.mapping[key]
node.val = value
self.__moveToHead(node)
def __moveToHead(self, node):
if node is self.head:
return
if node.prev:
node.prev.next = node.next
if node.next:
node.next.prev = node.prev
if node is self.tail:
self.tail = node.prev
self.head.prev = node
node.next = self.head
self.head = node
def removeTail(self):
if self.tail.prev:
self.tail.prev.next = None
del self.mapping[self.tail.key]
self.tail = self.tail.prev
|
[
"mike.tc.chen101@gmail.com"
] |
mike.tc.chen101@gmail.com
|
1743aa0b591c3eb8da10ea9d4d5551356ad61da9
|
4510bbf54e2ca619c3a863f5ca03df6584585402
|
/tfx/examples/custom_components/container_components/download_grep_print_pipeline.py
|
2ce8d69992f2a165d0a9b87bd887d7450af33c60
|
[
"Apache-2.0"
] |
permissive
|
Mdlglobal-atlassian-net/tfx
|
e55f38336d1989ac970b5069c7128097ed86b422
|
37cbbb95c65e1a891045dd13232a7f2a293a7b70
|
refs/heads/master
| 2022-10-02T07:44:41.180873
| 2020-06-01T18:49:15
| 2020-06-01T18:49:53
| 268,607,840
| 0
| 1
|
Apache-2.0
| 2020-06-01T19:01:51
| 2020-06-01T19:01:50
| null |
UTF-8
|
Python
| false
| false
| 4,376
|
py
|
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container-based pipeline sample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Text
from tfx.dsl.component.experimental import container_component
from tfx.dsl.component.experimental import placeholders
from tfx.types import standard_artifacts
downloader_component = container_component.create_container_component(
name='DownloadFromHttp',
outputs={
'data': standard_artifacts.ExternalArtifact,
},
parameters={
'url': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
url="$0"
output_data_uri="$1"
output_data_path=$(mktemp)
# Running the main code
wget "$0" -O "$output_data_path" || curl "$0" > "$output_data_path"
# Getting data out of the container
gsutil cp "$output_data_path" "$output_data_uri"
''',
placeholders.InputValuePlaceholder('url'),
placeholders.OutputUriPlaceholder('data'),
],
)
grep_component = container_component.create_container_component(
name='FilterWithGrep',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
outputs={
'filtered_text': standard_artifacts.ExternalArtifact,
},
parameters={
'pattern': str,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
pattern="$0"
text_uri="$1"
text_path=$(mktemp)
filtered_text_uri="$2"
filtered_text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
grep "$pattern" "$text_path" >"$filtered_text_path"
# Getting data out of the container
gsutil cp "$filtered_text_path" "$filtered_text_uri"
''',
placeholders.InputValuePlaceholder('pattern'),
placeholders.InputUriPlaceholder('text'),
placeholders.OutputUriPlaceholder('filtered_text'),
],
)
print_component = container_component.create_container_component(
name='Print',
inputs={
'text': standard_artifacts.ExternalArtifact,
},
# The component code uses gsutil to upload the data to GCS, so the
# container image needs to have gsutil installed and configured.
# Fixing b/150670779 by merging cl/294536017 will lift this limitation.
image='google/cloud-sdk:278.0.0',
command=[
'sh', '-exc',
'''
text_uri="$0"
text_path=$(mktemp)
# Getting data into the container
gsutil cp "$text_uri" "$text_path"
# Running the main code
cat "$text_path"
''',
placeholders.InputUriPlaceholder('text'),
],
)
def create_pipeline_component_instances(text_url: Text, pattern: Text):
"""Creates tasks for the download_grep_print pipeline."""
downloader_task = downloader_component(url=text_url)
grep_task = grep_component(
text=downloader_task.outputs['data'],
pattern=pattern,
)
print_task = print_component(
text=grep_task.outputs['filtered_text'],
)
component_instances = [
downloader_task,
grep_task,
print_task,
]
return component_instances
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
327e3bd7f32ec9065be455843c7a3ed5b6283fed
|
4703856e735a81b43232bf47c8e1b0e7c29cc714
|
/charities/serializers.py
|
9a4a0bd37d128ac5bcd4edd3d9b128283e9a1c87
|
[
"MIT"
] |
permissive
|
salmanAndroidDev/charity-app
|
6a367e8e16b55db20f3624559547c33299155285
|
f2ea53c91c9cf46a63af6d3bef211c75dd5219bc
|
refs/heads/main
| 2023-03-17T04:31:55.291455
| 2021-03-04T19:45:07
| 2021-03-04T19:45:07
| 344,589,781
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
from rest_framework import serializers
from .models import Benefactor
from .models import Charity, Task
class BenefactorSerializer(serializers.ModelSerializer):
"""Serializer class for benefactor object"""
class Meta:
model = Benefactor
fields = ('experience', 'free_time_per_week')
class CharitySerializer(serializers.ModelSerializer):
"""Serializer class for charity object"""
class Meta:
model = Charity
fields = ('name', 'reg_number')
class TaskSerializer(serializers.ModelSerializer):
"""Serializer class for Task object"""
class Meta:
model = Task
fields = ('__all__')
|
[
"salmanAndB@outlook.com"
] |
salmanAndB@outlook.com
|
1d324d45ec8ac267e4dac7c06c7c9077ccda5aef
|
3861d9f9c68eb0b09c46b9a10b92fca8fa608a23
|
/Pygame/Snake/snake.py
|
e75117e333e90732a4c9f093dbd85a426892d47e
|
[] |
no_license
|
vuquangtam/Apps
|
3bbd8125dda67210862b114e3961f3d78676a06b
|
94ba79e87b914595937efc95d60d8531172c87fa
|
refs/heads/master
| 2021-01-22T13:08:22.922382
| 2016-02-01T02:51:49
| 2016-02-01T02:51:49
| 32,475,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,763
|
py
|
import pygame, random, os
BLOCK_WIDTH = 40
BLOCK_HEIGHT = 40
WINDOW_WIDTH = 1280
WINDOW_HEIGHT = 600
BLOCK_X_MAX = int(WINDOW_WIDTH / BLOCK_WIDTH)
BLOCK_Y_MAX = int(WINDOW_HEIGHT / BLOCK_HEIGHT)
LENGHT_OF_SNAKE = 5
START_POSITION_X = 10
START_POSITION_Y = 10
SNAKE_SPEED = 1
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
pygame.init()
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
folder = "data"
head_sprite = pygame.image.load(os.path.join(folder, "head_sprite.png")).convert_alpha()
head_sprite = pygame.transform.scale(head_sprite, (BLOCK_WIDTH, BLOCK_HEIGHT))
apple_sprite = pygame.image.load(os.path.join(folder, "apple.png")).convert_alpha()
apple_sprite = pygame.transform.scale(apple_sprite, (BLOCK_WIDTH, BLOCK_HEIGHT))
background = pygame.image.load(os.path.join(folder, "background.jpg")).convert()
background = pygame.transform.scale(background, (WINDOW_WIDTH, WINDOW_HEIGHT))
class Block(pygame.sprite.Sprite):
previous_part = None
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT))).convert()
self.image.set_colorkey(BLACK)
pygame.draw.circle(self.image, YELLOW, (BLOCK_WIDTH // 2, BLOCK_HEIGHT // 2), BLOCK_WIDTH // 2, 0)
self.rect = self.image.get_rect()
self.oldx = self.rect.x
self.oldy = self.rect.y
def update(self):
self.oldx = self.rect.x
self.oldy = self.rect.y
self.rect.x, self.rect.y = self.previous_part.oldpos()
def oldpos(self):
return self.oldx, self.oldy
class Head(Block):
def __init__(self):
Block.__init__(self)
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT)))
self.image.blit(head_sprite, (0, 0))
self.image.set_colorkey(WHITE)
self.rect = self.image.get_rect()
self.dx = SNAKE_SPEED
self.dy = 0
def update(self):
self.oldx = self.rect.x
self.oldy = self.rect.y
key = pygame.key.get_pressed()
if key[pygame.K_UP]:
self.dx = 0
if self.dy != SNAKE_SPEED:
self.dy = -SNAKE_SPEED
elif key[pygame.K_DOWN]:
self.dx = 0
if self.dy != -SNAKE_SPEED:
self.dy = SNAKE_SPEED
elif key[pygame.K_LEFT]:
self.dy = 0
if self.dx != SNAKE_SPEED:
self.dx = -SNAKE_SPEED
elif key[pygame.K_RIGHT]:
self.dy = 0
if self.dx != -SNAKE_SPEED:
self.dx = SNAKE_SPEED
self.rect.x += int(self.dx) * BLOCK_WIDTH
self.rect.y += int(self.dy) * BLOCK_HEIGHT
class Apple(pygame.sprite.Sprite):
def __init__(self, headOfSnake):
pygame.sprite.Sprite.__init__(self)
self.headOfSnake = headOfSnake
self.image = pygame.Surface((int(BLOCK_WIDTH), int(BLOCK_HEIGHT))).convert()
self.image.blit(apple_sprite, (0, 0))
self.image.set_colorkey(WHITE)
self.rect = self.image.get_rect()
self.reset()
def reset(self):
self.rect.x, self.rect.y = random.randint(1, BLOCK_X_MAX - 1), random.randint(1, BLOCK_Y_MAX - 1)
self.rect.x *= BLOCK_WIDTH
self.rect.y *= BLOCK_HEIGHT
def update(self):
return self.rect.x == self.headOfSnake.rect.x and self.rect.y == self.headOfSnake.rect.y
def drawLine(screen):
    """Draw the background grid: one white line per block boundary."""
    for x in range(0, WINDOW_WIDTH, BLOCK_WIDTH):
        pygame.draw.line(screen, WHITE,(x, 0), (x, WINDOW_HEIGHT))
    for y in range(0, WINDOW_HEIGHT, BLOCK_HEIGHT):
        pygame.draw.line(screen, WHITE,(0, y), (WINDOW_WIDTH, y))
def getText(text, color):
    """Render `text` in the default system font (size 50); return the surface."""
    font = pygame.font.SysFont(None, 50)
    textSurf = font.render(text, 1, color)
    return textSurf
clock = pygame.time.Clock()
level = 8  # frames per second passed to clock.tick() -> game speed
snake = pygame.sprite.Group()       # body segments only (head-collision target)
all_sprite = pygame.sprite.Group()  # everything that gets drawn
snake_list = []                     # head + segments, in follow order
head = Head()
head.rect.x = (START_POSITION_X + LENGHT_OF_SNAKE) * BLOCK_WIDTH
head.rect.y = START_POSITION_Y * BLOCK_HEIGHT
all_sprite.add(head)
previous = head
snake_list.append(head)
apple = Apple(head)
# Build the initial body behind the head, each block following the previous one.
for x in range(START_POSITION_X + LENGHT_OF_SNAKE - 1, START_POSITION_X, -1):
    block = Block()
    block.rect.x = x * BLOCK_WIDTH
    block.rect.y = START_POSITION_Y * BLOCK_HEIGHT
    block.previous_part = previous
    previous = block
    snake.add(block)
    all_sprite.add(block)
    snake_list.append(block)
all_sprite.add(apple)
# Main game loop (Python 2 file: note the bare `print` statement below).
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
    # Move head first, then each segment into its predecessor's old cell.
    for block in snake_list:
        block.update()
    # Apple eaten: respawn it and grow the snake by one tail segment.
    if apple.update():
        apple.reset()
        tail = snake_list[-1]
        block = Block()
        block.previous_part = tail
        block.update()
        snake.add(block)
        all_sprite.add(block)
        snake_list.append(block)
        print len(snake_list)
    # Head collided with its own body: quit.
    if len(pygame.sprite.spritecollide(head, snake, False)):
        pygame.quit()
    # Wrap the head around the screen edges.
    if head.rect.x < 0 : head.rect.x = BLOCK_X_MAX * BLOCK_WIDTH
    elif head.rect.x > BLOCK_X_MAX * BLOCK_WIDTH : head.rect.x = 0
    if head.rect.y < 0 : head.rect.y = BLOCK_Y_MAX * BLOCK_HEIGHT
    elif head.rect.y > BLOCK_Y_MAX * BLOCK_HEIGHT : head.rect.y = 0
    # Redraw: background, grid, sprites, then the score overlay.
    screen.blit(background, (0,0))
    drawLine(screen)
    all_sprite.draw(screen)
    screen.blit(getText('Score : %s'%(len(snake_list) - LENGHT_OF_SNAKE), BLUE), (10, 10))
    pygame.display.flip()
    clock.tick(level)
|
[
"vuquangtam1994@gmail.com"
] |
vuquangtam1994@gmail.com
|
f82270579338afb628549cc0faca8293c5922f33
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/FaceSecondDerivatives.py
|
a8db8408a607733b344af34107b62ddde5d1bfee
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
class FaceSecondDerivatives(object,IDisposable):
 """ Contains second partial derivatives of a face at a specified point. """
 # NOTE(review): auto-generated IronPython stub for the Revit API type
 # Autodesk.Revit.DB.FaceSecondDerivatives; all bodies are placeholders and
 # the real implementations live in the managed Revit assembly.
 def Dispose(self):
  """ Dispose(self: FaceSecondDerivatives) """
  pass
 def ReleaseUnmanagedResources(self,*args):
  """ ReleaseUnmanagedResources(self: FaceSecondDerivatives,disposing: bool) """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __repr__(self,*args):
  """ __repr__(self: object) -> str """
  pass
 # Properties below use placeholder lambdas; the docstring-style strings that
 # follow each one document the real getter signature.
 IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Specifies whether the .NET object represents a valid Revit entity.
 Get: IsValidObject(self: FaceSecondDerivatives) -> bool
 """
 MixedDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The mixed derivative.
 Get: MixedDerivative(self: FaceSecondDerivatives) -> XYZ
 """
 UUDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The second derivative with respect to U.
 Get: UUDerivative(self: FaceSecondDerivatives) -> XYZ
 """
 VVDerivative=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The second derivative with respect to V.
 Get: VVDerivative(self: FaceSecondDerivatives) -> XYZ
 """
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
f7a2b88e5951ed5c9599d5fa6f35931526001d5d
|
e84020108a7037d8d4867d95fada1b72cbcbcd25
|
/src/chattisgarh/misc/processNregaAssets.py
|
00148cbca88667705777386bf35810bf5a749c0f
|
[] |
no_license
|
rajesh241/libtech
|
8384316051a2e8c2d4a925cd43216b855b82e4d9
|
0105e717357a3626106028adae9bf162a7f93fbf
|
refs/heads/master
| 2022-12-10T03:09:00.048841
| 2020-06-14T09:39:04
| 2020-06-14T09:39:04
| 24,629,538
| 1
| 1
| null | 2022-12-08T02:26:11
| 2014-09-30T07:57:45
|
Python
|
UTF-8
|
Python
| false
| false
| 4,969
|
py
|
import csv
from bs4 import BeautifulSoup
import requests
import os
import time
import re
import sys
import urllib2
import MySQLdb
import time
import re
import os
import sys
import os.path
fileDir=os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, fileDir+'/../../includes/')
from settings import dbhost,dbuser,dbpasswd,sid,token
#Getting the block code
#inblock=sys.argv[1]
#print inblock
#Connect to MySQL Database
def main():
datadir='/home/libtech/webroot/chaupalDataDashboard/reports/general/chattisgarhNregaAssets/KOREA/'
workCodes=['AV','SK','CA','DP','FR','FP','FG','LD','IC','OP','PG','WH','RC','DW','RS','WC','IF']
workNames=['anganwadi','bharatNirmanRajeevGandhiSewaKendra','costalAreas','droughtProofing','fisheries','floodControlProtection','foodGrains','landDevelopment','microIrrigationWorks','otherWorks','playGround','renovationTraditionalWaterBodies','ruralConnectivity','ruralDrinkingWater','ruralSanitation','waterConservationWaterHarvesting','worksIndividualLand']
finYears=['2012-2013','2013-2014','2014-2015','2015-2016']
blockNames=['AMBIKAPUR','BATAULI','LAKHANPUR','LUNDRA','MAINPAT','SITAPUR','UDAIPUR']
blockCodes=['3305001','3305007','3305002','3305005','3305008','3305006','3305003']
db = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpasswd, db="korea",charset='utf8')
cur=db.cursor()
db.autocommit(True)
#Query to set up Database to read Hindi Characters
query="SET NAMES utf8"
cur.execute(query)
query="select stateCode,districtCode,blockCode,name from blocks"
# query="select stateCode,districtCode,blockCode,name from blocks where blockCode='005'"
cur.execute(query)
results=cur.fetchall()
for row in results:
fullBlockCode=row[0]+row[1]+row[2]
blockCode=row[2]
blockName=row[3]
print fullBlockCode+blockName
query="select workCode,description from workCodes where workCode='DP'"
query="select workCode,description from workCodes "
cur.execute(query)
results1=cur.fetchall()
for row1 in results1:
workDescription=row1[1]
finYears=['2012-2013','2013-2014','2014-2015','2015-2016']
# finYears=['2012-2013']
for finyear in finYears:
assetfilename=datadir+blockName+"/"+finyear+"/"+workDescription+".html"
print assetfilename
if (os.path.isfile(assetfilename)):
assethtml=open(assetfilename,'r').read()
# assethtml=re.sub(regex,"</font></td>",assethtml1)
else:
assethtml="Timeout expired"
htmlsoup=BeautifulSoup(assethtml)
try:
foundtable=htmlsoup.find('table',id="Table2")
table = foundtable.findNext('table')
rows = table.findAll('tr')
errorflag=0
except:
errorflag=1
if errorflag==0:
i=0
for tr in rows:
cols = tr.findAll('td')
print "Length of Columns ="+str(len(cols))
if len(cols) == 11:
block="".join(cols[2].text.split())
panchayat="".join(cols[3].text.split())
worknameworkcode=cols[4].text
print worknameworkcode.encode("UTF-8")
executingLevel="".join(cols[5].text.split())
completionDateString="".join(cols[6].text.split())
laborComponent="".join(cols[7].text.split())
materialComponent="".join(cols[8].text.split())
actualLaborExpense="".join(cols[9].text.split())
actualMaterialExpense="".join(cols[10].text.split())
if completionDateString != '':
completionDate = time.strptime(completionDateString, '%d/%m/%Y')
completionDate = time.strftime('%Y-%m-%d %H:%M:%S', completionDate)
else:
completionDate=''
worknameworkcodearray=re.match(r'(.*)\(3306(.*)\)',worknameworkcode)
if worknameworkcodearray:
workName=worknameworkcodearray.groups()[0]
workCode='3306'+worknameworkcodearray.groups()[1]
query="insert into assets (blockCode,block,panchayat,fullfinyear,executingLevel,workCode,workName,completionDate,laborComponent,materialComponent,actualLaborExpense,actualMaterialExpense) values ('%s','%s','%s','%s','%s','%s','%s','%s',%s,%s,%s,%s) " % (blockCode,blockName,panchayat,finyear,executingLevel,workCode,workName,completionDate,str(laborComponent),str(materialComponent),str(actualLaborExpense),str(actualMaterialExpense))
#print query.encode("UTF-8")
try:
cur.execute(query)
except MySQLdb.IntegrityError,e:
errormessage=(time.strftime("%d/%m/%Y %H:%M:%S "))+str(e)+"\n"
continue
cur.execute(query)
i=i+1
#print str(i)+block+panchayat+workCode.encode("UTF-8")
if __name__ == '__main__':
main()
|
[
"togoli@gmail.com"
] |
togoli@gmail.com
|
f9637053b972322b18327b19537200a6e0b6944e
|
4027d8dafb6f60568f03357e329c09262161e963
|
/machinelearn/neural_network/logistic.py
|
f68ba50920fdc46698497029c196f4d0f14b714c
|
[] |
no_license
|
pentiumCM/machinelearn
|
a2bfa15d6e9f20fd604116f77186da76ebcc4f27
|
329bb9521b5e06e3471aa209fc87ca47f8d5fdcb
|
refs/heads/master
| 2022-12-08T23:43:05.784930
| 2021-05-24T04:02:23
| 2021-05-24T04:02:23
| 216,704,188
| 7
| 1
| null | 2022-12-08T09:30:07
| 2019-10-22T02:13:45
|
Python
|
UTF-8
|
Python
| false
| false
| 6,241
|
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@Author : pentiumCM
@Email : 842679178@qq.com
@Software: PyCharm
@File : logistic.py
@Time : 2019/10/27 21:49
@desc : 基于逻辑回归的单层感知器(神经网络)算法
'''
import numpy as np
import matplotlib.pyplot as plt
# 1. Activation function
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^-z), applied elementwise.

    :param z: scalar or numpy array
    :return: sigmoid(z), same shape as the input
    """
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# 2. Parameter initialization -- zeros are fine for logistic regression.
def init_param_withZeros(dim):
    """Create zero-valued starting parameters.

    :param dim: number of input features
    :return: (w, b) where w is a (dim, 1) zero column vector and b is 0
    """
    weights = np.zeros((dim, 1))
    bias = 0
    return weights, bias
# 3. Forward propagation (plus the gradients needed for back-prop).
def forward_propagate(w, b, X, Y):
    """Run one forward pass and compute the gradients and the cost.

    :param w: weight column vector, shape (dim, 1)
    :param b: scalar bias
    :param X: training inputs, shape (dim, m)
    :param Y: 0/1 labels, shape (1, m)
    :return: ({"dw": ..., "db": ...}, cost) -- gradients and the
             batch-averaged cross-entropy cost
    """
    m = X.shape[1]
    # Vectorized forward pass over the whole batch: a = sigmoid(w.T x + b).
    A = sigmoid(np.dot(w.T, X) + b)
    # Per-sample cross-entropy, averaged into the scalar cost.
    loss = Y * np.log(A) + (1 - Y) * np.log(1 - A)
    cost = -1 / m * np.sum(loss)
    # Backward pass: dL/dz = a - y, accumulated over the batch.
    dz = A - Y
    gradients = {
        "dw": 1 / m * np.dot(X, dz.T),
        "db": 1 / m * np.sum(dz),
    }
    return gradients, cost
# 4. Training loop: plain batch gradient descent on w and b.
def backward_propagate(w, b, X, Y, iters, learning_rate):
    """Run `iters` steps of batch gradient descent.

    :param w: initial weights, shape (dim, 1)
    :param b: initial bias (scalar)
    :param X: training inputs, shape (dim, m)
    :param Y: 0/1 labels, shape (1, m)
    :param iters: number of gradient-descent iterations
    :param learning_rate: step size
    :return: (w, b, gradients, costs) -- trained parameters, the last
             gradients, and the per-iteration cost history
    """
    costs = []
    for step in range(iters):
        gradients, cost = forward_propagate(w, b, X, Y)
        # Step both parameters against their gradients.
        w = w - learning_rate * gradients["dw"]
        b = b - learning_rate * gradients["db"]
        costs.append(cost)
        print("The cost in the %d th iteration is %f" % (step, cost))
    return w, b, gradients, costs
# 5. Prediction
def predict(w, b, X):
    """Predict a hard 0/1 label for every column of X.

    Computes a = sigmoid(w.T X + b) and thresholds at 0.5.

    :param w: trained weights (any shape reshapeable to (dim, 1))
    :param b: trained bias
    :param X: inputs, shape (dim, m)
    :return: (1, m) float array of 0.0 / 1.0 predictions
    """
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)
    # Threshold the activations: > 0.5 -> class 1, otherwise class 0.
    return (A > 0.5).astype(float)
# Scatter plot in XOY coordinates
def plot_dis_data(x, y, color, graph_name):
    '''
    Show the point distribution as a scatter plot.
    :param x: X coordinates
    :param y: Y coordinates
    :param color: per-point colors
    :param graph_name: chart title
    :return:
    '''
    plt.scatter(x, y, s=15, c=color)
    plt.title(graph_name)
    plt.show()
# Line chart in XOY coordinates
def plot_line_chart(data, xlabel, ylabel, graph_name):
    """Plot `data` as a line chart with the given axis labels and title."""
    plt.plot(data)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(graph_name)
    plt.show()
if __name__ == "__main__":
    # Toy 2-D dataset: X/Y are the two features, `label` the binary class.
    X = [0, 1.5, 2, 2, 2.25, 2.8, 3.2, 4]
    Y = [1.5, 3, 4, 3, 0.5, 2.8, 1.35, 4]
    label = [1, 1, 1, 1, 0, 0, 0, 0]
    # Class 1 is drawn blue, class 0 red.
    label_color = ['blue', 'red']
    color = []
    for i in label:
        if i == 1:
            color.append(label_color[0])
        else:
            color.append(label_color[1])
    # Scatter plot of the raw points.
    plot_dis_data(X, Y, color, 'Raw Data')
    # Normalize each feature: subtract the mean, then scale by the maximum.
    X = np.array(X)
    Y = np.array(Y)
    X = (X - np.average(X))
    Y = (Y - np.average(Y))
    X = X / X.max()
    Y = Y / Y.max()
    plot_dis_data(X, Y, color, 'Normalization Data')
    data_X = np.vstack((X, Y))      # shape (2, m): one column per sample
    data_label = np.array([label])  # shape (1, m)
    # Hyper-parameters.
    iters = 50            # gradient-descent iterations
    learning_rate = 0.5   # step size
    w, b = init_param_withZeros(data_X.shape[0])
    # Train.
    w, b, gradients, costs = backward_propagate(
        w, b, data_X, data_label, iters, learning_rate)
    # Bug fix: predict() takes (w, b, X); the extra data_label argument
    # used to raise TypeError here and below.
    Y_prediction = predict(w, b, data_X)
    # Plot the "cost vs. iteration" learning curve.
    plot_line_chart(
        costs,
        'iterations',
        'cost',
        "Learning rate =" +
        str(learning_rate))
    # Classify a user-supplied point, entered as "x y" on one line.
    point = input("Please enter a coordinates:\n")
    x = int(point.split(' ')[0])
    y = int(point.split(' ')[1])
    point_data = np.vstack((x, y))
    point_prediction = predict(w, b, point_data)
    print("The point is below to", end=" ")
    print(point_prediction[0, 0])
|
[
"842679178@qq.com"
] |
842679178@qq.com
|
73937dbd616aa7fd615db0615064c976542f5ef3
|
c9ab605cdd2dbf92c9de05768ade0ecf1718be02
|
/algorithm/t3.py
|
74cec31f9cfc6fb539cb226f55bf3b5622627dae
|
[] |
no_license
|
PyeongGang-Kim/TIL
|
42d69308cf99d2e07644b51d7636e1b64551a697
|
8711501d131ee7d78fdaac544dda2008adf820a1
|
refs/heads/master
| 2023-01-12T21:10:38.027946
| 2021-10-23T07:19:48
| 2021-10-23T07:19:48
| 195,937,990
| 10
| 1
| null | 2023-01-07T11:25:30
| 2019-07-09T05:22:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 814
|
py
|
def solution(money):
    """Circular house-robber: maximum loot when house 0 and the last
    house are adjacent, so they can never both be robbed.

    Two independent DP passes cover the cases:
      * house 0 robbed  -> the last house must be skipped;
      * house 0 skipped -> the last house may be robbed.
    """
    n = len(money)
    # Case 1: house 0 is robbed; sweep houses 0 .. n-2 only.
    take1 = [0] * n   # best total up to i with house i robbed
    skip1 = [0] * n   # best total up to i with house i skipped
    take1[0] = money[0]
    for i in range(1, n - 1):
        take1[i] = money[i] + skip1[i - 1]
        skip1[i] = max(take1[i - 1], skip1[i - 1])
    # Case 2: house 0 is skipped; sweep houses 1 .. n-1.
    take2 = [0] * n
    skip2 = [0] * n
    take2[1] = money[1]
    for i in range(2, n):
        take2[i] = money[i] + skip2[i - 1]
        skip2[i] = max(take2[i - 1], skip2[i - 1])
    return max(take1[-2], skip1[-2], take2[-1], skip2[-1])
print(solution([1, 2, 3, 1] ))
|
[
"pyeonggangkim@gmail.com"
] |
pyeonggangkim@gmail.com
|
84330b145a69b3630554ae4f66c56b7a6e6c2946
|
aa42be48004e22faf72e5a2cfcd4714cfba04ee7
|
/crafters/image/ImageCropper/__init__.py
|
98c16e7eb0bdd54389537e9ab4543e251ef70e54
|
[
"Apache-2.0"
] |
permissive
|
YueLiu1415926/jina-hub
|
e14b426924cb00f8253004271cda7f050ef1c3c4
|
e0a7dc95dbd69a55468acbf4194ddaf11fd5aa6c
|
refs/heads/master
| 2022-12-05T15:36:26.665207
| 2020-08-20T03:40:47
| 2020-08-20T03:40:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, Union
import numpy as np
from jina.executors.crafters import BaseCrafter
from .helper import _crop_image, _move_channel_axis, _load_image
class ImageCropper(BaseCrafter):
    """
    :class:`ImageCropper` crops the image with the specific crop box. The coordinate is the same coordinate-system in
    the :py:mod:`PIL.Image`.
    """
    def __init__(self, top: int, left: int, height: int, width: int, channel_axis: int = -1, *args, **kwargs):
        """
        :param top: the vertical coordinate of the top left corner of the crop box.
        :param left: the horizontal coordinate of the top left corner of the crop box.
        :param height: the height of the crop box.
        :param width: the width of the crop box.
        :param channel_axis: the axis referring to the channels
        """
        super().__init__(*args, **kwargs)
        self.top = top
        self.left = left
        self.height = height
        self.width = width
        self.channel_axis = channel_axis
    def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
        """
        Crop the input image array.
        :param blob: the ndarray of the image
        :returns: a chunk dict with the cropped image as float32 and the
            crop origin reported under ``location``.
        """
        # Normalize the channel layout via the helper, crop, then move the
        # channel axis back to where the caller expects it.
        raw_img = _load_image(blob, self.channel_axis)
        # NOTE(review): _crop_image returns the top/left it actually used --
        # presumably the (possibly adjusted) crop origin; confirm in .helper.
        _img, top, left = _crop_image(raw_img, target_size=(self.height, self.width), top=self.top, left=self.left)
        img = _move_channel_axis(np.asarray(_img), -1, self.channel_axis)
        return dict(offset=0, weight=1., blob=img.astype('float32'), location=(top, left))
|
[
"han.xiao@jina.ai"
] |
han.xiao@jina.ai
|
f13bebe05707028d5ef7c32256afa8695be99970
|
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
|
/ppo_baseline_DMB/WORKINGON/easy_ppo_v6/Exp_run_v0001.py
|
8fb7ec75b0875fad040abe46af4ef32294900a85
|
[] |
no_license
|
humorbeing/python_github
|
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
|
e4b4b49bee7e7e3843c6874717779ce8d619bd02
|
refs/heads/master
| 2023-01-22T21:51:20.193131
| 2020-01-26T21:47:23
| 2020-01-26T21:47:23
| 163,707,778
| 0
| 0
| null | 2022-12-27T15:37:48
| 2019-01-01T01:58:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,926
|
py
|
from collections import deque
import time
import torch
import numpy as np
try:
from .envs import make_vec_envs
except Exception: #ImportError
from envs import make_vec_envs
try:
from .model import Policy
except Exception:
from model import Policy
try:
from .ppo import PPO
except Exception:
from ppo import PPO
try:
from .storage import RolloutStorage
except Exception:
from storage import RolloutStorage
try:
from .util_this import Log
except Exception:
from util_this import Log
try:
from .evaluation import evaluate
except Exception:
from evaluation import evaluate
try:
from .utils_from_pytorch import get_vec_normalize
except Exception:
from utils_from_pytorch import get_vec_normalize
def ss(s=''):
    """Debug stop: print *s* inside a dashed banner, then exit the process."""
    import sys
    banner = ' ---' * 15
    for line in ('', banner, banner, '', s, '', banner, banner, ''):
        print(line)
    sys.exit()
log_name = 'ppo_PongrD4_6act_gamma'
args_env_name = 'Pong-ramDeterministic-v4'
args_num_processes = 10 # how many envs running, default: 10
args_seed = 1
args_gamma = 0.99
args_num_mini_batch = 10 # how many batchs to train, default: 32
args_clip_param = 0.2
args_ppo_epoch = 4 # in training weight after collection, how many epoch to train agent, default: 4
args_value_loss_coef = 0.5
args_entropy_coef = 0.01
args_lr = 0.0007
args_eps = 1e-5
args_max_grad_norm = 0.5
args_num_steps = 10 # in gathering rollouts, how many steps forward, default: 4
args_num_env_steps = 5e6 # total training steps
args_log_interval = 200
args_eval_interval = 200
def main():
    """Train a PPO agent on `args_env_name` using vectorized environments.

    Repeatedly collects `args_num_steps`-step rollouts from
    `args_num_processes` parallel envs, runs a PPO update, and
    periodically writes training and evaluation log lines.
    """
    # When enabled, the agent only chooses among 3 actions and they are
    # offset by +1 at env.step time (see below).
    # is_limit_action = True
    is_limit_action = False
    train_log = Log(log_name+'_train_log')
    evl_log = Log(log_name+'_evaluation_log')
    torch.set_num_threads(1)
    envs = make_vec_envs(
        args_env_name,
        args_seed,
        args_num_processes)
    if is_limit_action:
        envs.action_space.n = 3
    print('Number of Actions:', envs.action_space.n)
    # print(envs.action_space)
    # ss('hohoho')
    actor_critic = Policy(
        envs.observation_space.shape,
        envs.action_space)
    agent = PPO(
        actor_critic,
        args_clip_param,
        args_ppo_epoch,
        args_num_mini_batch,
        args_value_loss_coef,
        args_entropy_coef,
        lr=args_lr,
        eps=args_eps,
        max_grad_norm=args_max_grad_norm)
    rollouts = RolloutStorage(
        args_num_steps,
        args_num_processes,
        envs.observation_space.shape,
        envs.action_space)
    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    # print(obs)
    # ss('i am over it')
    num_updates = int(
        args_num_env_steps) // args_num_steps // args_num_processes
    episode_rewards = deque(maxlen=10)  # rolling window of finished-episode returns
    start = time.time()
    sum_re = torch.zeros(args_num_processes, 1)  # running per-env episode return
    for j in range(num_updates):
        # ---- Rollout collection phase ----
        for step in range(args_num_steps):
            with torch.no_grad():
                value, action, action_log_prob\
                    = actor_critic.act(rollouts.obs[step])
            # print(action)
            # print()
            # action = action + 1
            # print(action)
            # ss('hoiohasdfhioas')
            if is_limit_action:
                obs, reward, done, infos = envs.step(action+1)
            else:
                obs, reward, done, infos = envs.step(action)
            sum_re += reward
            # Record the return of any env whose episode just finished,
            # then reset its running sum.
            if any(done):
                for i in range(len(done)):
                    if done[i]:
                        episode_rewards.append(sum_re[i].item())
                        # print(done)
                        # print(sum_re[i])
                        sum_re[i] *= 0
            # masks is 0.0 where an episode ended; bad_masks flags
            # 'bad_transition' infos -- presumably time-limit terminations
            # handled specially in RolloutStorage (confirm in storage.py).
            masks = torch.FloatTensor(
                [[0.0] if done_ else [1.0] for done_ in done])
            bad_masks = torch.FloatTensor(
                [[0.0] if 'bad_transition' in info.keys() else [1.0]
                 for info in infos])
            rollouts.insert(obs, action,
                            action_log_prob,
                            value, reward,
                            masks, bad_masks)
        # ---- Update phase: bootstrap value, compute returns, PPO update ----
        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1])
        rollouts.compute_returns(
            next_value,
            args_gamma)
        value_loss, action_loss, dist_entropy = agent.update(rollouts)
        rollouts.after_update()
        # Periodic training log with reward statistics and throughput.
        if j % args_log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * args_num_processes * args_num_steps
            end = time.time()
            logstring = "E {}, N_steps {}, FPS {} mean/median" \
                        " {:.1f}/{:.1f}, min/max {:.1f}/{:.1f}" \
                        " Entropy {:.5f},V {:.5f},Action {:.5f}".format(
                j, total_num_steps,
                int(total_num_steps / (end - start)),
                np.mean(episode_rewards),
                np.median(episode_rewards), np.min(episode_rewards),
                np.max(episode_rewards),
                dist_entropy, value_loss,
                action_loss)
            # print(logstring)
            train_log.log(logstring)
        # Periodic evaluation with the current policy and the envs'
        # observation-normalization statistics.
        # if True:
        if (args_eval_interval is not None and len(episode_rewards) > 1
                and j % args_eval_interval == 0):
            total_num_steps = (j + 1) * args_num_processes * args_num_steps
            ob_rms = get_vec_normalize(envs).ob_rms
            ev_result = evaluate(actor_critic, ob_rms, args_env_name, args_seed,
                                 args_num_processes, is_limit_action=is_limit_action)
            ev_log_string = 'steps:'+str(total_num_steps)+'. '+ev_result
            evl_log.log(ev_log_string)
if __name__ == "__main__":
main()
|
[
"geemguang@gmail.com"
] |
geemguang@gmail.com
|
e3ab109cbd7ee8af1a38d19e640309ac777edf33
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_humidifies.py
|
eca272ad0fd541f4afc6614cbf8c1e3c03ae63f2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
#calss header
class _HUMIDIFIES():
def __init__(self,):
self.name = "HUMIDIFIES"
self.definitions = humidify
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['humidify']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
56ad186bf416e7055d7e7210a444f0856051c226
|
25ebf226893b44dd8a6b1b85cf80864579372892
|
/divide-two-integers/Wrong Answer/2-28-2021, 1:11:22 AM/Solution.py
|
6f112ee1acf15b23f426815b780ac2338de4df1f
|
[] |
no_license
|
TianrunCheng/LeetcodeSubmissions
|
db15f5a1a8e1bbecefc45cb0b2b5fbaa036aa6f5
|
00a5403f1950e039ccc370cb266b752faebb8e79
|
refs/heads/main
| 2023-06-29T21:51:43.029300
| 2021-07-22T03:12:15
| 2021-07-22T03:12:15
| 388,305,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# https://leetcode.com/problems/divide-two-integers
# (Bug fix: the original `//`-style comment above is a SyntaxError in Python.)
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """Integer-divide dividend by divisor without using *, / or %.

        Truncates toward zero and clamps the result to the signed 32-bit
        range, as the LeetCode problem statement requires.
        """
        INT_MIN, INT_MAX = -2147483648, 2147483647
        # Track the result sign, then work with non-negative magnitudes.
        neg = False
        if dividend < 0:
            dividend = 0 - dividend
            neg = not neg
        if divisor < 0:
            divisor = 0 - divisor
            neg = not neg
        # powers[k] holds divisor * 2**k, doubled (by self-addition) until
        # it covers the dividend.
        powers = [divisor]
        while powers[-1] < dividend:
            powers.append(powers[-1] + powers[-1])
        # Greedily subtract the largest fitting multiple; the yes/no choices
        # are the quotient's binary digits, most-significant first.
        bi_quotient = []
        for i in range(len(powers) - 1, -1, -1):
            # Bug fix: must be >= (not >) so exact multiples count,
            # e.g. divide(6, 3) used to return 1 instead of 2.
            if dividend >= powers[i]:
                bi_quotient.append(1)
                dividend = dividend - powers[i]
            else:
                bi_quotient.append(0)
        n = ''.join([str(elem) for elem in bi_quotient])
        quotient = int(n, 2)
        if neg:
            quotient = -quotient
        # Only -2**31 / -1 overflows 32 bits; clamp per problem statement.
        return max(INT_MIN, min(INT_MAX, quotient))
|
[
"tc905@georgetown.edu"
] |
tc905@georgetown.edu
|
9a69b3ede4a9045e9356c9c5067bc0b9f40dac61
|
e21599d08d2df9dac2dee21643001c0f7c73b24f
|
/practice/profile/cProfile/stats.py
|
5c1b922d260936e05ac059b23dec1b91f2af3de5
|
[] |
no_license
|
herolibra/PyCodeComplete
|
c7bf2fb4ce395737f8c67749148de98a36a71035
|
4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b
|
refs/heads/master
| 2022-07-17T05:39:03.554760
| 2020-05-03T07:00:14
| 2020-05-03T07:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
if __name__ == '__main__':
    import pstats
    # Load the saved cProfile output into a Stats object.
    p = pstats.Stats("result.out")
    # strip_dirs(): drop the irrelevant leading path information
    # sort_stats(): sort; accepts the same keys as cProfile itself
    # print_stats(): print the report; the argument limits the output
    # Sort by function name and print only the first 3 entries; a float
    # argument would instead mean "the top fraction" of entries.
    p.strip_dirs().sort_stats("name").print_stats(3)
    # Sort by cumulative time, then by function name.
    p.strip_dirs().sort_stats("cumulative", "name").print_stats(0.8)
    # Which functions called "bar"?
    p.print_callers("bar")
    # Which functions does "foo" call?
    p.print_callees("foo")
|
[
"ijumper@163.com"
] |
ijumper@163.com
|
9f5deeabe426194334c63fe23dfd1178c20184ec
|
7041c85dffb757c3e7063118730363f32ebb9b8a
|
/Algorithm/python 파일/20190129/글자수.py
|
0740ce2e51a31a4650217cb8239b85c97193425d
|
[] |
no_license
|
woonji913/til
|
efae551baff56f3ca16169b93185a65f4d81cd7a
|
a05efc68f88f535c26cb4d4a396a1e9cd6bf0248
|
refs/heads/master
| 2021-06-06T23:17:54.504620
| 2019-06-19T04:29:18
| 2019-06-19T04:29:18
| 163,778,844
| 1
| 0
| null | 2021-05-08T16:27:17
| 2019-01-02T01:08:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
import sys
# Read the test cases from the bundled input file instead of the console.
sys.stdin = open("글자수_input.txt", "r")
T = int(input())  # number of test cases
for tc in range(1, T + 1):
    str1 = str(input())
    str2 = str(input())
    # For each character of str1, count its occurrences in str2;
    # the answer is the highest such count.
    ans = []
    for i in str1:
        ans.append(str2.count(i))
    print(f"#{tc} {max(ans)}")
|
[
"johnnyboy0913@gmail.com"
] |
johnnyboy0913@gmail.com
|
f56a97f3a3b19d1678cd8892d3f96a6483ee6e44
|
0d9cd43c4bc56e917135dc329c5cd9c1a4cb2b87
|
/idangr/gui.py
|
53874d0a7f93456b78e7c1825872f8e07e060f58
|
[
"BSD-2-Clause"
] |
permissive
|
budanthara/IDAngr
|
8ec10ec9b3736d2419244161830a8bf90f957a63
|
0acbbf9847b728e8d0fccdc06ae63c3b971f5808
|
refs/heads/master
| 2020-03-22T07:43:05.641342
| 2018-06-27T08:23:29
| 2018-06-27T08:23:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
import manage
print "######### IDAngr GUI #########"
def show():
    """Open the IDAngr panel, prompting for a connection first if needed.

    Imports are local so the GUI modules are only loaded when the panel
    is actually requested.
    """
    if not manage.is_initialized():
        from init_gui import IDAngrConnectDialog
        # Ask for connection settings; if the dialog is cancelled,
        # nothing is shown.
        if IDAngrConnectDialog.go():
            from main_gui import idangr_panel_show
            idangr_panel_show()
    else:
        from main_gui import idangr_panel_show
        idangr_panel_show()
|
[
"andreafioraldi@gmail.com"
] |
andreafioraldi@gmail.com
|
44bd4131bcd523e76930ee093593e6c0c8c07d61
|
b9de33c6fb310ef69cba728b9de1a31165c3a031
|
/chapter_32/spam_static.py
|
d461de172a54a1bde7567489c009edee95058f0c
|
[] |
no_license
|
bimri/learning-python
|
2fc8c0be304d360b35020a0dfc16779f78fb6848
|
5f2fcc9a08f14e1d848530f84ce3b523d1f72aad
|
refs/heads/master
| 2023-08-12T20:30:09.754468
| 2021-10-15T20:53:49
| 2021-10-15T20:53:49
| 377,515,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
"Counting Instances with Static Methods"
# Tutorial module: three variants of staticmethod-based instance counting,
# each exercised by its own __main__ demo block.
class Spam:
    numInstances = 0 # Use static method for class data
    def __init__(self):
        Spam.numInstances += 1
    def printNumInstances():
        print("Number of instances: %s" % Spam.numInstances)
    printNumInstances = staticmethod(printNumInstances) # this version requires an extra staticmethod call # Now printNumInstances() is a static method
'''
Using the static method built-in, our code now allows the self-less method to be called
through the class or any instance of it
'''
if __name__ == "__main__":
    from spam_static import Spam
    a = Spam()
    b = Spam()
    c = Spam()
    Spam.printNumInstances() # Call as simple function
    a.printNumInstances() # Instance argument not passed
'''
allows subclasses to customize the static method with inheritance—a
more convenient and powerful approach than importing functions from the files in
which superclasses are coded.
'''
# Subclass overrides the static method but delegates back to the parent.
class Sub(Spam):
    def printNumInstances(): # Override a static method
        print("Extra stuff...") # But call back to original
        Spam.printNumInstances() # Call static method
    printNumInstances = staticmethod(printNumInstances) # Make printNumInstances a static method
if __name__ == "__main__":
    print()
    from spam_static import Spam, Sub
    a = Sub()
    b = Sub()
    a.printNumInstances() # Call from subclass instance
    Sub.printNumInstances() # Call from subclass itself
    Spam.printNumInstances() # Call from original/parent class
"""
Moreover, classes can inherit the static method without redefining it—it is run without
an instance, regardless of where it is defined in a class tree:
"""
class Other(Spam): pass # Inherit static method verbatim
if __name__ == "__main__":
    print()
    from spam_static import Other
    c = Other()
    c.printNumInstances()
"""
Notice how this also bumps up the superclass’s instance counter, because its constructor
is inherited and run
"""
|
[
"bimri@outlook.com"
] |
bimri@outlook.com
|
9b8c7faa9ecbc4bf81e1ed72473dbe553ffe7c31
|
df83f97ed2c6dd199005e96bc7c494cfb3b49f8c
|
/GeeksForGeeks/Chocolate Distribution Problem.py
|
803170c58228d3d30393c9b4bb6f758534761bf3
|
[] |
no_license
|
poojan14/Python-Practice
|
45f0b68b0ad2f92bbf0b92286602d64f3b1ae992
|
ed98acc788ba4a1b53bec3d0757108abb5274c0f
|
refs/heads/master
| 2022-03-27T18:24:18.130598
| 2019-12-25T07:26:09
| 2019-12-25T07:26:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
'''
//MEMORY ERROR
from itertools import permutations
if __name__=='__main__':
T=int(input())
for _ in range(T):
N=int(input())
A=list(map(int,input().split()))
M=int(input())
A.sort()
if M==1:print('0')
else:
lst=[]
l=list(permutations(A,M))
for ch in l:
ch=list(ch)
ch.sort()
lst.append(ch[-1]-ch[0])
print(min(lst))
'''
import sys
def MinimumDifference(arr, n, m):
    """Chocolate distribution: smallest (max - min) spread over m packets.

    Sorts `arr` in place, then slides a window of width m across the
    sorted values; the tightest window gives the answer.

    :param arr: packet sizes (sorted in place as a side effect)
    :param n: number of packets in arr
    :param m: number of students
    :return: minimal spread; 0 when n == 0 or m <= 1, -1 when m > n
    """
    if n == 0 or m == 0 or m == 1:
        return 0
    if m > n:
        return -1
    arr.sort()  # after sorting, the optimal m packets are contiguous
    best = sys.maxsize
    for start in range(n - m + 1):
        best = min(best, arr[start + m - 1] - arr[start])
    return best
if __name__=='__main__':
T=int(input())
for _ in range(T):
N=int(input())
A=list(map(int,input().split()))
M=int(input())
print(MinimumDifference(A,N,M))
|
[
"noreply@github.com"
] |
poojan14.noreply@github.com
|
14fc8447bbed8c468586a52217f4963fdec8fc15
|
e3b42e43555cb34e9a7f44c5e1e42b06c89e2b49
|
/envi/tests/msp430/irlc.py
|
f50db0e4a61b68e1fb1c41a4d5605096c4ae251d
|
[
"Apache-2.0"
] |
permissive
|
bat-serjo/vivisect-py3
|
77eed20e8e78ff0f5bbde57eb7709c68617aeb1d
|
75d58115b09c209a042713736181888fad31482c
|
refs/heads/master
| 2021-01-11T21:54:42.853791
| 2019-01-08T20:15:57
| 2019-01-08T20:15:57
| 78,873,268
| 12
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
checks = [
# RLC
(
'RLC r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x5555)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaaaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination C=1 + negative + overflow)',
{ 'regs': [(REG_R15, 0x5555)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaaab)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination carry + zero + overflow)',
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x4000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "0f6f", 'data': "" }
),
(
'RLC r15 (destination negative + carry)',
{ 'regs': [(REG_R15, 0xc000)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x8000)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "0f6f", 'data': "" }
),
# RLC.b
(
'RLC.b r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x1155)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xaa)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination C=1 + negative + overflow)',
{ 'regs': [(REG_R15, 0x1155)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0xab)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination carry + zero + overflow)',
{ 'regs': [(REG_R15, 0x1180)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 1), (SR_C, 1), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination negative + overflow)',
{ 'regs': [(REG_R15, 0x1140)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x80)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 0), (SR_V, 1)], 'code': "4f6f", 'data': "" }
),
(
'RLC.b r15 (destination negative + carry)',
{ 'regs': [(REG_R15, 0x11c0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f6f", 'data': "" },
{ 'regs': [(REG_R15, 0x80)], 'flags': [(SR_N, 1), (SR_Z, 0), (SR_C, 1), (SR_V, 0)], 'code': "4f6f", 'data': "" }
),
]
|
[
"jroi.martin@gmail.com"
] |
jroi.martin@gmail.com
|
1a59a935ece142b1fba84eebe71dbdb2f3ddd079
|
9161503ddd4d3044a9481cb519a4f30b7f371335
|
/venv/bin/pip3
|
3e07a514109385314762d4908a7ff1c67d334d3f
|
[] |
no_license
|
cuixiaozhao/HelloFlask
|
48112e72300549dc06cc5abfe3c0869a902ce9ab
|
165c8a69204f9dec9b09de72c4eb0468ec1d41a0
|
refs/heads/master
| 2020-03-28T13:36:39.983346
| 2018-09-12T03:48:55
| 2018-09-12T03:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
#!/Users/cuixiaozhao/PycharmProjects/Flask/HelloFlask/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"19930911cXS"
] |
19930911cXS
|
|
f008b4ce82e6c21360f7bef5df058c46001c8a1f
|
17f6881c70401dc63757cc7b5fa4d9dd396689e3
|
/src/main/com/libin/yfl/10.py
|
606053f9ee34ad635daf307378dc978dcea86602
|
[] |
no_license
|
BigDataRoad/Algorithm
|
0ab493eeb478125b4beb62d78ce18c73e30b0496
|
2f2fb4f4b84f6c9df8adbada63b327c43ce29ddd
|
refs/heads/master
| 2023-07-02T04:06:51.025648
| 2021-07-28T14:04:55
| 2021-07-28T14:04:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
'''
203. 移除链表元素
删除链表中等于给定值 val 的所有节点。
示例:
输入: 1->2->6->3->4->5->6, val = 6
输出: 1->2->3->4->5
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
cur_1 = ListNode(0)
cur = cur_1
while head:
if head.val == val:
head = head.next
else:
cur.next = ListNode(head.val)
cur = cur.next
head = head.next
return cur_1.next
|
[
"yangfengling@inttech.cn"
] |
yangfengling@inttech.cn
|
be9c5ee84e7952ac4d8ffdddcb2eb46f037ed1d2
|
4fcb2e797ba83b310fe05461d48f02931ea5a427
|
/2021/day-12/solution.py
|
d066e599b9ac7a138623fee02b10f61387a92589
|
[] |
no_license
|
BrentChesny/AdventOfCode
|
5a642d081505563f7518c5244bb814e9e4dfc5de
|
dad5224961539149bed5757bbae0ccc35a3a293d
|
refs/heads/master
| 2022-12-11T19:51:22.138655
| 2022-12-04T21:46:29
| 2022-12-04T21:46:29
| 47,266,210
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
from collections import defaultdict
def parse_input():
caves = defaultdict(list)
for line in open("input.txt"):
fr, to = line.strip().split("-")
caves[fr].append(to)
caves[to].append(fr)
return caves
def is_small(cave):
return cave.islower()
def solve(caves, current, visited):
if current == "end":
return 1
paths = 0
for destination in caves[current]:
if is_small(destination) and destination in visited:
continue
paths += solve(caves, destination, set(visited | {current}))
return paths
def solve_part_one():
caves = parse_input()
return solve(caves, "start", set())
def solve_with_revisit(caves, current, visited, revisited):
if current == "end":
return 1
paths = 0
for destination in caves[current]:
if is_small(destination):
if destination in visited:
if revisited:
continue
else:
if destination not in ["start", "end"]:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), destination
)
else:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), revisited
)
else:
paths += solve_with_revisit(
caves, destination, set(visited | {current}), revisited
)
return paths
def solve_part_two():
caves = parse_input()
return solve_with_revisit(caves, "start", set(), None)
def main():
print("Part one: ", solve_part_one())
print("Part two: ", solve_part_two())
if __name__ == "__main__":
main()
|
[
"brent.chesny@gmail.com"
] |
brent.chesny@gmail.com
|
a547a4cbb4dff01e327264fb4b7c55d089927cc9
|
9a5b81fd11a5e6fcae6ac166fc44a2d80f7c22e1
|
/pyflow/demo/helloWorld/helloWorld.py
|
0efe70710cb4bd9645ef0cc98de6e89dadd707b2
|
[] |
no_license
|
moleculo/pyflow
|
a636cbed88dc4014394bd8a55660e6e6f57fe977
|
62ecdf32889d099e5b37eac0b4e17ed6612c6443
|
refs/heads/master
| 2021-01-18T06:05:05.281246
| 2013-05-08T01:13:33
| 2013-05-08T01:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2012-2013 Illumina, Inc.
#
# This software is provided under the terms and conditions of the
# Illumina Open Source Software License 1.
#
# You should have received a copy of the Illumina Open Source
# Software License 1 along with this program. If not, see
# <https://github.com/downloads/sequencing/licenses/>.
#
#
# This demo shows possibly the simplist possible pyflow we can create --
# a single 'hello world' task. After experimenting with this file
# please see the 'simpleDemo' for coverage of a few more pyflow features
#
import os.path
import sys
# add module path by hand
#
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/../../src")
from pyflow import WorkflowRunner
# all pyflow workflows are written into classes derived from pyflow.WorkflowRunner:
#
class HelloWorkflow(WorkflowRunner) :
# a workflow is defined by overloading the WorkflowRunner.workflow() method:
#
def workflow(self) :
#
# The output for this task will be written to the file helloWorld.out.txt
#
self.addTask("easy_task1", "echo 'Hello World!' >| helloWorld.out.txt")
# Instantiate the workflow
#
wflow = HelloWorkflow()
# Run the worklow:
#
retval = wflow.run()
# done!
sys.exit(retval)
|
[
"csaunders@illumina.com"
] |
csaunders@illumina.com
|
5348c082b53d461884706e83f902f1bd079d2e12
|
31fb7c74b94e46a325e6b05501c6972a401cf423
|
/PYTHON/BASIC_PYTHON/수업내용/04/04-029.py
|
1d0861db9a70b872a9db5c3adf7ecd597cd10dd6
|
[] |
no_license
|
superf2t/TIL
|
f2dacc30d6b89f3717c0190ac449730ef341f6a4
|
cadaaf952c44474bed9b8af71e70754f3dbf86fa
|
refs/heads/master
| 2022-04-10T13:55:24.019310
| 2019-12-12T11:15:31
| 2019-12-12T11:15:31
| 268,215,746
| 1
| 0
| null | 2020-05-31T05:32:46
| 2020-05-31T05:32:46
| null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
#04-029.py
nums='1237894673683038478236749192738623234234'
if 1:
cnt = {}
for num in nums:
cnt.setdefault(num, 0)
cnt[num] += 1
else:
# 위에서 for문을 돌려서 만든 cnt를 Counter 메소드를 통해 한 번에 만들 수도 있음!
from collections import Counter
cnt = Counter(nums)
print(cnt)
# 1. 등장 횟수(빈도 수)를 기준으로 오름차순으로 정렬 가능!?!?
# 어렵고도 신기함..!
if 0:
# 1-1)
cnt_tmp = { i:cnt[i] for i in sorted(cnt, key = lambda x : cnt[x]) }
print(cnt_tmp)
else:
# 1-2)
cnt_tmp = { i:j for i, j in sorted(cnt.items(), key = lambda x : x[1]) }
print(cnt_tmp)
# 2. key 0 ~ 9까지의 ...
##cnt_tmp = dict.fromkeys("0123456789", 0)
cnt_tmp = { k : cnt.get(k, 0) for k in "0123456789"}
##for i in cnt_tmp:
## cnt_tmp[i] = cnt.get(i, 0)
print(cnt_tmp)
##from collections import Counter
##X = Counter(nums)
##y = [ (x, y) for x, y in X.items() ]
##y.sort()
##X = { x:y for x, y in y }
##print(X)
|
[
"noreply@github.com"
] |
superf2t.noreply@github.com
|
fbf299007fe1f34f9f48f8ad4ed2ef2bd8f6d4e2
|
1b2d5f0635459a02f82b574e5de632f67679210a
|
/5/11_sin_gru_tf.py
|
9a2132f121e7e66adb7875b7fbb2408c1a4302a2
|
[] |
no_license
|
ydocore/deeplearning-keras-tf2-torch
|
f9b117e693b4a122bfb37fc77ae082de2140afd7
|
19aa983de1b0f55985179549603327281b92fcb2
|
refs/heads/master
| 2022-12-22T16:26:11.249773
| 2020-09-15T12:54:26
| 2020-09-15T12:54:26
| 276,814,737
| 0
| 1
| null | 2020-07-03T05:24:38
| 2020-07-03T05:24:38
| null |
UTF-8
|
Python
| false
| false
| 4,399
|
py
|
'''
5.3.2 GRU - TensorFlow (sin波)
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GRU
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from callbacks import EarlyStopping
class RNN(Model):
def __init__(self, hidden_dim):
super().__init__()
self.l1 = GRU(hidden_dim, activation='tanh',
recurrent_activation='sigmoid',
kernel_initializer='glorot_normal',
recurrent_initializer='orthogonal')
self.l2 = Dense(1, activation='linear')
def call(self, x):
h = self.l1(x)
y = self.l2(h)
return y
if __name__ == '__main__':
np.random.seed(123)
tf.random.set_seed(123)
'''
1. データの準備
'''
def sin(x, T=100):
return np.sin(2.0 * np.pi * x / T)
def toy_problem(T=100, ampl=0.05):
x = np.arange(0, 2*T + 1)
noise = ampl * np.random.uniform(low=-1.0, high=1.0,
size=len(x))
return sin(x) + noise
T = 100
f = toy_problem(T).astype(np.float32)
length_of_sequences = len(f)
maxlen = 25
x = []
t = []
for i in range(length_of_sequences - maxlen):
x.append(f[i:i+maxlen])
t.append(f[i+maxlen])
x = np.array(x).reshape(-1, maxlen, 1)
t = np.array(t).reshape(-1, 1)
x_train, x_val, t_train, t_val = \
train_test_split(x, t, test_size=0.2, shuffle=False)
'''
2. モデルの構築
'''
model = RNN(50)
'''
3. モデルの学習
'''
criterion = losses.MeanSquaredError()
optimizer = optimizers.Adam(learning_rate=0.001,
beta_1=0.9, beta_2=0.999, amsgrad=True)
train_loss = metrics.Mean()
val_loss = metrics.Mean()
def compute_loss(t, y):
return criterion(t, y)
def train_step(x, t):
with tf.GradientTape() as tape:
preds = model(x)
loss = compute_loss(t, preds)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss(loss)
return loss
def val_step(x, t):
preds = model(x)
loss = compute_loss(t, preds)
val_loss(loss)
epochs = 1000
batch_size = 100
n_batches_train = x_train.shape[0] // batch_size + 1
n_batches_val = x_val.shape[0] // batch_size + 1
hist = {'loss': [], 'val_loss': []}
es = EarlyStopping(patience=10, verbose=1)
for epoch in range(epochs):
x_, t_ = shuffle(x_train, t_train)
for batch in range(n_batches_train):
start = batch * batch_size
end = start + batch_size
train_step(x_[start:end], t_[start:end])
for batch in range(n_batches_val):
start = batch * batch_size
end = start + batch_size
val_step(x_val[start:end], t_val[start:end])
hist['loss'].append(train_loss.result())
hist['val_loss'].append(val_loss.result())
print('epoch: {}, loss: {:.3}, val_loss: {:.3f}'.format(
epoch+1,
train_loss.result(),
val_loss.result()
))
if es(val_loss.result()):
break
'''
4. モデルの評価
'''
# sin波の予測
sin = toy_problem(T, ampl=0.)
gen = [None for i in range(maxlen)]
z = x[:1]
for i in range(length_of_sequences - maxlen):
preds = model.predict(z[-1:])
# preds = model(z[-1:])
z = np.append(z, preds)[1:]
z = z.reshape(-1, maxlen, 1)
gen.append(preds[0, 0])
# 予測値を可視化
fig = plt.figure()
plt.rc('font', family='serif')
plt.xlim([0, 2*T])
plt.ylim([-1.5, 1.5])
plt.plot(range(len(f)), sin,
color='gray',
linestyle='--', linewidth=0.5)
plt.plot(range(len(f)), gen,
color='black', linewidth=1,
marker='o', markersize=1, markerfacecolor='black',
markeredgecolor='black')
# plt.savefig('output.jpg')
plt.show()
|
[
"me@yusugomori.com"
] |
me@yusugomori.com
|
de058075cb519f64d30d752973071422f9008b5b
|
c27c51f5c33e0431dbe7db6e18c21b249d476cfa
|
/OpenSource_Python_Code/horizon-master/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/views.py
|
299ec446133658878663a3ffe30cf05d036707cd
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/Python_Stuff
|
9bef74e0db17bb5e3ba2d908ced01ee744820d80
|
9aa94a0fa5e4e802090c7b29ec88b840e304d9e5
|
refs/heads/master
| 2022-11-20T06:54:36.581623
| 2017-12-04T18:56:02
| 2017-12-04T18:56:02
| 282,171,169
| 0
| 0
| null | 2020-07-24T08:54:37
| 2020-07-24T08:54:36
| null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instance snapshots.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images_and_snapshots.snapshots \
import forms as project_forms
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateSnapshot
template_name = 'project/images_and_snapshots/snapshots/create.html'
success_url = reverse_lazy("horizon:project:images_and_snapshots:index")
@memoized.memoized_method
def get_object(self):
try:
return api.nova.server_get(self.request,
self.kwargs["instance_id"])
except Exception:
redirect = reverse('horizon:project:instances:index')
exceptions.handle(self.request,
_("Unable to retrieve instance."),
redirect=redirect)
def get_initial(self):
return {"instance_id": self.kwargs["instance_id"]}
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['instance'] = self.get_object()
return context
|
[
"thelma1944@gmail.com"
] |
thelma1944@gmail.com
|
24055a1a6e8b5a0c6a0d50ceec70784bc1200932
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/datadog/azure-mgmt-datadog/azure/mgmt/datadog/aio/_configuration.py
|
8f5a2a189161e3f0b90013118775e86eb5fd19a4
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,791
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MicrosoftDatadogClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for MicrosoftDatadogClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(MicrosoftDatadogClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2022-06-01") # type: Literal["2022-06-01"]
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-datadog/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
0106a8b00c02f1cf93357f0dbdee964c833a0cad
|
9ed385053e7f28bfd0c6f186fc4963faac43eb96
|
/store/models.py
|
3f05789263ca28aa75c52f50113c0aaacc36d485
|
[] |
no_license
|
Pagante/greatkart-django
|
ffadfb5d4827220f3df588fb1d21dc28f1359ce0
|
d4bb679c7fd270435f4ce0cc8854bdb3d2e134dd
|
refs/heads/main
| 2023-05-12T01:07:53.092949
| 2021-05-30T16:34:07
| 2021-05-30T16:34:07
| 365,899,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,999
|
py
|
from django.db import models
from django.db.models.aggregates import Count
from django.db.models.deletion import CASCADE
from django.db.models.expressions import Case
from django.urls.base import reverse
from category.models import Category
from accounts.models import Account
from django.db.models import Avg, Count
# Create your models here.
class Product(models.Model):
product_name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
description = models.TextField(max_length=500,blank=True)
price = models.IntegerField()
images = models.ImageField(upload_to='photos/products')
stock = models.IntegerField()
is_available = models.BooleanField(default=False)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
create_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
def get_url(self):
return reverse('product_detail', args= [self.category.slug, self.slug])
def __str__(self):
return self.product_name
def averageReview(self):
reviews = reviewRating.objects.filter(product=self, status=True).aggregate(average=Avg('rating'))
avg = 0
if reviews['average'] is not None:
avg = float(reviews['average'])
return avg
def countReviews(self):
reviews = reviewRating.objects.filter(product=self, status=True).aggregate(count=Count('id'))
count=0
if reviews['count'] is not None:
count = int(reviews['count'])
return count
class VariationManager(models.Manager):
def colors(self):
return super(VariationManager, self).filter(variation_category='color', is_active = True)
def sizes(self):
return super(VariationManager, self).filter(variation_category ='size', is_active = True)
variation_category_choices = (
('color', 'color'),
('size', 'size')
)
class Variation(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
variation_category = models.CharField(max_length=200, choices= variation_category_choices)
variation_value = models.CharField(max_length=200)
is_active = models.BooleanField(default=True)
create_date = models.DateTimeField(auto_now=True)
objects = VariationManager()
def __str__(self):
return self.variation_value
class reviewRating(models.Model):
product = models.ForeignKey(Product, on_delete= models.CASCADE)
user = models.ForeignKey(Account, on_delete=CASCADE)
subject = models.CharField(max_length=50, blank=True)
reviews = models.TextField(max_length=500, blank=True)
rating = models.FloatField()
ip = models.CharField(max_length=20)
status = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.subject
|
[
"55301195+Pagante@users.noreply.github.com"
] |
55301195+Pagante@users.noreply.github.com
|
1a7bd63272c5441eea5544e3e4fd0a3dd2a93d9b
|
537345f90de44dac4e2a20037d21f858f82e3120
|
/concatenateGenbankFiles.py
|
08d2e28a0b99eda871a365d7827e7b9b97120c28
|
[] |
no_license
|
kaiyaprovost/misc_scripts
|
f8fc8ca646c5c97ad3495e612bc9656e2b8d238c
|
5c460ea608c13ff271fa6772fe548b89aa68c225
|
refs/heads/master
| 2021-11-11T15:33:34.211463
| 2021-11-10T23:11:56
| 2021-11-10T23:11:56
| 237,049,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 9 18:27:49 2016
@author: kprovost
This script takes multiple genbank .gb files adn turns them into one file
Usage: give path, creates a folder
python concatenateGenbankFiles.py <optional path>
"""
def concatGB(filename,outfile):
with open(filename,"r") as infile, open(outfile,"a") as outfile:
entry = infile.read()
outfile.write(entry+"\n")
def main():
import glob
import os
import sys
#path = sys.argv[1]
try:
path = sys.argv[1]
print("\tPath entered: ",searchTerm)
except:
print("No path given, using current working directory")
path = os.getcwd()
print(path)
os.chdir(path)
outpath = path+"/concatGenbankFiles/"
if not os.path.exists(outpath):
print("creating folder: ",outpath)
os.makedirs(outpath)
concatName = "ConcatenatedGbFiles.gb"
print("Concatenated file: ",concatName)
outfile = outpath+concatName
os.chdir(path)
for filename in glob.glob("*.gb"):
concatGB(filename,outfile)
if __name__ == "__main__":
main()
|
[
"17089935+kaiyaprovost@users.noreply.github.com"
] |
17089935+kaiyaprovost@users.noreply.github.com
|
9395ac23fad1778148b66efae8bb997bf22d7431
|
18a645c8e543c905528364fad8c429e209903e80
|
/acapy-client/acapy_client/api/issue_credential_v_10/post_issue_credential_send_offer.py
|
7d95d291316c0d7eac46e78b101473fa2a2c0925
|
[] |
no_license
|
cjhowland/acapy-revocation-demo
|
854e9aff4236c034ae9cc00206abde87f257bc45
|
01c21eb38d085c5633e505908c26c2e9ebfe3110
|
refs/heads/main
| 2023-07-16T02:01:05.659695
| 2021-05-12T17:00:44
| 2021-05-12T17:00:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,430
|
py
|
from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.v10_credential_exchange import V10CredentialExchange
from ...models.v10_credential_offer_request import V10CredentialOfferRequest
from ...types import Response
def _get_kwargs(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Dict[str, Any]:
url = "{}/issue-credential/send-offer".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
json_json_body = json_body.to_dict()
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"json": json_json_body,
}
def _parse_response(*, response: httpx.Response) -> Optional[V10CredentialExchange]:
if response.status_code == 200:
response_200 = V10CredentialExchange.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[V10CredentialExchange]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Response[V10CredentialExchange]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
response = httpx.post(
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Optional[V10CredentialExchange]:
""" """
return sync_detailed(
client=client,
json_body=json_body,
).parsed
async def asyncio_detailed(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Response[V10CredentialExchange]:
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
async with httpx.AsyncClient() as _client:
response = await _client.post(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: Client,
json_body: V10CredentialOfferRequest,
) -> Optional[V10CredentialExchange]:
""" """
return (
await asyncio_detailed(
client=client,
json_body=json_body,
)
).parsed
|
[
"dbluhm@pm.me"
] |
dbluhm@pm.me
|
299bb263f3c5c29e06546b2fafcc922341219476
|
7e516383bd528e79719f04e88e8839671de5f81b
|
/l10n_ec_talent_growth/__manifest__.py
|
7342e79f47b66f49b21ce8565d79023d863d2ae3
|
[] |
no_license
|
hc-mic29/primerabase
|
c96b1bd8ee77d4217b528dd4f9f50274f5711fca
|
16fcc33bbf5bfcda236cc1a7a595cccf15aa5b44
|
refs/heads/main
| 2023-06-14T23:50:46.970941
| 2021-07-06T22:38:40
| 2021-07-06T22:38:40
| 383,600,215
| 0
| 0
| null | 2021-07-06T21:17:38
| 2021-07-06T21:17:38
| null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# -*- coding: utf-8 -*-
{
'name': "Talent Growth",
'summary': """
Modulo de talent Growth""",
'description': """
En el presente modulo se llevara acabo el control de desarrollo y crecimiento del personal
""",
'author': "Opa Consulting",
'website': "http://www.opa-consulting.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/12.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Employee',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr'],
# always loaded
'data': [
'security/hr_talent_growth_security.xml',
'security/ir.model.access.csv',
'views/hr_employee_view.xml',
'views/hr_talent_growth.xml',
# 'views/templates.xml',
],
# only loaded in demonstration mode
'demo': [
# 'demo/demo.xml',
],
'installable':True,
'application':True,
}
|
[
"glabois@opa-consulting.com"
] |
glabois@opa-consulting.com
|
7d4ec04cdb2b19f3b0eb63afcab1dce44a9b3f4a
|
62def70e2d802375b1ad28b0ac85fee2010ee0a9
|
/flask/server/app2.py
|
5ba7860c73d4a3a14efbd47ab066e88ac9058194
|
[] |
no_license
|
MarkAYoder/BeagleBoard-exercises
|
c48028b6e919d8c04dedfd2040a133c760f0f567
|
2fab7c7f7aa09bf101168dfb279e690bc43a6514
|
refs/heads/master
| 2023-07-22T08:06:19.482358
| 2023-07-12T19:24:51
| 2023-07-12T19:24:51
| 5,111,513
| 48
| 41
| null | 2021-07-29T18:02:29
| 2012-07-19T15:07:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/env python3
# From: https://towardsdatascience.com/python-webserver-with-flask-and-raspberry-pi-398423cc6f5d
import gpiod
CHIP = '0' # P9_11
offsets=[30]
from flask import Flask, render_template
app = Flask(__name__)
chip = gpiod.Chip(CHIP)
lines = chip.get_lines(offsets)
# Set button as an input
lines.request(consumer="app2.py", type=gpiod.LINE_REQ_DIR_IN)
@app.route("/")
def index():
# Read Button Status
vals = lines.get_values()
templateData = {
'title' : 'GPIO input Status!',
'button' : vals,
}
return render_template('index2.html', **templateData)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8081, debug=True)
|
[
"Mark.A.Yoder@Rose-Hulman.edu"
] |
Mark.A.Yoder@Rose-Hulman.edu
|
5971a56f860c99200f932f59b086d7cf6ebe4b6a
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/03_model_fitting/merraRF882/641-tideGauge.py
|
dac5196ec7ba7de35a7df47a6c6f4edc7937cd21
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,456
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 641
y = 642
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
print('Predictors and Surge don''t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
d0306b119643795c0a1f9cc58722de910337f986
|
83efa5604be59078372c55998b9c686774e73e89
|
/utils/utils.py
|
17f3c5461f1aaaaefd48e7dc908a631518ace97a
|
[
"MIT"
] |
permissive
|
Dawa406/import_to_gee
|
5c7a2db656cf6fe8ad3b4d954bcc38e06b4a0d32
|
4d13a261fff371eb6a18076fdd1ea742fddd814b
|
refs/heads/master
| 2023-02-07T14:12:07.878640
| 2020-12-24T08:35:31
| 2020-12-24T08:35:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import os
import ee
from utils import message as ms
# Connect to the Earth Engine API at import time (requires prior authentication).
ee.Initialize()
def display_asset(output, asset):
    """Strip the legacy manifest prefix from *asset* and report the short name
    to the user through *output* as a success message."""
    short_name = asset.replace('projects/earthengine-legacy/assets/', '')
    output.add_msg(ms.asset_created.format(short_name), 'success')
    return
def isAsset(asset_descripsion, folder):
    """Check if the asset already exist in the user asset folder

    Args:
        asset_descripsion (str) : the descripsion of the asset
        folder (str): the folder of the glad assets

    Returns:
        exist (bool): true if already in folder
    """
    target_name = os.path.join(folder, asset_descripsion)
    for candidate in ee.data.listAssets({'parent': folder})['assets']:
        if candidate['name'] == target_name:
            return True
    return False
|
[
"pierrick.rambaud49@gmail.com"
] |
pierrick.rambaud49@gmail.com
|
fab5b4d8041420a521a36b1045fd6be52e330cc1
|
e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6
|
/nova/cmd/compute.py
|
08389a16f24119b2c164ccd399e82698802ab5ff
|
[] |
no_license
|
KevinKaiQian/polar-bear
|
46a814c746246394f76505846166673a049f12f2
|
61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e
|
refs/heads/master
| 2022-04-29T02:15:35.536039
| 2021-05-19T12:33:07
| 2021-05-19T12:33:07
| 172,068,536
| 2
| 0
| null | 2022-03-29T21:56:51
| 2019-02-22T13:11:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Compute."""
import shlex
import sys
import os
#sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from oslo_log import log as logging
from oslo_privsep import priv_context
#import pdb;pdb.set_trace()
from nova.cmd import common as cmd_common
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
from nova.i18n import _LW
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils
#from nova import version
from nova import rpc
from nova.db.sqlalchemy import api as sqlalchemy_api
CONF = config.CONF
LOG = logging.getLogger('nova.compute')
def main():
    """Entry point for the nova-compute service.

    Sets up logging, RPC messaging and DB access, registers versioned
    objects, wires the conductor indirection API when local conductor mode
    is disabled, then creates and serves the nova-compute service until
    shutdown.
    """
    #config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    # Default the RPC control exchange to 'nova' and initialize messaging.
    rpc.set_defaults(control_exchange='nova')
    rpc.init(CONF)
    sqlalchemy_api.configure(CONF)
    #priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    #utils.monkey_patch()
    # Register all versioned object classes before any RPC traffic happens.
    objects.register_all()
    #gmr.TextGuruMeditation.setup_autorun(version)
    if not CONF.conductor.use_local:
        # Remote conductor: forbid direct DB access from this process and
        # route object persistence calls through the conductor RPC API.
        cmd_common.block_db_access('nova-compute')
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    else:
        LOG.warning(_LW('Conductor local mode is deprecated and will '
                        'be removed in a subsequent release'))
    #import pdb;pdb.set_trace()
    # Build and run the compute service; serve() starts it, wait() blocks.
    server = service.Service.create(binary='nova-compute',
                                    topic=CONF.compute_topic,
                                    db_allowed=CONF.conductor.use_local)
    service.serve(server)
    service.wait()
if "__main__" == __name__:
    main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
a3023c4c318b3cbafd0372ec93f51a1666a9e0cf
|
090e04cd5c7f020a03eb6f0dfdb7d37cce555288
|
/my_navigation_interface/navigation_interface.py
|
55904118838f7e447016caec8a17dff0372079da
|
[] |
no_license
|
imfog/Groove
|
dbcddbc040dbd4cd30991b20568046d9ac5590d3
|
3bcdc980b798e901eb1e3e87ebdada268c36f1d4
|
refs/heads/master
| 2022-12-28T21:57:12.125621
| 2020-10-15T03:32:15
| 2020-10-15T03:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,491
|
py
|
# coding:utf-8
from ctypes.wintypes import HWND
from PyQt5.QtCore import Qt, pyqtSignal, QPoint, QEvent
from PyQt5.QtWidgets import QWidget
from .navigation_bar import NavigationBar
from .navigation_widget import NavigationWidget
from .navigation_menu import NavigationMenu
from effects import WindowEffect
class NavigationInterface(QWidget):
    """Navigation interface.

    Hosts three mutually exclusive navigation views and switches between
    them: a narrow navigation bar (COMPACT), a pop-up navigation menu drawn
    over the window (OVERLAY), and a navigation widget expanded in line with
    the window (IN_LINE).
    """
    # Display-mode constants (reported through displayModeChanged)
    COMPACT = 0  # collapsed: only the narrow navigation bar is visible
    OVERLAY = 1  # navigation menu shown as an overlay; window not expanded
    IN_LINE = 2  # navigation widget expanded inside the window
    displayModeChanged = pyqtSignal(int)   # emits the new display mode
    switchInterfaceSig = pyqtSignal(int)   # request switch to interface index
    showPlayingInterfaceSig = pyqtSignal()
    showCreatePlaylistPanelSig = pyqtSignal()
    switchToSettingInterfaceSig = pyqtSignal()
    switchToMyMusicInterfaceSig = pyqtSignal()
    switchToPlaylistCardInterfaceSig = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self.effect = WindowEffect()
        # create the three navigation views
        self.navigationBar = NavigationBar(self)
        self.navigationWidget = NavigationWidget(self)
        self.navigationMenu = NavigationMenu(self)
        self.__navigation_list = [self.navigationBar,
                                  self.navigationWidget, self.navigationMenu]
        # flags tracking the current display mode / expansion state
        self.__displayMode = self.COMPACT
        self.__isExpanded = False
        self.__isOverlay = False
        # initialize child widgets
        self.__initWidget()

    def __initWidget(self):
        """Initialize child widgets: size, initial selection and wiring."""
        self.resize(self.navigationBar.width(), 800)
        self.setCurrentIndex(0)
        self.navigationWidget.hide()
        # connect signals to slots
        self.__connectSignalToSlot()
        # install event filter (to restore the bar when the menu hides)
        self.navigationMenu.installEventFilter(self)

    def __connectSignalToSlot(self):
        """Connect signals to slots."""
        # forward interface-switch requests from the child views
        self.navigationBar.switchInterfaceSig.connect(self.switchInterfaceSig)
        self.navigationMenu.switchInterfaceSig.connect(self.switchInterfaceSig)
        # keep the selected-button state in sync across all three views
        self.navigationBar.selectedButtonChanged.connect(
            self.__selectedButtonChangedSlot)
        self.navigationWidget.selectedButtonChanged.connect(
            self.__selectedButtonChangedSlot)
        self.navigationMenu.selectedButtonChanged.connect(
            self.__selectedButtonChangedSlot)
        # forward interface-switch requests from the in-line widget
        self.navigationWidget.switchInterfaceSig.connect(
            self.switchInterfaceSig)
        # expand/collapse button clicks
        self.navigationBar.showMenuButton.clicked.connect(
            self.__expandNavigationWindow)
        self.navigationBar.searchButton.clicked.connect(
            self.__expandNavigationWindow)
        self.navigationMenu.showBarButton.clicked.connect(
            self.__collapseWindow)
        self.navigationWidget.showBarButton.clicked.connect(
            self.__collapseWindow)
        self.navigationMenu.playingButton.clicked.connect(
            self.__collapseWindow)
        # buttons shared by all three views forward to this widget's signals
        for widget in self.__navigation_list:
            widget.playingButton.clicked.connect(
                self.showPlayingInterfaceSig)
            widget.settingButton.clicked.connect(
                self.switchToSettingInterfaceSig)
            widget.musicGroupButton.clicked.connect(
                self.switchToMyMusicInterfaceSig)
            widget.playlistButton.clicked.connect(
                self.switchToPlaylistCardInterfaceSig)
            widget.createPlaylistButton.clicked.connect(
                self.showCreatePlaylistPanelSig)

    def resizeEvent(self, e):
        """Resize the three navigation views to match the new height."""
        self.navigationBar.resize(self.navigationBar.width(), self.height())
        self.navigationMenu.resize(self.navigationMenu.width(), self.height())
        self.navigationWidget.resize(
            self.navigationWidget.width(), self.height())

    def eventFilter(self, obj, e: QEvent):
        """Filter events: show the bar again when the overlay menu hides."""
        if obj == self.navigationMenu:
            if e.type() == QEvent.Hide:
                self.navigationBar.show()
        return super().eventFilter(obj, e)

    def __expandNavigationWindow(self):
        """Expand the navigation window (IN_LINE or OVERLAY depending on flag)."""
        self.__isExpanded = True
        if not self.__isOverlay:
            # show the in-line navigation widget
            self.__displayMode = self.IN_LINE
            self.resize(self.navigationWidget.width(), self.height())
            self.navigationWidget.updateWindow()
            self.displayModeChanged.emit(self.IN_LINE)
            self.navigationWidget.show()
            self.navigationBar.hide()
        else:
            # show the overlay navigation menu
            self.__displayMode = self.OVERLAY
            self.navigationMenu.move(self.mapToGlobal(QPoint(0, 0)))
            self.navigationMenu.updateWindow()
            self.navigationMenu.aniShow()
            # self.displayModeChanged.emit(self.OVERLAY)
            self.navigationBar.hide()

    def __collapseWindow(self):
        """Collapse back to the COMPACT bar, hiding menu/widget as needed."""
        self.__isExpanded = False
        self.__displayMode = self.COMPACT
        self.navigationBar.show()
        self.navigationWidget.hide()
        # the menu hides with animation unless the playing button triggered it
        if self.sender() is self.navigationMenu.showBarButton:
            self.navigationMenu.aniHide()
        elif self.sender() is self.navigationMenu.playingButton:
            self.navigationMenu.hide()
        self.resize(self.navigationBar.width(), self.height())
        self.displayModeChanged.emit(self.__displayMode)

    def setOverlay(self, isOverlay: bool):
        """Choose whether expanding the interface uses the OVERLAY mode."""
        self.__isOverlay = isOverlay

    def __selectedButtonChangedSlot(self, name):
        """Propagate a selection change to the other two views."""
        for widget in self.__navigation_list:
            if not (widget is self.sender()):
                widget.setSelectedButton(name)

    def setCurrentIndex(self, index: int):
        """Select the button at *index* in all three views."""
        for widget in self.__navigation_list:
            widget.setCurrentIndex(index)

    def updateWindow(self):
        """Refresh the expanded views (menu and in-line widget)."""
        self.navigationMenu.updateWindow()
        self.navigationWidget.updateWindow()

    @property
    def isOverlay(self):
        """bool: whether expansion uses the overlay menu."""
        return self.__isOverlay

    @property
    def isExpanded(self):
        """bool: whether the navigation window is currently expanded."""
        return self.__isExpanded

    @property
    def displayMode(self):
        """int: current display mode (COMPACT / OVERLAY / IN_LINE)."""
        return self.__displayMode
|
[
"1319158137@qq.com"
] |
1319158137@qq.com
|
e792e19c6c071844b8e14e4097606cc35d55f43f
|
4f8a363ad77ffa2772d1916673a390719729ff0f
|
/example/example.py
|
afc0c571b7435400d8452ab1ee83c2922e2697b0
|
[
"MIT"
] |
permissive
|
HannahVMeyer/bgen-reader-py
|
c9453063a612c2bc7690c97809a1e746f61b8ebc
|
2bbdfbec30df98550d53a13d253fb580bc401690
|
refs/heads/master
| 2020-03-23T07:10:46.479208
| 2018-07-16T12:49:23
| 2018-07-16T12:49:23
| 141,252,940
| 0
| 0
| null | 2018-07-17T07:58:06
| 2018-07-17T07:58:06
| null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from bgen_reader import read_bgen
if __name__ == "__main__":
    # Load the example BGEN file and print a summary of its contents.
    bgen = read_bgen("example.bgen", verbose=False)
    variants = bgen["variants"]
    samples = bgen["samples"]
    genotype = bgen["genotype"]
    print(variants.head())
    print(samples.head())
    print(len(genotype))
    # Materialize the (lazy) genotype probabilities of the first variant.
    probs = genotype[0].compute()
    print(probs)
    print(probs.shape)
|
[
"danilo.horta@gmail.com"
] |
danilo.horta@gmail.com
|
051b4d205eb8834085b0c4a3388e32cdf989b777
|
2affcf450f0bab36b74dd7c8b29522ad38955155
|
/pyspedas/geotail/load.py
|
f27b097b0e3fde3d8f21e205f9036251f591478f
|
[
"MIT"
] |
permissive
|
nargesahmadi/pyspedas
|
1148de83641196681ad65b54e43df0d0c185baf2
|
73ebdabcdef0f6e1087a2a5eb18c3e2384c4fb54
|
refs/heads/master
| 2022-02-16T18:51:35.204137
| 2022-01-10T23:25:20
| 2022-01-10T23:25:20
| 174,030,853
| 0
| 0
|
MIT
| 2019-03-05T22:34:32
| 2019-03-05T22:34:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
from pyspedas.utilities.dailynames import dailynames
from pyspedas.utilities.download import download
from pyspedas.analysis.time_clip import time_clip as tclip
from pytplot import cdf_to_tplot
from .config import CONFIG
def load(trange=['2013-11-5', '2013-11-6'],
         instrument='mgf',
         datatype='k0',
         suffix='',
         get_support_data=False,
         varformat=None,
         varnames=[],
         downloadonly=False,
         notplot=False,
         no_update=False,
         time_clip=False):
    """
    This function loads data from the Geotail mission; this function is not meant
    to be called directly; instead, see the wrappers:
        pyspedas.geotail.mgf
        pyspedas.geotail.efd
        pyspedas.geotail.lep
        pyspedas.geotail.cpi
        pyspedas.geotail.epi
        pyspedas.geotail.pwi

    Raises
    ------
    ValueError
        If the instrument/datatype combination is not supported. (Previously
        an unsupported combination left ``pathformat`` undefined and the
        function died with an opaque UnboundLocalError.)
    """
    # Build the remote file-name pattern for the requested instrument/datatype.
    pathformat = None
    if instrument == 'mgf':
        if datatype == 'k0':
            pathformat = 'mgf/mgf_k0/%Y/ge_'+datatype+'_mgf_%Y%m%d_v??.cdf'
        elif datatype == 'eda3sec' or datatype == 'edb3sec':
            pathformat = 'mgf/'+datatype+'_mgf/%Y/ge_'+datatype+'_mgf_%Y%m%d_v??.cdf'
    elif instrument == 'efd':
        pathformat = instrument+'/'+instrument+'_'+datatype+'/%Y/ge_'+datatype+'_'+instrument+'_%Y%m%d_v??.cdf'
    elif instrument == 'lep':
        if datatype == 'k0':
            pathformat = 'lep/lep_k0/%Y/ge_'+datatype+'_lep_%Y%m%d_v??.cdf'
    elif instrument == 'cpi':
        pathformat = instrument+'/'+instrument+'_'+datatype+'/%Y/ge_'+datatype+'_'+instrument+'_%Y%m%d_v??.cdf'
    elif instrument == 'epi':
        # EPIC files live under the 'epic' directory on the server
        pathformat = 'epic/'+instrument+'_'+datatype+'/%Y/ge_'+datatype+'_'+instrument+'_%Y%m%d_v??.cdf'
    elif instrument == 'pwi':
        pathformat = instrument+'/'+instrument+'_'+datatype+'/%Y/ge_'+datatype+'_'+instrument+'_%Y%m%d_v??.cdf'
    if pathformat is None:
        raise ValueError(
            'Unsupported instrument/datatype combination: %s/%s' % (instrument, datatype))

    # find the full remote path names using the trange
    remote_names = dailynames(file_format=pathformat, trange=trange)

    # download (or locate cached copies of) the daily files
    out_files = []
    files = download(remote_file=remote_names, remote_path=CONFIG['remote_data_dir'], local_path=CONFIG['local_data_dir'], no_download=no_update)
    if files is not None:
        for file in files:
            out_files.append(file)
    out_files = sorted(out_files)

    if downloadonly:
        return out_files

    # load the CDF contents into tplot variables
    tvars = cdf_to_tplot(out_files, suffix=suffix, get_support_data=get_support_data, varformat=varformat, varnames=varnames, notplot=notplot)

    if notplot:
        return tvars

    if time_clip:
        for new_var in tvars:
            tclip(new_var, trange[0], trange[1], suffix='')

    return tvars
|
[
"egrimes@igpp.ucla.edu"
] |
egrimes@igpp.ucla.edu
|
6b396ca0c75e591c9c9ba4624a333c13ce6f7238
|
36cebe3f80c547aa43c8c015484d37cd8e70722b
|
/dingtalk/callback/__init__.py
|
bd11949f57e96341f0ec3b7a894c6ea0882f23cc
|
[
"Apache-2.0"
] |
permissive
|
007gzs/dingtalk-python
|
7e62f4a722484f9a98a22fc1ad21edebb6b7fddc
|
d9bc5d1294fc000cc7339b4b82c212c63a419cc6
|
refs/heads/master
| 2020-03-14T22:17:34.191090
| 2018-04-26T02:18:13
| 2018-04-26T02:18:13
| 131,817,956
| 1
| 0
| null | 2018-05-02T08:00:54
| 2018-05-02T08:00:53
| null |
UTF-8
|
Python
| false
| false
| 6,837
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2018/3/1 上午11:20
# @Author: BlackMatrix
# @Site: https://github.com/blackmatrix7
# @File: __init__.py
# @Software: PyCharm
from .crypto import *
from .callback import *
from functools import partial
from ..exceptions import DingTalkExceptions
from ..foundation import dingtalk_method, get_timestamp
__author__ = 'blackmatrix'

# Registry of dingtalk methods; presumably populated via the `method`
# decorator below — confirm against dingtalk_method's implementation.
METHODS = {}

# Decorator pre-bound to this module's METHODS registry.
method = partial(dingtalk_method, methods=METHODS)
class CallBack:
    """DingTalk callback helper.

    Wraps the encrypt/decrypt primitives and the callback registration API
    (register/update/query) plus signature generation and verification.
    """

    def __init__(self, auth, aes_key, token, callback_url, corp_id, noncestr):
        # auth: authentication object providing access_token
        # aes_key/token: callback encryption key and signing token
        # callback_url: URL DingTalk calls back; must answer encrypted "success"
        # corp_id: corporation id used as the encryption key id
        # noncestr: random string used when building signed responses
        self.auth = auth
        self.methods = METHODS
        self.aes_key = aes_key
        self.token = token
        self.callback_url = callback_url
        self.corp_id = corp_id
        self.noncestr = noncestr

    @property
    def timestamp(self):
        # Current timestamp from the shared foundation helper.
        return get_timestamp()

    def encrypt(self, plaintext, buf=None):
        """
        Encrypt data for DingTalk.
        :param plaintext: the plaintext to encrypt
        :param buf: optional buffer forwarded to the crypto helper
                    (semantics defined in dingtalk.callback.crypto)
        :return: the ciphertext
        """
        if self.aes_key is None:
            raise RuntimeError('加密解密前需要在初始化DingTalk App时传入aes_key')
        from dingtalk.callback.crypto import encrypt
        ciphertext = encrypt(aes_key=self.aes_key, plaintext=plaintext, key=self.corp_id, buf=buf)
        return ciphertext

    def decrypt(self, ciphertext: str):
        """
        Decrypt data received from DingTalk.
        :param ciphertext: the ciphertext to decrypt
        :return: (message, key, buf) as produced by the crypto helper
        """
        if self.aes_key is None:
            raise RuntimeError('加密解密前需要在初始化DingTalk App时传入aes_key')
        from dingtalk.callback.crypto import decrypt
        msg, key, buf = decrypt(self.aes_key, ciphertext)
        return msg, key, buf

    def encrypt_text(self, plaintext: str):
        """
        Encrypt a plain text string.
        :param plaintext: the plaintext to encrypt
        :return: the ciphertext
        """
        if self.aes_key is None:
            raise RuntimeError('加密解密前需要在初始化DingTalk App时传入aes_key')
        from dingtalk.callback.crypto import encrypt_text
        ciphertext = encrypt_text(aes_key=self.aes_key, plaintext=plaintext)
        return ciphertext

    def decrypt_text(self, ciphertext: str):
        """
        Decrypt a plain text string.
        :param ciphertext: the ciphertext to decrypt
        :return: the decrypted text
        """
        if self.aes_key is None:
            raise RuntimeError('加密解密前需要在初始化DingTalk App时传入aes_key')
        from dingtalk.callback.crypto import decrypt_text
        temp = decrypt_text(self.aes_key, ciphertext)
        return temp

    def register_callback(self, callback_tag):
        """
        Register the callback interface with DingTalk. Registration happens
        only once; to change it afterwards call update_callback instead.
        aes_key and callback_url must have been supplied when the DingTalk
        app was initialized, and callback_url must answer with JSON carrying
        the encrypted string "success" — return_success() builds exactly
        such a response.
        :param callback_tag: event tags to subscribe to
        :return: the API response data
        """
        if self.aes_key is None or self.callback_url is None:
            raise RuntimeError('注册回调前需要在初始化DingTalk App时传入aes_key和callback_url')
        data = register_callback(self.auth.access_token, self.token, callback_tag, self.aes_key, self.callback_url)
        return data

    def update_callback(self, callback_tag):
        """
        Update the callback interface registered with DingTalk.
        Only usable after register_callback has been called once.
        :param callback_tag: event tags to subscribe to
        :return: the API response data
        """
        if self.aes_key is None or self.callback_url is None:
            raise RuntimeError('更新回调前需要在初始化DingTalk App时传入aes_key和callback_url')
        data = update_callback(self.auth.access_token, self.token, callback_tag, self.aes_key, self.callback_url)
        return data

    def get_call_back_failed_result(self):
        """
        Fetch the DingTalk callbacks that failed to be processed.
        :return: the list of failed callbacks
        """
        data = get_callback_failed_result(self.auth.access_token)
        return data['failed_list']

    def generate_callback_signature(self, data, timestamp, nonce):
        """
        Build the callback signature; can be used to verify that the
        signature passed in by a DingTalk callback is legitimate.
        :param data: the (encrypted) payload to sign
        :param timestamp: timestamp included in the signature
        :param nonce: random string included in the signature
        :return: the signature
        """
        from .crypto import generate_callback_signature
        sign = generate_callback_signature(self.token, data, timestamp, nonce)
        return sign

    def check_callback_signature(self, signature, ciphertext, timestamp, nonce):
        """
        Verify the signature of a DingTalk callback request.
        Algorithm description:
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7386797.0.0.EkauZY&source=search&treeId=366&articleId=107524&docType=1
        :param signature: the signature to verify
        :param ciphertext: the encrypted payload
        :param timestamp: timestamp from the request
        :param nonce: random string from the request
        :return: True if the signature is valid
        """
        from .crypto import check_callback_signature
        return check_callback_signature(self.token, ciphertext, signature, timestamp, nonce)

    def return_success(self):
        """
        DingTalk callbacks must answer with a JSON payload carrying the
        encrypted string "success"; this helper builds one that meets the
        requirement.
        :return: dict with msg_signature, timeStamp, nonce and encrypt fields
        """
        # encrypt the literal "success"
        encrypt_str = self.encrypt('success').decode()
        # current timestamp
        timestamp = str(self.timestamp)
        # random string
        nonce = self.noncestr
        # sign the encrypted payload
        signature = self.generate_callback_signature(encrypt_str, timestamp, nonce)
        # assemble the response
        return {'msg_signature': signature, 'timeStamp': timestamp, 'nonce': nonce, 'encrypt': encrypt_str}

    def check_url(self, ding_nonce, ding_sign, ding_timestamp, ding_encrypt):
        """
        The check_url handler DingTalk invokes when registering a callback.
        Docs:
        https://open-doc.dingtalk.com/docs/doc.htm?spm=a219a.7629140.0.0.x75fVY&treeId=385&articleId=104975&docType=1#s12
        :param ding_nonce: random string sent by DingTalk
        :param ding_sign: signature sent by DingTalk
        :param ding_timestamp: timestamp sent by DingTalk
        :param ding_encrypt: encrypted payload sent by DingTalk
        :return: the success JSON built by return_success()
        """
        # verify the signature first
        if self.check_callback_signature(ding_sign, ding_encrypt, ding_timestamp, ding_nonce) is False:
            raise DingTalkExceptions.sign_err
        # signature is good: decrypt the payload
        ding_data, corp_id, buf = self.decrypt(ding_encrypt)
        assert ding_data and corp_id and buf
        # answer with the encrypted "success" response
        result = self.return_success()
        return result


if __name__ == '__main__':
    # Import-only module; no CLI entry point.
    pass
|
[
"codecolor@outlook.com"
] |
codecolor@outlook.com
|
9ae7ef98bcee9e4b2d7db1dc46125c4be3eda2a4
|
b1571f4ee376d789b8094777fd81c4fb47a89cf1
|
/AtCoder/練習/others/sumitrust2019/D1.py
|
d4afae0be30a66063c7a97e2992736d77893989d
|
[] |
no_license
|
hiroyaonoe/Competitive-programming
|
e49e43f8853602ba73e658cab423bd91ebbe9286
|
2949e10eec3a38498bedb57ea41a2491916bab1c
|
refs/heads/master
| 2021-06-23T21:56:33.232931
| 2021-05-30T15:27:31
| 2021-05-30T15:27:31
| 225,863,783
| 2
| 0
| null | 2020-06-14T17:54:28
| 2019-12-04T12:37:24
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
# Count how many of the 1000 possible 3-digit PIN codes occur as a
# (not necessarily contiguous) subsequence of the input digit string.
n = int(input())
digits = list(map(int, input()))
total = 0
for first in range(10):
    if first not in digits:
        continue
    after_first = digits[digits.index(first) + 1:]
    for second in range(10):
        if second not in after_first:
            continue
        after_second = after_first[after_first.index(second) + 1:]
        for third in range(10):
            if third in after_second:
                total += 1
print(total)
|
[
"onoehiroya@gmail.com"
] |
onoehiroya@gmail.com
|
59f8182ce946966628f9ed37706d098e615c12dc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_048/ch25_2020_03_31_22_53_41_450736.py
|
44e131685efa1784e312a6679f408074c7a2fedb
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Projectile-range exercise: hit the target if the range lands in [98, 102].
velocidade = float(input("qual a velocidade"))
angulo = float(input("qual o angulo"))
gravidade = 9.8
import math
# Ideal projectile range: v^2 * sin(2*theta) / g
distancia = (velocidade ** 2) * math.sin(2 * angulo) / gravidade
if distancia < 98:
    print('Muito perto')
elif distancia > 102:
    print('Muito longe')
else:
    print('Acertou!')
|
[
"you@example.com"
] |
you@example.com
|
92c29e1e0f95c6c8c50f1bb43c30c205f4387ff9
|
69d407235771f364277f78aeb3a03896804cb690
|
/astrobf/run/run_mask.py
|
029b428e4d2404ca033b7017c4f8c8dc2fb933f6
|
[] |
no_license
|
Hoseung/astroBF
|
1bb16de1c867b943751ff0d73dfabc5ab7e723c6
|
e04efff26e99886c8b7eba42a897277318338d61
|
refs/heads/main
| 2023-07-17T12:27:10.184080
| 2021-09-01T05:42:05
| 2021-09-01T05:42:05
| 313,825,096
| 0
| 0
| null | 2021-07-22T15:16:58
| 2020-11-18T04:44:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,500
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
import sys, math
from glob import glob
from astropy.io import fits
import astrobf
from astrobf.utils import mask_utils
from astrobf.utils.mask_utils import *
import re
def extract_gid(g_path):
    """Return the numeric galaxy id embedded in the parent directory name.

    The id is the first run of digits in the second-to-last path component,
    e.g. ``.../PGC0012345x/file_r.fits`` -> ``12345``.

    Raises IndexError if the directory name contains no digits.
    """
    # NOTE: the original re-imported `re` locally, shadowing the module-level
    # import, and used a non-raw regex string ('\d' warns on Python 3.12+).
    return int(re.split(r'(\d+)', g_path.split('/')[-2])[1])
def chunks(lst, n):
    """Yield consecutive slices of *lst* of length *n* (last may be shorter)."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
# Batch script: build GMM-based masks for each r-band FITS image, pickle the
# masks, and save a 3x3 preview grid of the masked images per chunk.
dataset = ['EFIFI','Nair'][1]
basedir = ['../../bf_data/EFIGI_catalog/','./'][1]
fitsdir = basedir + ['fits_temp_Jan_19/','fits_temp_Dec_28/', 'fits_temp_Feb_3/'][2]
out_dir = basedir+'out1/'
#wdir = '../../OBSdata/efigi-1.6/ima_r/'
# Collect per-band FITS file lists; only the r-band list is actually processed.
fns_g = glob(fitsdir+"*/*g.fits")
fns_r = glob(fitsdir+"*/*r.fits")
fns_i = glob(fitsdir+"*/*i.fits")
fns_g.sort()
fns_r.sort()
fns_i.sort()
gids = [extract_gid(fn) for fn in fns_r]
sub_rows = 3   # preview grid is sub_rows x sub_rows images per chunk
eps = 1e-5     # small offset to keep pixel values strictly positive (for log10)
do_charm=False
FeatureVectors=[]
plt.ioff()
print("# files", len(fns_r))
for ichunk, sub in enumerate(chunks(fns_r, sub_rows**2)):
    # Resume hack: skip chunks already processed in a previous run.
    if ichunk < 597: # 300 / 500
        continue
    fig, axs = plt.subplots(sub_rows, sub_rows)
    fig.set_size_inches(12,12)
    # A 1x1 subplot grid returns a bare Axes with no .ravel()
    try:
        axs = axs.ravel()
    except:
        axs = [axs]
    for ax, fn in zip(axs, sub):
        #try:
        if True:
            img_name = fn.split("/")[-2]
            int_name = int(re.split('(\d+)',img_name)[1])
            #if int_name < 50229:
            #    continue
            if dataset=="Nair": img_name = img_name.split('.')[0]
            hdulist = fits.open(fn)
            # Ensure pixel values are positive
            hdulist[0].data -= (hdulist[0].data.min() - eps)
            #hdulist[0].data[hdulist[0].data < 10*eps] = eps
            mask, img, mask_new = mask_utils.gmm_mask(hdulist,
                                    max_n_comp=20,
                                    sig_factor=2.0,
                                    verbose=False,
                                    do_plot=False,
                                    npix_min=50)
            pickle.dump(mask_new, open(out_dir+f"{img_name}_mask.pickle", "wb"))
            # Feature Vectors
            # zero out everything outside the mask, then plot in log scale
            img[~mask] = 0
            ax.imshow(np.log10(img))
            #ax.imshow(mask, alpha=0.5)
            #mask_new = mask_hull(mask, ax)
            ax.text(0.05,0.05, img_name, transform=ax.transAxes)
            if do_charm:
                # Each 'matrix' is distinct instance??
                # And numpy_matrix is pointing to matrix..?
                matrix = PyImageMatrix()
                matrix.allocate(img.shape[1], img.shape[0])
                numpy_matrix = matrix.as_ndarray()
                numpy_matrix[:] = (img-img.min())/img.ptp()*255
                # Need to scale to ...?
                fv = FeatureVector(name='FromNumpyMatrix', long=True, original_px_plane=matrix )# Why not numpy_matrix??
                # fv == None for now.
                fv.GenerateFeatures(quiet=False, write_to_disk=True)
                FeatureVectors.append({img_name:fv.values})
                stamp = gen_stamp(img, pad=10, aspect_ratio="no", eps=eps)
                stamp -= (stamp.min() - eps)
            else:
                stamp = gen_stamp(img, pad=10, aspect_ratio="no", eps=eps)
                stamp -= (stamp.min() - eps)
        #except:
        # NOTE(review): with the try/except commented out, the two lines below
        # run on EVERY iteration — "ERROR" is printed unconditionally and the
        # `continue` is a no-op at the end of the loop body. Likely leftover
        # debugging; confirm intent before cleaning up.
        print("ERROR")
        continue
    plt.tight_layout()
    plt.savefig(out_dir+f"{ichunk}.png", dpi=144)
    plt.close()
    print(f'{ichunk}-th chunk done')
|
[
"hopung@gmail.com"
] |
hopung@gmail.com
|
1f18bb4f0f316c64abea5e788982bfbf7ef4a0b5
|
54f63580a298ffa63520771c734f5b6dd15894bc
|
/edabit/4_Hard/Next_Prime.py
|
1db7b12fc4d3d66d64f66a33297fbc19c046eedb
|
[] |
no_license
|
CKMaxwell/Python_online_challenge
|
048d24128b588d1af3db03379fb462cf7ea908a9
|
f13444612d93cf98aff760a6ff01d82a18082725
|
refs/heads/master
| 2023-01-03T20:19:27.001425
| 2020-10-26T18:25:07
| 2020-10-26T18:25:07
| 287,968,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# 20201005 - Next Prime
def next_prime(num):
    """Return *num* if it is prime, otherwise the smallest prime greater
    than *num* (for inputs below 2 this is always 2).

    Bug fixed: the original trial division tested divisors in
    ``range(2, current//2)``, which is empty for 4 (so ``next_prime(4)``
    returned 4) and also declared 0 and 1 prime.
    """
    def _is_prime(candidate):
        # Trial division up to sqrt(candidate); numbers < 2 are not prime.
        if candidate < 2:
            return False
        i = 2
        while i * i <= candidate:
            if candidate % i == 0:
                return False
            i += 1
        return True

    while not _is_prime(num):
        num += 1
    return num

print(next_prime(24))
|
[
"39400519+CKMaxwell@users.noreply.github.com"
] |
39400519+CKMaxwell@users.noreply.github.com
|
dc3186d2a2455dc2d0a0a9cff49870a71018fe2c
|
fb86f0dca6e525b8a8ddb63f10b8d220ddd7f7fe
|
/test/functional/rpc_users.py
|
e0e558491976166d2bcf20a7ad60b036f1e5687b
|
[
"MIT"
] |
permissive
|
ORO-mlm/UNO-Core
|
14fcdb3c2db4bde256e48ea661ada61579ccf403
|
d6e6769ce57466cfc9e7cab681eab880cdb8e3e8
|
refs/heads/main
| 2023-06-16T08:21:00.808606
| 2021-07-12T07:08:35
| 2021-07-12T07:08:35
| 383,350,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,951
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import UnoTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (UnoTestFramework):
    """Test multiple RPC users.

    node0 is configured with two rpcauth entries; node1 with the legacy
    rpcuser/rpcpassword pair. Each credential set is probed with correct and
    wrong username/password combinations and the HTTP status is checked.
    """

    def set_test_params(self):
        # Two nodes: node0 uses rpcauth, node1 uses rpcuser/rpcpassword.
        self.num_nodes = 2

    def setup_chain(self):
        super().setup_chain()
        # Append rpcauth to uno.conf before initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        rpcuser = "rpcuser=rpcuser�"
        rpcpassword = "rpcpassword=rpcpassword�"
        with open(os.path.join(self.options.tmpdir+"/node0", "uno.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")
        with open(os.path.join(self.options.tmpdir+"/node1", "uno.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser+"\n")
            f.write(rpcpassword+"\n")

    def _auth_status(self, url, authpair):
        """POST a getbestblockhash RPC with HTTP Basic *authpair* and
        return the HTTP status code (200 accepted, 401 rejected)."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        status = resp.status
        conn.close()
        return status

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        # Old authpair (framework-generated credentials)
        authpair = url.username + ':' + url.password
        # New authpair generated via share/rpcuser tool
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        # Second authpair with different username
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        assert_equal(self._auth_status(url, authpair), 200)
        # Use new authpair to confirm both work
        assert_equal(self._auth_status(url, "rt:"+password), 200)
        # Wrong login name with rt's password
        assert_equal(self._auth_status(url, "rtwrong:"+password), 401)
        # Wrong password for rt
        assert_equal(self._auth_status(url, "rt:"+password+"wrong"), 401)
        # Correct credentials for rt2
        assert_equal(self._auth_status(url, "rt2:"+password2), 200)
        # Wrong password for rt2
        assert_equal(self._auth_status(url, "rt2:"+password2+"wrong"), 401)

        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)

        # rpcuser and rpcpassword authpair
        assert_equal(self._auth_status(url, "rpcuser�:rpcpassword�"), 200)
        # Wrong login name with rpcuser's password
        assert_equal(self._auth_status(url, "rpcuserwrong:rpcpassword"), 401)
        # Wrong password for rpcuser
        assert_equal(self._auth_status(url, "rpcuser:rpcpasswordwrong"), 401)

if __name__ == '__main__':
    HTTPBasicsTest ().main ()
|
[
"brandon2davincci@gmail.com"
] |
brandon2davincci@gmail.com
|
c0e2d24a1bc7297c88a9beb74ab0133e4e5aac89
|
9556f7e1d81a305d71a66b9768eba199e396d733
|
/CloudVerify/hz_topic/plot.py
|
447e8168a5f86e890edab291b63a8088f90bc23a
|
[] |
no_license
|
gitgaoqian/Python
|
301a2823b50ec754a2c1a3f47c39ae8b0b8e6890
|
164f5271044b235d256a9bbe0a34caacf1e81fc8
|
refs/heads/master
| 2023-01-08T21:23:59.640828
| 2020-11-01T13:06:21
| 2020-11-01T13:06:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 20:41:36 2018
@author: ros
"""
import matplotlib.pyplot as plt
def load_data(fileName):
    """Read one float per line from *fileName*.

    Returns a tuple ``(t, rate)`` where ``t`` is the 1-based line index and
    ``rate`` is the float parsed from each line.

    Fixed: the file handle was opened but never closed; it is now managed
    with a ``with`` block.
    """
    t = []
    rate = []
    with open(fileName, 'r') as inFile:
        # line numbers (1-based) form the x-axis; the values form the y-axis
        for num, line in enumerate(inFile, start=1):
            t.append(num)
            rate.append(float(line.strip('\n')))
    return (t, rate)
def main():
filename='/home/ros/image1.txt'
[x,y]=load_data(filename)
plt.figure()
plt.plot(x,y,color='b', linewidth=1.5, linestyle='--',label='/camera/left/image_raw')
filename='/home/ros/image1.txt'
[x,y]=load_data(filename)
plt.plot(x,y,color='r', linewidth=1.5, linestyle='--', label='/camera/scan')
plt.xlabel('t')
plt.xlim(0,175)
plt.ylabel('rate')
plt.ylim((0, 12))
plt.title("The rate of topic")
plt.legend(loc='upper right')
plt.savefig(filename+'.pdf')
plt.show()
if __name__ == '__main__':
main()
|
[
"734756851@qq.com"
] |
734756851@qq.com
|
ed902afbfae4296e1b7fd7cc37f83453df65e33c
|
0937646b6ce9249a8d193987f308ce398dc28bd1
|
/statistics.py
|
4a51b15c3db32e5303a16077cf08ea2c86d4f3e8
|
[] |
no_license
|
barry800414/JobTitleNLP
|
98622d02b25b1418f28698f7d772c8de96642032
|
b379c2052447e6483d17f5db51fb918b37ac7a52
|
refs/heads/master
| 2021-06-08T19:36:39.044757
| 2016-10-21T03:11:10
| 2016-10-21T03:11:10
| 66,043,111
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
import sys, json
from collections import defaultdict
from getCat import *
with open('jobs.min.json', 'r') as f:
data = json.load(f)
with open('104RawCategory.json', 'r') as f:
rawCat = json.load(f)
to2 = getL3ToL2(rawCat)
to1 = getL2ToL1(rawCat)
L3Cnt = dict()
L2Cnt = defaultdict(int)
L1Cnt = defaultdict(int)
for L3 in data.keys():
L3Cnt[L3] = len(data[L3])
L2Cnt[to2[L3]] += L3Cnt[L3]
L1Cnt[to1[to2[L3]]] += L3Cnt[L3]
with open('L1.csv', 'w') as f:
for name, cnt in sorted(list(L1Cnt.items()), key=lambda x:x[1], reverse=True):
print(name, cnt, sep=',', file=f)
with open('L2.csv', 'w') as f:
for name, cnt in sorted(list(L2Cnt.items()), key=lambda x:x[1], reverse=True):
print(name, cnt, sep=',', file=f)
with open('L3.csv', 'w') as f:
for name, cnt in sorted(list(L3Cnt.items()), key=lambda x:x[1], reverse=True):
print(name, cnt, sep=',', file=f)
|
[
"barry800414@gmail.com"
] |
barry800414@gmail.com
|
4581ca0ff8deedc11195f9b9e61bd3b02094bc6a
|
f4dcb14111539e9a22300256fd6f8fefc61f2d50
|
/src/flua/Compiler/Output/cpp/CPPNamespace.py
|
dc0efdf08384a361bdec9fa561fa5145d8bb7ac0
|
[] |
no_license
|
GWRon/flua
|
276c3ea4ce1cfcf68a1000fb44512460b5161c4e
|
1cf051f1d5aec3ba4da48442a0d7257d399e5b36
|
refs/heads/master
| 2021-01-15T17:37:03.914965
| 2012-10-24T12:57:27
| 2012-10-24T12:57:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
####################################################################
# Header
####################################################################
# File: Namespace class
# Author: Eduard Urbach
####################################################################
# License
####################################################################
# (C) 2012 Eduard Urbach
#
# This file is part of Blitzprog.
#
# Blitzprog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blitzprog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blitzprog. If not, see <http://www.gnu.org/licenses/>.
####################################################################
# Imports
####################################################################
from flua.Compiler.Output import *
####################################################################
# Classes
####################################################################
class CPPNamespace(BaseNamespace):
def __init__(self, name):
super().__init__(name)
|
[
"e.urbach@gmail.com"
] |
e.urbach@gmail.com
|
ec65f29f39de790b1e38281d3dde053db6f88073
|
4ed5069638a0e684e8e813e4ef34dcfd1b68cd74
|
/boj/python/9012.py
|
23dfb7c5b49336d327f0b59ebfdff9939db6a327
|
[] |
no_license
|
youngerous/algorithm
|
11dafe9c54edf83646c915c59b1d7d4d18569005
|
fe599d958fdf51b956d2250088a3d5f1c5b22854
|
refs/heads/master
| 2022-01-27T02:12:49.237009
| 2022-01-16T12:29:40
| 2022-01-16T12:29:40
| 133,662,997
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
from sys import stdin
input = stdin.readline
N = int(input())
for _ in range(N):
ps = input().strip()
if ps.count("(") != ps.count(")"):
print("NO")
continue
# 처음부터 stack을 int 0으로 할당하면 시간을 더 줄일 수 있을 것
stack = []
try:
for each in ps:
stack.append(each) if each == "(" else stack.pop()
except Exception as e:
print("NO")
continue
print("YES") if len(stack) == 0 else print("NO")
|
[
"youngerous@gmail.com"
] |
youngerous@gmail.com
|
692841366fdad59ec8bf6e644ed7a843673fbc53
|
4c61f3bc0620758224bca72d4edec2707b41ecf0
|
/tests/test_victorspx.py
|
a586ffc0109f6fa13675f07e23206ea903e99df4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
CrispyBacon1999/robotpy-ctre
|
6b5ec68606e5b9094669e468fb4f01cd27f05c9d
|
b57346dda3de46c6f3bf25dddfe166fbf192846f
|
refs/heads/master
| 2020-12-11T11:47:28.578527
| 2020-01-02T00:35:29
| 2020-01-02T00:35:29
| 233,840,408
| 0
| 0
|
NOASSERTION
| 2020-01-14T12:50:12
| 2020-01-14T12:50:11
| null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
import pytest
@pytest.fixture(scope="function")
def victor(ctre):
return ctre.WPI_VictorSPX(1)
@pytest.fixture(scope="function")
def cdata(victor, hal_data):
return hal_data["CAN"][1]
def test_victor_init(ctre, hal_data):
assert 1 not in hal_data["CAN"]
ctre.WPI_VictorSPX(1)
assert 1 in hal_data["CAN"]
assert hal_data["CAN"][1]["type"] == "victorspx"
# Victor tests are covered by TalonSRX
|
[
"dustin@virtualroadside.com"
] |
dustin@virtualroadside.com
|
e99d44683d10030de32eea47d3b31d3a6ce832c4
|
1ec89e2731d84f4fc4210060212ea80b002db277
|
/pysph/examples/ghia_cavity_data.py
|
0c9ded4878f9770d5ce8010d28b8f98994555831
|
[
"BSD-3-Clause"
] |
permissive
|
fight1314/pysph
|
15600a3053f5bac41bf9c862914e870d93454e20
|
9bfa8d65cee39fbd470b8231e38e972df199a4da
|
refs/heads/master
| 2020-07-21T20:44:26.866017
| 2019-09-04T20:16:53
| 2019-09-04T20:16:53
| 206,971,679
| 3
| 0
|
NOASSERTION
| 2019-09-07T13:24:51
| 2019-09-07T13:24:51
| null |
UTF-8
|
Python
| false
| false
| 3,655
|
py
|
"""This module provides a few convenient functions for
the Lid Driven Cavity solutions in the paper:
"High-Re solutions for incompressible flow using the Navier-Stokes
equations and a multigrid method", U Ghia, K.N Ghia, C.T Shin,
JCP, Volume 48, Issue 3, December 1982, Pages 387-411.
"""
import numpy as np
from io import StringIO
RE = [100, 400, 1000, 3200, 5000, 7500, 10000]
# u velocity along vertical line through center (Table I)
table1 = u"""
1.0000 1.0000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000
0.9766 0.84123 0.75837 0.65928 0.53236 0.48223 0.47244 0.47221
0.9688 0.78871 0.68439 0.57492 0.48296 0.46120 0.47048 0.47783
0.9609 0.73722 0.61756 0.51117 0.46547 0.45992 0.47323 0.48070
0.9531 0.68717 0.55892 0.46604 0.46101 0.46036 0.47167 0.47804
0.8516 0.23151 0.29093 0.33304 0.34682 0.33556 0.34228 0.34635
0.7344 0.00332 0.16256 0.18719 0.19791 0.20087 0.2059 0.20673
0.6172 -0.13641 0.02135 0.05702 0.07156 0.08183 0.08342 0.08344
0.5000 -0.20581 -0.11477 -0.06080 -0.04272 -0.03039 -0.03800 0.03111
0.4531 -0.21090 -0.17119 -0.10648 -0.86636 -0.07404 -0.07503 -0.07540
0.2813 -0.15662 -0.32726 -0.27805 -0.24427 -0.22855 -0.23176 -0.23186
0.1719 -0.10150 -0.24299 -0.38289 -0.34323 -0.33050 -0.32393 -0.32709
0.1016 -0.06434 -0.14612 -0.29730 -0.41933 -0.40435 -0.38324 -0.38000
0.0703 -0.04775 -0.10338 -0.22220 -0.37827 -0.43643 -0.43025 -0.41657
0.0625 -0.04192 -0.09266 -0.20196 -0.35344 -0.42901 -0.43590 -0.42537
0.0547 -0.03717 -0.08186 -0.18109 -0.32407 -0.41165 -0.43154 -0.42735
0.0000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
"""
# v velocity along horizontal line through center (Table II)
table2 = u"""
1.0000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
0.9688 -0.05906 -0.12146 -0.21388 -0.39017 -0.49774 -0.53858 -0.54302
0.9609 -0.07391 -0.15663 -0.27669 -0.47425 -0.55069 -0.55216 -0.52987
0.9531 -0.08864 -0.19254 -0.33714 -0.52357 -0.55408 -0.52347 -0.49099
0.9453 -0.10313 -0.22847 -0.39188 -0.54053 -0.52876 -0.48590 -0.45863
0.9063 -0.16914 -0.23827 -0.51550 -0.44307 -0.41442 -0.41050 -0.41496
0.8594 -0.22445 -0.44993 -0.42665 -0.37401 -0.36214 -0.36213 -0.36737
0.8047 -0.24533 -0.38598 -0.31966 -0.31184 -0.30018 -0.30448 -0.30719
0.5000 0.05454 0.05188 0.02526 0.00999 0.00945 0.00824 0.00831
0.2344 0.17527 0.30174 0.32235 0.28188 0.27280 0.27348 0.27224
0.2266 0.17507 0.30203 0.33075 0.29030 0.28066 0.28117 0.28003
0.1563 0.16077 0.28124 0.37095 0.37119 0.35368 0.35060 0.35070
0.0938 0.12317 0.22965 0.32627 0.42768 0.42951 0.41824 0.41487
0.0781 0.10890 0.20920 0.30353 0.41906 0.43648 0.43564 0.43124
0.0703 0.10091 0.19713 0.29012 0.40917 0.43329 0.44030 0.43733
0.0625 0.09233 0.18360 0.27485 0.39560 0.42447 0.43979 0.43983
0.0000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
"""
def _get_data(table):
data = np.loadtxt(StringIO(table))
y = data[:,0]
result = {}
for i, r in enumerate(RE):
result[r] = data[:,i+1]
return y, result
def get_u_vs_y():
"""Return the data from table 1, this returns an array y and a
dictionary, with the keys as the available Reynolds numbers.
"""
return _get_data(table1)
def get_v_vs_x():
"""Return the data from table 2, this returns an array x and a
dictionary, with the keys as the available Reynolds numbers.
"""
return _get_data(table2)
|
[
"prabhu@aero.iitb.ac.in"
] |
prabhu@aero.iitb.ac.in
|
a411c7c8ec083f0a3500bac6c9f4fbebb6cbf0d4
|
afdf82890966bd3061db0a2478ad600fb8528475
|
/Chapter4/garbage.py
|
3ae8e92079e19f310faffcd854cd780535b234ba
|
[] |
no_license
|
humuhimi/wkwk_nlp
|
7cd6653d7cdb2b8f171447c2628b8a517dd65d13
|
48a6ef3745bf2c97c5581034bf85450be5783664
|
refs/heads/master
| 2022-01-11T09:15:06.378166
| 2019-07-03T08:43:49
| 2019-07-03T08:43:49
| 191,727,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# ストップワードの除去:使用頻度の高い言葉を処理対象外にする
import MeCab
# mecab-ipadic-NEologd辞書指定してオブジェクト生成
tagger = MeCab.Tagger()
tagger.parse("")
# 形態素解析の結果をリストで取得、単語ごとにリストの要素に入ってる
node = tagger.parseToNode("機械学習をマスターしたい。")
result = []
#助詞や助動詞は拾わない
while node is not None:
# 品詞情報取得
# Node.featureのフォーマット:品詞,品詞細分類1,品詞細分類2,品詞細分類3,活用形,活用型,原形,読み,発音
hinshi = node.feature.split(",")[0]
if hinshi in ["名詞"]:
# 表層形の取得、単語の文字が入ってる
result.append(node.surface)
elif hinshi in["動詞","形容詞"]:
# 形態素情報から原形情報取得
result.append(node.feature.split(",")[6])
node = node.next
print(result)
|
[
"masa19951009@gmail.com"
] |
masa19951009@gmail.com
|
cdb0d8b6cd08ee4eb88fdc17d0efedfad34bbef7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_angina.py
|
a3b48acb4bd4e8c024e14b432ddc21b2b819b361
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#calss header
class _ANGINA():
def __init__(self,):
self.name = "ANGINA"
self.definitions = [u'a condition that causes strong chest pains because blood containing oxygen is prevented from reaching the heart muscle by blocked arteries: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
74d6da60d8fb42c6a20f8541c42289571346ac04
|
942b0a9a24efa9dfc49ff4743180d9a412070359
|
/LEARNING/IMPORT/3_RemoteLoad/base.py
|
1150d0f6700110235ce38cf885994b3e832de242
|
[] |
no_license
|
atomicoo/python-tutorials
|
c48cd0e4b2b6e70ba177e40ea847c7b398139b62
|
e630c9bb3bcddda874a4c0a5b02c7e4d47e1eb7e
|
refs/heads/master
| 2023-02-02T09:46:21.147516
| 2020-12-17T03:59:06
| 2020-12-17T03:59:06
| 317,762,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
import sys
from importlib import abc
from importlib.machinery import ModuleSpec
import imp
from urllib.request import urlopen
# Debugging
import logging
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
def load_module(url):
logger.debug("Load module: get source from %s", url)
u = urlopen(url)
s = u.read().decode('utf-8')
logger.debug("Load module: create module %s", url)
m = sys.modules.setdefault(url, imp.new_module(url))
c = compile(s, url, 'exec')
m.__file__ = url
m.__package__ = ''
logger.debug("Load module: exec code object %s", c)
exec(c, m.__dict__)
return m
class UrlMetaFinder(abc.MetaPathFinder):
def __init__(self, baseurl):
self._baseurl = baseurl
def find_module(self, fullname, path=None):
logger.debug("Find module: fullname=%s, path=%s", fullname, path)
if path is None:
baseurl = self._baseurl
else:
if not path.startswith(self._baseurl):
return None
baseurl = path
try:
logger.debug("Find module: module %s found", fullname)
loader = UrlMetaLoader(baseurl)
return loader
except Exception:
logger.debug("Find module: module %s not found", fullname)
return None
# def find_spec(self, fullname, path=None, target=None):
# if path is None:
# baseurl = self._baseurl
# else:
# if not path.startswith(self._baseurl):
# return None
# baseurl = path
# try:
# loader = UrlMetaLoader(baseurl)
# return ModuleSpec(fullname, loader, is_package=loader.is_package(fullname))
# except Exception:
# return None
class UrlMetaLoader(abc.SourceLoader):
def __init__(self, baseurl):
self._baseurl = baseurl
# def load_module(self, fullname):
# c = self.get_code(fullname)
# m = sys.modules.setdefault(fullname, imp.new_module(fullname))
# m.__file__ = self.get_filename(fullname)
# m.__loader__ = self
# m.__package__ = fullname
# exec(c, m.__dict__)
# return None
def get_code(self, fullname):
u = urlopen(self.get_filename(fullname))
return u.read()
# def execute_module(self, module):
# pass
def get_data(self):
pass
def get_filename(self, fullname):
return self._baseurl + fullname + '.py'
def install_meta(address):
finder = UrlMetaFinder(address)
sys.meta_path.append(finder)
logger.debug('%s installed on sys.meta_path', finder)
if __name__ == '__main__':
print("Base Url Import.")
|
[
"atomicoo95@gmail.com"
] |
atomicoo95@gmail.com
|
a5cf496490118bc838a7e7ce72e2a774864993cf
|
20aadf6ec9fd64d1d6dffff56b05853e0ab26b1f
|
/l5/L5_pbm9.py
|
d4eb630c9efd666ed8dafec822a97ccc812ed104
|
[] |
no_license
|
feminas-k/MITx---6.00.1x
|
9a8e81630be784e5aaa890d811674962c66d56eb
|
1ddf24c25220f8b5f78d36e2a3342b6babb40669
|
refs/heads/master
| 2021-01-19T00:59:57.434511
| 2016-06-13T18:13:17
| 2016-06-13T18:13:17
| 61,058,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
def semordnilap(str1, str2):
'''
str1: a string
str2: a string
returns: True if str1 and str2 are semordnilap;
False otherwise.
'''
if len(str1) != len(str2):
return False
if len (str1)==1:
return str1 == str2
if str1[0]== str2[-1]:
return semordnilap(str1[1:],str2[:-1])
else:
return False
|
[
"femi1991@gmail.com"
] |
femi1991@gmail.com
|
e59e2f20fba580c5a353b118da2c2220bc2a4e2a
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/rlkit/torch/dqn/double_dqn.py
|
74b2f68bcafd5be7ce67a3fd89c50b4e34968553
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
import numpy as np
import torch
import rlkit.torch.pytorch_util as ptu
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.torch.dqn.dqn import DQNTrainer
class DoubleDQNTrainer(DQNTrainer):
def train_from_torch(self, batch):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
"""
Compute loss
"""
best_action_idxs = self.qf(next_obs).max(
1, keepdim=True
)[1]
target_q_values = self.target_qf(next_obs).gather(
1, best_action_idxs
).detach()
y_target = rewards + (1. - terminals) * self.discount * target_q_values
y_target = y_target.detach()
# actions is a one-hot vector
y_pred = torch.sum(self.qf(obs) * actions, dim=1, keepdim=True)
qf_loss = self.qf_criterion(y_pred, y_target)
"""
Update networks
"""
self.qf_optimizer.zero_grad()
qf_loss.backward()
self.qf_optimizer.step()
"""
Soft target network updates
"""
if self._n_train_steps_total % self.target_update_period == 0:
ptu.soft_update_from_to(
self.qf, self.target_qf, self.soft_target_tau
)
"""
Save some statistics for eval using just one batch.
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics.update(create_stats_ordered_dict(
'Y Predictions',
ptu.get_numpy(y_pred),
))
self._n_train_steps_total += 1
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
d213cd9894b49709b6f412912e23fb194187d63a
|
c4b636a2fffbf8ef3096e4de9de61b30ea3df72a
|
/hackerrank/find_tortosian_angle.py
|
d30648b1f4f0359a1f1c8378e6b8211b53386bd0
|
[
"MIT"
] |
permissive
|
FelixTheC/hackerrank_exercises
|
f63fbbc55a783ee4cecfa04302301a0fb66d45fe
|
24eedbedebd122c53fd2cb6018cc3535d0d4c6a0
|
refs/heads/master
| 2021-01-04T22:10:47.538372
| 2020-11-01T15:57:20
| 2020-11-01T15:57:20
| 240,779,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@created: 07.07.20
@author: felix
"""
import math
class Points:
__slots__ = ['x', 'y', 'z']
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
def __sub__(self, other):
return Points(self.x - other.x,
self.y - other.y,
self.z - other.z)
def dot(self, other):
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
return Points((self.y * other.z) - (self.z * other.y),
(self.z * other.x) - (self.x * other.z),
(self.x * other.y) - (self.y * other.x))
def absolute(self):
return pow((self.x ** 2 + self.y ** 2 + self.z ** 2), 0.5)
def __repr__(self):
return f'Point({self.x}, {self.y}, {self.z})'
if __name__ == '__main__':
points = [list(map(float, input().split())) for i in range(4)]
a, b, c, d = Points(*points[0]), Points(*points[1]), Points(*points[2]), Points(*points[3])
x = (b - a).cross(c - b)
y = (c - b).cross(d - c)
angle = math.acos(x.dot(y) / (x.absolute() * y.absolute()))
print(f'{math.degrees(angle):.2f}')
|
[
"felixeisenmenger@gmx.net"
] |
felixeisenmenger@gmx.net
|
3ac7de780e04dfd3fb4a83d22d22c72c9b191128
|
2e3f09aa3ad09a33cb9133133a2e7aa92578de00
|
/GenPIDMatching/test/test_wETL_cfg.py
|
5cb68687b035b0a716ba5b46670157d97bb36f40
|
[] |
no_license
|
yszhang95/MTDHIAnalysis
|
238f470e941d3d5a5a5c91f5e3d496323dc7a1db
|
de1e76a2a6d9cc43df992bd3e598f82c77aeebc2
|
refs/heads/master
| 2020-04-19T00:54:11.669859
| 2018-12-03T12:12:37
| 2018-12-03T12:12:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,637
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("mtdhi")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1)
process.options = cms.untracked.PSet( wantSummary =
cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(5)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:/eos/cms/store/group/phys_heavyions/flowcorr/step1.root',
#'root://xrootd.cmsaf.mit.edu//store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_1.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_2.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_3.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_4.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_5.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_6.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_7.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_8.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_9.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_10.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_11.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_12.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_13.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_14.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_15.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_16.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_17.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_18.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_19.root',
#'/store/user/davidlw/Hydjet_Quenched_MinBias_5020GeV_PhaseII/GEN_SIM_102X_test_v2/180924_215458/0000/step1_20.root',
),
#secondaryFileNames = cms.untracked.vstring(
#)
)
process.TFileService = cms.Service("TFileService",
fileName =
cms.string('mtdhi_wETL.root')
)
process.load("MTDHIAnalysis.GenPIDMatching.genpidmatching_cfi")
process.mtdhiana.isETL = cms.untracked.bool(True)
process.mtdhiana_20ps_3sigma = process.mtdhiana.clone()
process.mtdhiana_30ps_3sigma = process.mtdhiana.clone()
process.mtdhiana_50ps_3sigma = process.mtdhiana.clone()
process.mtdhiana_70ps_3sigma = process.mtdhiana.clone()
process.mtdhiana_20ps_2sigma = process.mtdhiana.clone()
process.mtdhiana_20ps_2sigma.nSigmaT = cms.untracked.double(2.0)
process.mtdhiana_30ps_2sigma = process.mtdhiana_20ps_2sigma.clone()
process.mtdhiana_50ps_2sigma = process.mtdhiana_20ps_2sigma.clone()
process.mtdhiana_70ps_2sigma = process.mtdhiana_20ps_2sigma.clone()
process.mtdhiana_20ps_1sigma = process.mtdhiana.clone()
process.mtdhiana_20ps_1sigma.nSigmaT = cms.untracked.double(1.0)
process.mtdhiana_30ps_1sigma = process.mtdhiana_20ps_1sigma.clone()
process.mtdhiana_50ps_1sigma = process.mtdhiana_20ps_1sigma.clone()
process.mtdhiana_70ps_1sigma = process.mtdhiana_20ps_1sigma.clone()
process.mtdhiana_20ps_3sigma.sigmaT = cms.untracked.double(0.02)
process.mtdhiana_30ps_3sigma.sigmaT = cms.untracked.double(0.03)
process.mtdhiana_50ps_3sigma.sigmaT = cms.untracked.double(0.05)
process.mtdhiana_70ps_3sigma.sigmaT = cms.untracked.double(0.07)
process.mtdhiana_20ps_2sigma.sigmaT = cms.untracked.double(0.02)
process.mtdhiana_30ps_2sigma.sigmaT = cms.untracked.double(0.03)
process.mtdhiana_50ps_2sigma.sigmaT = cms.untracked.double(0.05)
process.mtdhiana_70ps_2sigma.sigmaT = cms.untracked.double(0.07)
process.mtdhiana_20ps_1sigma.sigmaT = cms.untracked.double(0.02)
process.mtdhiana_30ps_1sigma.sigmaT = cms.untracked.double(0.03)
process.mtdhiana_50ps_1sigma.sigmaT = cms.untracked.double(0.05)
process.mtdhiana_70ps_1sigma.sigmaT = cms.untracked.double(0.07)
#process.p = cms.Path(process.mtdhiana_20ps_1sigma)
#process.p1 = cms.Path(process.mtdhiana_30ps_1sigma)
#process.p2 = cms.Path(process.mtdhiana_50ps_1sigma)
#process.p3 = cms.Path(process.mtdhiana_70ps_1sigma)
#process.p4 = cms.Path(process.mtdhiana_20ps_2sigma)
#process.p5 = cms.Path(process.mtdhiana_30ps_2sigma)
#process.p6 = cms.Path(process.mtdhiana_50ps_2sigma)
#process.p7 = cms.Path(process.mtdhiana_70ps_2sigma)
process.p8 = cms.Path(process.mtdhiana_20ps_3sigma)
process.p9 = cms.Path(process.mtdhiana_30ps_3sigma)
process.p10 = cms.Path(process.mtdhiana_50ps_3sigma)
process.p11 = cms.Path(process.mtdhiana_70ps_3sigma)
|
[
"liwei810812@gmail.com"
] |
liwei810812@gmail.com
|
3291bf56514aaf9d24dd01745b5735be76b4a58e
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5738606668808192_1/Python/thomasahle/c.py
|
eef8d580501ceb9469eb0cf84e9016a7098c1326
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import sys
read = lambda t=int: list(map(t,sys.stdin.readline().split()))
#def isweakprime(n):
# return all(n%q!=0 for q in [2,3,5,7,11])
#def isprime(n):
# return all(n%q!=0 for q in range(2,int(n**.5)+1))
#def brute(n):
# for i in range(2**(n-2)):
# j = 1<<(n-1) | i<<1 | 1
# bs = [sum(b**i for i in range(n) if j&1<<i) for b in range(2,11)]
# if not any(isweakprime(b) for b in bs):
# yield j
#
#import itertools
#for n in range(2,33):
# print('====', n, '====')
# it = brute(n)
# example = list(itertools.islice(it,500))
# print('\n'.join(map(bin, example)))
# count = sum(1 for _ in it) + len(example)
# print('total', count, '({})'.format(count/2**(n-2)))
T, = read()
for testCase in range(T):
n, j = read()
print('Case #{}:'.format(testCase+1))
for i in range(2**(n-2)):
coin = 1<<(n-1) | i<<1 | 1
bs = [sum(b**i for i in range(n) if coin&1<<i) for b in range(2,11)]
ds = [[d for d in [2,3,5,7,11] if b%d==0] for b in bs]
if all(ds):
#print(bs)
#print(ds)
print(bin(coin)[2:], ' '.join(str(d[0]) for d in ds))
j -= 1
if j == 0: break
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
2200a43585376dd23f9cbc67bb71411c8aebf3b8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02272/s422945045.py
|
66f42ba322c67f725b33af923bcbd37a20695982
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
def mergeSort(A,left,right,B):
if left + 1 < right:
mid = (left+right)//2
mergeSort(A,left,mid,B)
mergeSort(A,mid,right,B)
merge(A,left,mid,right,B)
def merge(A,left,mid,right,B):
L = A[left:mid]
R = A[mid:right]
L.append(1000000000)
R.append(1000000000)
i = 0
j = 0
for k in range(left,right):
B.append(1)
if L[i] < R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
n = int(input())
nums = list(map(int,input().split(" ")))
B = []
mergeSort(nums,0,n,B)
print(' '.join(map(str,nums)))
print(str(len(B)))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.