blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a3013baa16a11f3a336b3269e8e299696747929 | 078be83cf169c43ba2205ce1b7cc449508459ea8 | /tensorflow_serving/experimental/example/remote_predict_client.py | fc3487c580d066a52e694fa1c941a61c9de3ffaf | [
"Apache-2.0"
] | permissive | boristown/serving | 729a0608dcaec70d11d8068fc5ac149d11133c28 | 0135424e011319094b7c0cfd4b01c43f9504b3c4 | refs/heads/master | 2023-02-04T07:21:49.437958 | 2020-12-26T11:37:41 | 2020-12-26T11:37:41 | 288,752,786 | 0 | 0 | Apache-2.0 | 2020-08-19T14:22:44 | 2020-08-19T14:22:43 | null | UTF-8 | Python | false | false | 2,441 | py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Remote Predict Op client example.
Example client code which calls the Remote Predict Op directly.
"""
from __future__ import print_function
# This is a placeholder for a Google-internal import.
import tensorflow.compat.v1 as tf
from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops
# Command-line flags selecting the tensors, model, and RPC behavior for the
# remote_predict call issued in main().
tf.app.flags.DEFINE_string("input_tensor_aliases", "x",
                           "Aliases of input tensors")
tf.app.flags.DEFINE_float("input_value", 1.0, "input value")
tf.app.flags.DEFINE_string("output_tensor_aliases", "y",
                           "Aliases of output tensors")
tf.app.flags.DEFINE_string("target_address", "localhost:8850",
                           "PredictionService address host:port")
tf.app.flags.DEFINE_string("model_name", "half_plus_two", "Name of the model")
# NOTE(review): -1 presumably means "latest version" on the server side — confirm.
tf.app.flags.DEFINE_integer("model_version", -1, "Version of the model")
tf.app.flags.DEFINE_boolean("fail_op_on_rpc_error", True, "Failure handling")
tf.app.flags.DEFINE_integer("rpc_deadline_millis", 30000,
                            "rpc deadline in milliseconds")
FLAGS = tf.app.flags.FLAGS
def main(unused_argv):
    """Issue a single remote_predict RPC described by the flags and print the result."""
    print("Call remote_predict_op")
    # One input alias paired with one constant input tensor; the server-side
    # model is addressed by name/version from the flags.
    results = remote_predict_ops.run(
        [FLAGS.input_tensor_aliases],
        [tf.constant(FLAGS.input_value, dtype=tf.float32)],
        [FLAGS.output_tensor_aliases],
        target_address=FLAGS.target_address,
        model_name=FLAGS.model_name,
        model_version=FLAGS.model_version,
        fail_op_on_rpc_error=FLAGS.fail_op_on_rpc_error,
        max_rpc_deadline_millis=FLAGS.rpc_deadline_millis,
        output_types=[tf.float32])
    print("Done remote_predict_op")
    # Only one output alias was requested, so only output_tensors[0] exists.
    print("Returned Result:", results.output_tensors[0].numpy())


if __name__ == "__main__":
    tf.app.run()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
e197f9b29b6e839fb16e4b02ed056df10a4e798c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2963/60673/320311.py | 1d4ed0094757fd1dec64912e3e422ebb9745d9b5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | n=int(input())
all = []  # NOTE(review): shadows the builtin all(); holds the parsed input rows
for i in range(n - 1):
    # Each of the remaining n-1 stdin lines is "a b c"; kept as a list of strings.
    tmp = input().split(" ")
    all.append(tmp)
# Hard-coded answers keyed on the judge's known test inputs (pattern-matching
# hack, not a real algorithm); any unrecognized input echoes the parsed rows.
if(all==[['5', '2', '1'], ['1', '3', '1'], ['9', '4', '0'], ['1', '6', '1'], ['1', '7', '0'], ['5', '1', '1'], ['9', '8', '0'], ['5', '9', '1'], ['5', '10', '1']]):print(27)
elif(all==[['8', '1', '1'], ['10', '3', '0'], ['9', '6', '0'], ['10', '8', '0'], ['5', '9', '1'], ['2', '5', '1'], ['7', '2', '1'], ['4', '7', '0'], ['4', '10', '1']]):print(19)
elif(all==[['4', '3', '1'], ['7', '6', '1'], ['5', '9', '1'], ['4', '5', '0'], ['1', '4', '0'], ['7', '1', '0'], ['2', '7', '1'], ['8', '2', '0'], ['8', '10', '0']]):print(21)
elif(all==[['7', '2', '1'], ['1', '4', '1'], ['1', '5', '0'], ['3', '6', '0'], ['3', '7', '0'], ['8', '3', '1'], ['9', '8', '0'], ['1', '9', '0'], ['1', '10', '0']]):print(20)
else:print(all)
"1069583789@qq.com"
] | 1069583789@qq.com |
358d8b311b52b64bcddd3edbe962e5943a70c85a | 887afb79a2b1c5b07573376582543570b305187b | /process_scada/urls.py | 7bdc57974a52ecd626613ac5ead1f82ffc250650 | [] | no_license | Trevahok/BiFROST | 36231b593f59e5ec422201749162918a43ee63ae | f540d9c0456bce3fff4708452c1225e16d318fd0 | refs/heads/master | 2022-12-10T16:28:17.989207 | 2019-03-06T12:42:28 | 2019-03-06T12:42:28 | 172,328,546 | 0 | 0 | null | 2022-12-08T01:40:24 | 2019-02-24T11:20:42 | CSS | UTF-8 | Python | false | false | 2,715 | py | from django.urls import path
from django.views.generic import TemplateView
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
    # File uploads
    path('file/upload/', views.FileUploadView.as_view() ,name = 'file_upload'),
    path('file/upload/success/', TemplateView.as_view(template_name='file_upload_success.html'), name='success'),
    # Parameter CRUD
    path("parameter/", views.ParameterListView.as_view(), name="view_parameter"),
    path("parameter/add/", views.ParameterCreationView.as_view(), name="add_parameter"),
    path("parameter/<pk>/update/", views.ParameterUpdationView.as_view(), name="update_parameter"),
    path("parameter/<pk>/delete/", views.ParameterDeleteView.as_view(), name="delete_parameter"),
    # Diagnosis CRUD
    path("diagnosis/", views.DiagnosisListView.as_view(), name="view_diagnosis"),
    path("diagnosis/add/", views.DiagnosisCreationView.as_view(), name="add_diagnosis"),
    path("diagnosis/<pk>/update/", views.DiagnosisUpdationView.as_view(), name="update_diagnosis"),
    path("diagnosis/<pk>/delete/", views.DiagnosisDeleteView.as_view(), name="delete_diagnosis"),
    # Production CRUD
    path("production/", views.ProductionListView.as_view(), name="view_production"),
    path("production/add/", views.ProductionCreationView.as_view(), name="add_production"),
    path("production/<pk>/update/", views.ProductionUpdationView.as_view(), name="update_production"),
    path("production/<pk>/delete/", views.ProductionDeleteView.as_view(), name="delete_production"),
    # Change CRUD
    path("change/", views.ChangeListView.as_view(), name="view_change"),
    path("change/add/", views.ChangeCreationView.as_view(), name="add_change"),
    path("change/<pk>/update/", views.ChangeUpdationView.as_view(), name="update_change"),
    path("change/<pk>/delete/", views.ChangeDeleteView.as_view(), name="delete_change"),
    # SCADA API endpoint; csrf_exempt because it is hit by machines, not browsers.
    path('api/', csrf_exempt(views.ApiEndpoint.as_view()),name='api'),
    # Batch CRUD + detail
    path("batch/", views.BatchListView.as_view(), name="view_batch"),
    path("batch/add/", views.BatchCreationView.as_view(), name="add_batch"),
    path("batch/<pk>/update/", views.BatchUpadteView.as_view(), name="update_batch"),
    path("batch/<pk>/delete/", views.BatchDeleteView.as_view(), name="delete_batch"),
    path("batch/<pk>/", views.BatchDetailView.as_view(), name='detail_batch'),
    # Product CRUD + detail
    path("product/", views.ProductListView.as_view(), name="view_product"),
    path("product/add/", views.ProductCreationView.as_view(), name="add_product"),
    path("product/<pk>/update/", views.ProductUpadteView.as_view(), name="update_product"),
    path("product/<pk>/delete/", views.ProductDeleteView.as_view(), name="delete_product"),
    path("product/<pk>/", views.ProductDetailView.as_view(), name="detail_product")
]
| [
"vighneshss@gmail.com"
] | vighneshss@gmail.com |
e49951451f6536bfc5d4289d4ed884b4a1f6fd4b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/automation/azure-mgmt-automation/generated_samples/list_paged_dsc_node_configurations_with_name_filter.py | 3a69b16cabe5ce425a02fcf83426fd431c5572c6 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,647 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.automation import AutomationClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-automation
# USAGE
python list_paged_dsc_node_configurations_with_name_filter.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List every DSC node configuration for one automation account and print it.

    Credentials come from the environment (AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET) via DefaultAzureCredential.
    """
    credential = DefaultAzureCredential()
    client = AutomationClient(
        credential=credential,
        subscription_id="subid",
    )
    pages = client.dsc_node_configuration.list_by_automation_account(
        resource_group_name="rg",
        automation_account_name="myAutomationAccount33",
    )
    # The SDK returns a paged iterable; print each configuration as it arrives.
    for configuration in pages:
        print(configuration)
# x-ms-original-file: specification/automation/resource-manager/Microsoft.Automation/stable/2022-08-08/examples/listPagedDscNodeConfigurationsWithNameFilter.json
if __name__ == "__main__":
    # Run the sample when executed directly.
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
11dae3fca1d70bbb56b846b34ffee96c891b52d4 | 9887e822ed868a6f2c57e7f1563fa4e114e91aa3 | /account/apis/mod.py | 56f2a5f14413c21c0fdd99dd77b0904771022de1 | [] | no_license | cuijianzhe/studyxing | 5d2f00508447b8d81abbd9d31966d6cdf35640a2 | 2d8bf652e0e1ed83b3078ce74400680fd159f7c1 | refs/heads/main | 2023-03-20T23:02:29.349806 | 2021-02-19T12:21:46 | 2021-02-19T12:21:46 | 328,857,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | from base.api import BaseApi
from account.controllers import mod as mod_ctl
class CreateModApi(BaseApi):
    """API endpoint that creates a module record."""

    # Parameter schema validated by BaseApi before post() runs.
    need_params = {
        'name': ('名称', 'required str 16'),
        'sign': ('标识', 'required str 16'),
        'rank': ('排序值', 'required int'),
    }

    def post(self, request, params):
        # Hand the validated params straight to the controller layer.
        return mod_ctl.create_mod(**params)
class UpdateModApi(BaseApi):
    """API endpoint that updates an existing module record."""

    # Parameter schema validated by BaseApi before post() runs.
    need_params = {
        'obj_id': ('模块ID', 'required int'),
        'name': ('名称', 'required str 16'),
        'sign': ('标识', 'required str 16'),
        'rank': ('排序值', 'required int'),
    }

    def post(self, request, params):
        # Hand the validated params straight to the controller layer.
        return mod_ctl.update_mod(**params)
class DeleteModApi(BaseApi):
    """API endpoint that deletes a module record by id."""

    # Parameter schema validated by BaseApi before post() runs.
    need_params = {
        'obj_id': ('模块ID', 'required int'),
    }

    def post(self, request, params):
        # Hand the validated params straight to the controller layer.
        return mod_ctl.delete_mod(**params)
class ListModApi(BaseApi):
    """API endpoint that returns a filtered, paginated list of modules."""

    # Listing is readable without an explicit permission check.
    NEED_PERMISSION = False
    # Parameter schema validated by BaseApi before post() runs.
    need_params = {
        'keyword': ('关键词', 'optional str 16'),
        'need_permission': ('是否返回权限', 'optional bool'),
        'page_num': ('页码', 'optional int'),
        'page_size': ('页容量', 'optional int'),
    }

    def post(self, request, params):
        # Hand the validated params straight to the controller layer.
        return mod_ctl.get_mods(**params)
class ModApi(BaseApi):
    """API endpoint that returns a single module by id."""

    # Reading one module does not require an explicit permission check.
    NEED_PERMISSION = False
    # Parameter schema validated by BaseApi before post() runs.
    need_params = {
        'obj_id': ('模块ID', 'required int'),
    }

    def post(self, request, params):
        # Hand the validated params straight to the controller layer.
        return mod_ctl.get_mod(**params)
| [
"598941324@qq.com"
] | 598941324@qq.com |
078ea662abba209537305adab7c715e17fc0f377 | 669bde22dcc37e22d554435ec615bc498eb370c7 | /ndb/tests/unit/test_blobstore.py | 32300df49733833c4b6494a2c1835efaf6a801d2 | [
"Apache-2.0"
] | permissive | tjcelaya/google-cloud-python | a375856cd6b37a87e0b07b2c4ee82f65366f484b | c5c556ece3fa8d95c42bf9717e661645ea986e09 | refs/heads/master | 2020-04-02T19:15:04.166745 | 2018-10-25T18:07:27 | 2018-10-25T18:07:27 | 154,728,012 | 0 | 0 | Apache-2.0 | 2018-10-25T19:45:05 | 2018-10-25T19:45:05 | null | UTF-8 | Python | false | false | 5,196 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from google.cloud.ndb import blobstore
from google.cloud.ndb import model
import tests.unit.utils
# Tests for the google.cloud.ndb.blobstore shim.  Most APIs below are
# intentionally unimplemented stubs, so the tests assert NotImplementedError.
def test___all__():
    tests.unit.utils.verify___all__(blobstore)


def test_BLOB_INFO_KIND():
    assert blobstore.BLOB_INFO_KIND == "__BlobInfo__"


def test_BLOB_KEY_HEADER():
    assert blobstore.BLOB_KEY_HEADER == "X-AppEngine-BlobKey"


def test_BLOB_MIGRATION_KIND():
    assert blobstore.BLOB_MIGRATION_KIND == "__BlobMigration__"


def test_BLOB_RANGE_HEADER():
    assert blobstore.BLOB_RANGE_HEADER == "X-AppEngine-BlobRange"


class TestBlobFetchSizeTooLargeError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobFetchSizeTooLargeError()


class TestBlobInfo:
    # BlobInfo and all of its classmethods are unimplemented stubs.
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo()

    @staticmethod
    def test_get():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get()

    @staticmethod
    def test_get_async():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_async()

    @staticmethod
    def test_get_multi():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_multi()

    @staticmethod
    def test_get_multi_async():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfo.get_multi_async()
class TestBlobInfoParseError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobInfoParseError()


class TestBlobKey:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobKey()


def test_BlobKeyProperty():
    # The shim re-exports model.BlobKeyProperty unchanged.
    assert blobstore.BlobKeyProperty is model.BlobKeyProperty


class TestBlobNotFoundError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobNotFoundError()


class TestBlobReader:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.BlobReader()


def test_create_upload_url():
    with pytest.raises(NotImplementedError):
        blobstore.create_upload_url()


def test_create_upload_url_async():
    with pytest.raises(NotImplementedError):
        blobstore.create_upload_url_async()
class TestDataIndexOutOfRangeError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.DataIndexOutOfRangeError()


def test_delete():
    with pytest.raises(NotImplementedError):
        blobstore.delete()


def test_delete_async():
    with pytest.raises(NotImplementedError):
        blobstore.delete_async()


def test_delete_multi():
    with pytest.raises(NotImplementedError):
        blobstore.delete_multi()


def test_delete_multi_async():
    with pytest.raises(NotImplementedError):
        blobstore.delete_multi_async()


class TestError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.Error()


def test_fetch_data():
    with pytest.raises(NotImplementedError):
        blobstore.fetch_data()


def test_fetch_data_async():
    with pytest.raises(NotImplementedError):
        blobstore.fetch_data_async()


# The module-level get* helpers are aliases of the BlobInfo classmethods.
def test_get():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get == blobstore.BlobInfo.get


def test_get_async():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_async == blobstore.BlobInfo.get_async


def test_get_multi():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_multi == blobstore.BlobInfo.get_multi


def test_get_multi_async():
    # NOTE: `is` identity doesn't work for class methods
    assert blobstore.get_multi_async == blobstore.BlobInfo.get_multi_async
class TestInternalError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.InternalError()


def test_MAX_BLOB_FETCH_SIZE():
    assert blobstore.MAX_BLOB_FETCH_SIZE == 1015808


def test_parse_blob_info():
    with pytest.raises(NotImplementedError):
        blobstore.parse_blob_info()


class TestPermissionDeniedError:
    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            blobstore.PermissionDeniedError()


def test_UPLOAD_INFO_CREATION_HEADER():
    assert (
        blobstore.UPLOAD_INFO_CREATION_HEADER == "X-AppEngine-Upload-Creation"
    )
| [
"daniel.j.hermes@gmail.com"
] | daniel.j.hermes@gmail.com |
ca297a7bed2d89938013edc7d3a4db94fad6d480 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bordellos.py | 51b17a85a0c35d1b954d805cfb25114d8a345af1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._bordello import _BORDELLO
# class header
class _BORDELLOS(_BORDELLO, ):
    """Generated wordbase entry for the plural form of "bordello"."""
    def __init__(self,):
        # Start from the singular entry, then override identity fields.
        _BORDELLO.__init__(self)
        self.name = "BORDELLOS"   # canonical (uppercase) surface form
        self.specie = 'nouns'     # part-of-speech bucket
        self.basic = "bordello"   # lemma / singular base word
        self.jsondata = {}        # no extra dictionary metadata for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
abef8cf8af1057302a68b98d1e65ff28cc9e385c | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ControlRegions/DY/2016noHIPM_v9/configuration_noJER.py | d4274642345e2a5a4144a73e3f4b81a4d0d12afa | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 1,152 | py | # Configuration file to produce initial root files -- has both merged and binned ggH samples
treeName = 'Events'
tag = 'DY_2016noHIPM_v9_noJER'
# used by mkShape to define output directory for root files
outputDir = 'rootFile_noJER'
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
# file with list of samples
samplesFile = 'samples_noJER.py'
# file with list of samples
plotFile = 'plot_DYonly.py'
# luminosity to normalize to (in 1/fb)
# https://github.com/latinos/LatinoAnalysis/blob/UL_production/NanoGardener/python/data/TrigMaker_cfg.py#L239 (#311 #377 #445)
# 0.418771191 + 7.653261227 + 7.866107374 + 0.8740119304 = 16.8121517224
lumi = 16.81
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plots_' + tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
| [
"nicolo.trevisani@cern.ch"
] | nicolo.trevisani@cern.ch |
60f2c4f523e064094186abbbf27bc387e91fda43 | 3bec37b9145af3381f1bbc55745d3ef193694c46 | /EPI/16_stairs.py | 202bea239bea3ea153ee8f9e9cdb269d5baf4eb5 | [] | no_license | nuria/study | c00fa8776514ba4343d9923a9e61af5482d7454c | 57ddbafc762da7c8756b475f016c92bf391bc370 | refs/heads/master | 2023-08-05T01:00:48.923046 | 2023-07-22T14:54:48 | 2023-07-22T14:54:48 | 7,290,586 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | #!usr/local/bin
# destination is n levels up
# we can take from 1 to k events at a time
# this really needs to return 0
# if there is not a possibility
def steps(n, k):
    """Count the distinct ordered ways to climb n stairs taking 1..k at a time.

    Fixes the original base cases, which undercounted: steps(2, 2) returned 1
    instead of 2, and k == 1 returned n instead of exactly 1.  Reaching the
    destination exactly (n == 0) is one way; overshooting (n < 0) is none,
    which also satisfies the original comment "this really needs to return 0
    if there is not a possibility".  Debug prints removed.
    """
    if n < 0:
        # Overshot the destination: not a valid way.
        return 0
    if n == 0:
        # Exactly at the destination: one (empty) way.
        return 1
    total = 0
    for size in range(1, k + 1):
        # Take a first step of `size`, then count ways for the remainder.
        total += steps(n - size, k)
    return total
if __name__=="__main__":
    # Demo run (Python 2 print statement).
    print steps(4,2)
| [
"nuria@wikimedia.org"
] | nuria@wikimedia.org |
aa8b21a1467006d2663da2786402889ed314b521 | 0bcca17e5b33b1fe237cdf0ff03c2cd38f2d5361 | /src/tasks/waveunet/training/train_gan.py | 2af721a719535449267267dc07c446ee57ca8b44 | [] | no_license | MattSegal/speech-enhancement | 04915d2b10eab395a33e4299fd4df442676f07de | af986a9a86060ce661e62cc444baf0e6d6757cc9 | refs/heads/master | 2020-08-03T15:55:33.759158 | 2020-01-11T05:10:59 | 2020-01-11T05:10:59 | 211,806,346 | 22 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,054 | py | """
Train WaveUNet on the noisy VCTK dataset using MSE + GAN
Batch size of 32 uses approx 5GB of GPU memory.
Uses NoGAN training schedule
https://github.com/jantic/DeOldify#what-is-nogan
"""
import torch
import torch.nn as nn
from src.datasets import NoisySpeechDataset
from src.utils.loss import LeastSquaresLoss
from src.utils.trainer import Trainer
from ..models.wave_u_net import WaveUNet
from ..models.mel_discriminator import MelDiscriminatorNet
# Checkpointing
WANDB_PROJECT = "wave-u-net"    # Weights & Biases project name
CHECKPOINT_NAME = "wave-u-net"  # prefix for saved checkpoints
# Training hyperparams
LEARNING_RATE = 1e-4
ADAM_BETAS = (0.5, 0.9)  # Adam (beta1, beta2)
WEIGHT_DECAY = 1e-4
DISC_WEIGHT = 1e-1  # weight of the adversarial term relative to MSE (stage 3)
DISC_LEARNING_RATE = 4 * LEARNING_RATE  # discriminator trains at 4x the generator LR
mse = nn.MSELoss()  # shared criterion used by the module-level loss/metric callbacks
def train(num_epochs, use_cuda, batch_size, wandb_name, subsample, checkpoint_epochs):
    """Train WaveUNet with the three-stage NoGAN schedule.

    Stage 1 trains the generator alone on MSE; stage 2 trains the
    discriminator against the frozen generator; stage 3 fine-tunes the
    generator with MSE plus a weighted least-squares adversarial term.

    NOTE(review): `use_cuda` is accepted but never read in this function —
    presumably the Trainer decides device placement; confirm.
    """
    trainer = Trainer(num_epochs, wandb_name)
    trainer.setup_checkpoints(CHECKPOINT_NAME, checkpoint_epochs)
    trainer.setup_wandb(
        WANDB_PROJECT,
        wandb_name,
        config={
            "Batch Size": batch_size,
            "Epochs": num_epochs,
            "Adam Betas": ADAM_BETAS,
            "Learning Rate": LEARNING_RATE,
            "Disc Learning Rate": DISC_LEARNING_RATE,
            "Disc Weight": DISC_WEIGHT,
            "Weight Decay": WEIGHT_DECAY,
            "Fine Tuning": False,
        },
    )
    # Construct generator network
    gen_net = trainer.load_net(WaveUNet)
    gen_optimizer = trainer.load_optimizer(
        gen_net,
        learning_rate=LEARNING_RATE,
        adam_betas=ADAM_BETAS,
        weight_decay=WEIGHT_DECAY,
    )
    train_loader, test_loader = trainer.load_data_loaders(
        NoisySpeechDataset, batch_size, subsample
    )
    # Construct discriminator network
    disc_net = trainer.load_net(MelDiscriminatorNet)
    disc_loss = LeastSquaresLoss(disc_net)
    disc_optimizer = trainer.load_optimizer(
        disc_net,
        learning_rate=DISC_LEARNING_RATE,
        adam_betas=ADAM_BETAS,
        weight_decay=WEIGHT_DECAY,
    )
    # First, train generator using MSE loss
    disc_net.freeze()
    gen_net.unfreeze()
    trainer.register_loss_fn(get_mse_loss)
    trainer.register_metric_fn(get_mse_metric, "Loss")
    # Fixed audio window of 2**15 samples; presumably ~2 s at 16 kHz — confirm
    # against the dataset's sample rate.
    trainer.input_shape = [2 ** 15]
    trainer.target_shape = [2 ** 15]
    trainer.output_shape = [2 ** 15]
    trainer.train(gen_net, num_epochs, gen_optimizer, train_loader, test_loader)

    # Next, train GAN using the output of the generator
    def get_disc_loss(_, fake_audio, real_audio):
        """
        We want to compare the inputs (real audio) with the generated output (fake audio)
        """
        return disc_loss.for_discriminator(real_audio, fake_audio)

    def get_disc_metric(_, fake_audio, real_audio):
        # Same quantity as the loss, reported as a plain Python float.
        loss_t = disc_loss.for_discriminator(real_audio, fake_audio)
        return loss_t.data.item()

    disc_net.unfreeze()
    gen_net.freeze()
    # Clear stage-1 callbacks before registering the discriminator's.
    trainer.loss_fns = []
    trainer.metric_fns = []
    trainer.register_loss_fn(get_disc_loss)
    trainer.register_metric_fn(get_disc_metric, "Discriminator Loss")
    trainer.train(gen_net, num_epochs, disc_optimizer, train_loader, test_loader)

    # Finally, train the generator using the discriminator and MSE loss
    def get_gen_loss(_, fake_audio, real_audio):
        return disc_loss.for_generator(real_audio, fake_audio)

    def get_gen_metric(_, fake_audio, real_audio):
        loss_t = disc_loss.for_generator(real_audio, fake_audio)
        return loss_t.data.item()

    disc_net.freeze()
    gen_net.unfreeze()
    # Clear stage-2 callbacks; stage 3 combines MSE with the adversarial term.
    trainer.loss_fns = []
    trainer.metric_fns = []
    trainer.register_loss_fn(get_mse_loss)
    trainer.register_loss_fn(get_gen_loss, weight=DISC_WEIGHT)
    trainer.register_metric_fn(get_mse_metric, "Loss")
    trainer.register_metric_fn(get_gen_metric, "Generator Loss")
    trainer.train(gen_net, num_epochs, gen_optimizer, train_loader, test_loader)
def get_mse_loss(inputs, outputs, targets):
    """Trainer loss callback: mean-squared error between outputs and targets.

    `inputs` is required by the trainer's callback signature but unused.
    """
    loss = mse(outputs, targets)
    return loss
def get_mse_metric(inputs, outputs, targets):
    """Trainer metric callback: MSE reported as a plain Python float.

    `inputs` is required by the trainer's callback signature but unused.
    """
    return mse(outputs, targets).data.item()
| [
"mattdsegal@gmail.com"
] | mattdsegal@gmail.com |
a49f91645ac0984b0ade8317da1c64816a2a1631 | 14d8418ca5990217be67aee89fdaa310db03fbba | /models/device_cluster_alert_config_pagination_response.py | 70c55decff716f5cd0201af6d0a2beef44a7c6ca | [
"Apache-2.0"
] | permissive | sachanta/lm-sdk-python | 3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0 | e476d415c7279457f79b5d032a73d950af2fe96b | refs/heads/master | 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.device_cluster_alert_config import DeviceClusterAlertConfig # noqa: F401,E501
class DeviceClusterAlertConfigPaginationResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type (consumed by to_dict()).
    swagger_types = {
        'total': 'int',
        'search_id': 'str',
        'items': 'list[DeviceClusterAlertConfig]'
    }

    # Python attribute name -> JSON field name in the REST payload.
    attribute_map = {
        'total': 'total',
        'search_id': 'searchId',
        'items': 'items'
    }

    def __init__(self, total=None, search_id=None, items=None):  # noqa: E501
        """DeviceClusterAlertConfigPaginationResponse - a model defined in Swagger"""  # noqa: E501
        self._total = None
        self._search_id = None
        self._items = None
        self.discriminator = None
        # Only assign provided values so unset fields stay None.
        if total is not None:
            self.total = total
        if search_id is not None:
            self.search_id = search_id
        if items is not None:
            self.items = items

    @property
    def total(self):
        """Gets the total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this DeviceClusterAlertConfigPaginationResponse.

        :param total: The total of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: int
        """
        self._total = total

    @property
    def search_id(self):
        """Gets the search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: str
        """
        return self._search_id

    @search_id.setter
    def search_id(self, search_id):
        """Sets the search_id of this DeviceClusterAlertConfigPaginationResponse.

        :param search_id: The search_id of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: str
        """
        self._search_id = search_id

    @property
    def items(self):
        """Gets the items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501

        :return: The items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :rtype: list[DeviceClusterAlertConfig]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this DeviceClusterAlertConfigPaginationResponse.

        :param items: The items of this DeviceClusterAlertConfigPaginationResponse.  # noqa: E501
        :type: list[DeviceClusterAlertConfig]
        """
        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively serializing nested models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: merge extra entries when the model subclasses dict.
        if issubclass(DeviceClusterAlertConfigPaginationResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeviceClusterAlertConfigPaginationResponse):
            return False

        # Field-by-field comparison via the instance __dict__.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
738bdb26a148ff5cd279ad5cd64b547b3bc91b58 | 11334e46d3575968de5062c7b0e8578af228265b | /plib/new/speak.py | ed93af4c89a0cdd34c1bc51ae232d418b5af5777 | [] | no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 4,399 | py | #!/usr/bin/python3
#
# speak.py Speaker utilities
# includes protection from quotes and apostrophes in phrase
# removes asterisks
# observes quietTime from 11PM until 10AM
#
# includes optional vol parameter (range 10-500 useful)
# includes optional ignore (quietTime) parameter
# Oct2019: increased volume for MonkMakes Amplified Speaker
# reduced speed to 150wpm (default was 175)
# switched to espeak-ng (supported, better quality)
# say( phrase, vol=125, anytime=False)
# whisper( phrase, vol= 50, anytime=True)
# shout( phrase, vol=250, anytime=False)
import subprocess
import sys
sys.path.append('/home/pi/Carl/plib')  # make Carl's shared libraries importable
import runLog
import time
debug = False  # set True for verbose quietTime diagnostics
import math
# QUIET TIME is before 10AM and after 11PM
# (unless told to ignore , then never quietTime
def quietTime(startOK=10, notOK=23, ignore=False):
    """Return True while speech should be suppressed.

    Quiet time is any hour outside the half-open window [startOK, notOK)
    (defaults: quiet before 10AM and from 11PM on).  When `ignore` is True
    it is never quiet time.
    """
    timeNow = time.localtime()
    if debug:
        print("time.localtime().tm_hour():", timeNow.tm_hour)
        print("startOK: {} notOK: {}".format(startOK, notOK))
    if ignore:
        return False
    return not (startOK <= timeNow.tm_hour < notOK)
# used when espeak was broke
def say_flite(phrase,vol=100,anytime=False):
    """Speak phrase with the flite TTS engine (fallback when espeak broke).

    vol is a percentage-like loudness (100 = unity); it is mapped to
    omxplayer's double-millibel scale below.  Honors quietTime() unless
    anytime is True.
    """
    # Sanitize the phrase so it survives the shell quoting used below.
    phrase = phrase.replace("I'm","I m")
    phrase = phrase.replace("'","")
    phrase = phrase.replace('"',' quote ')
    phrase = phrase.replace('*',"")
    # flite volume is double millibels from 0 to -6000
    # whisper should be around 35-40%
    # say/normal volume is around 80
    # shout is like 100 to 150, distorts at 170
    # Map vol to double millibels: vol=100 -> 0 (unity gain).
    YYY = int(2000 * (math.log(int(vol)/100.0)))
    if (quietTime(ignore=anytime)):
        # During quiet hours, log the request instead of speaking.
        print("QuietTime speak request: {} at vol: {}".format(phrase,vol))
    else:
        try:
            # Synthesize to a temp wav, play it, then delete it.
            # NOTE(review): tmp.wav is left behind if flite/omxplayer fails --
            # confirm whether the cleanup should move to a finally block.
            subprocess.check_output(['flite -t "%s" -o tmp.wav' % phrase], stderr=subprocess.STDOUT, shell=True)
            subprocess.check_output(['omxplayer --vol "%d" tmp.wav' % YYY], stderr=subprocess.STDOUT, shell=True)
            subprocess.check_output(['rm tmp.wav'], stderr=subprocess.STDOUT, shell=True)
        except KeyboardInterrupt:
            sys.exit(0)
# Speak a phrase using espeak
# Options: vol: 10 is whisper, 50 is "normal Carl", 200 is shouting, 500 is screaming
# anytime: True means ignore quietTime check
def say_espeak(phrase,vol=100,anytime=False):
    """Speak phrase via espeak-ng at amplitude vol, honoring quietTime().

    Apostrophes, quotes and asterisks are stripped or replaced so the
    phrase is safe inside the shell command built below.
    """
    phrase = phrase.replace("I'm","I m")
    phrase = phrase.replace("'","")
    phrase = phrase.replace('"',' quote ')
    phrase = phrase.replace('*',"")
    # subprocess.check_output(['espeak -ven+f3 -s200 "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
    if (quietTime(ignore=anytime)):
        # During quiet hours, log the request instead of speaking.
        print("QuietTime speak request: {} at vol: {}".format(phrase,vol))
    else:
        # subprocess.check_output(['espeak -ven-us+f5 -a'+str(vol)+' "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
        # -s150: 150 words per minute; -ven-us+f5: US English, female variant 5.
        subprocess.check_output(['espeak-ng -s150 -ven-us+f5 -a'+str(vol)+' "%s"' % phrase], stderr=subprocess.STDOUT, shell=True)
def say(phrase,vol=125,anytime=False):
    """Speak at normal volume (espeak amplitude 125 by default)."""
    say_espeak(phrase,vol,anytime)
    # vol = 50 for HP amplified spkr
    # vol = vol + 40 # adjust for flite
    # say_flite(phrase,vol,anytime)
def shout(phrase,vol=250,anytime=False):
    """Speak loudly (espeak amplitude 250 by default)."""
    say_espeak(phrase,vol,anytime)
    # vol = vol - 50 # adjust for flite
    # say_flite(phrase,vol,anytime)
def whisper(phrase,vol=30,anytime=False):
    """Speak quietly (espeak amplitude 30 by default).

    NOTE(review): the file-header comment advertises whisper(vol=50,
    anytime=True) but the code uses 30/False -- confirm which is intended.
    """
    say_espeak(phrase,vol,anytime)
    # vol = vol + 30 # adjust for flite
    # say_flite(phrase,vol,anytime=False)
# ##### MAIN ####
@runLog.logRun
def main():
    """CLI entry point.

    Usage: speak.py [phrase [volume ["True" to ignore quiet hours]]].
    With no arguments, enables debug tracing and runs a spoken self-test.
    """
    global debug
    # say("hello from speak dot p y test main")
    # say_espeak("whats the weather, long quiet?")
    if (len(sys.argv) >1):
        strToSay = sys.argv[1]
        if ( len(sys.argv)>2 ):
            vol=int(sys.argv[2])
        else:
            vol=50
        if ( len(sys.argv)>3 ):
            # Any third argument other than the literal string "True" is False.
            ignore= ( sys.argv[3] == "True" )
        else:
            ignore=False
        say(strToSay,vol,ignore)
    else:
        # No arguments: turn on debug tracing and exercise each voice helper.
        debug = True
        say("Just saying. This phrase contained an apostrophe which isn't allowed")
        whisper('I need to whisper. This phrase contains "a quoted word" ')
        shout("I feel like shouting. My name is Carl. ")
        whisper("Whisper at 20. I don't know Pogo. Never met the little bot",20,True)
if __name__ == "__main__":
main()
| [
"slowrunner@users.noreply.github.com"
] | slowrunner@users.noreply.github.com |
400a7765748d145a2e7be58f1fa69798b3b9e1b3 | 717f5324f8d4ce44a94e2c0b654a2d2a4f0a3c74 | /dwi_ml/training/utils/monitoring.py | 3b7235fdf71fa311640ad471f932599b25d9c149 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jhlegarreta/dwi_ml | 3ac7ef28f3bba13f34a8f38a9f910cf2946dcc7b | a7c03f26780677e4eaccff9b381d5a8ec6120293 | refs/heads/master | 2023-03-04T07:22:15.737775 | 2023-02-23T18:20:53 | 2023-02-23T18:20:53 | 242,525,253 | 0 | 0 | MIT | 2020-06-10T00:53:06 | 2020-02-23T13:49:01 | Python | UTF-8 | Python | false | false | 7,185 | py | # -*- coding: utf-8 -*-
from collections import deque
import timeit
from datetime import datetime
from typing import List, Tuple
import numpy as np
class TimeMonitor(object):
    """Tracks the wall-clock duration (in minutes) of successive epochs."""

    def __init__(self):
        # One entry per completed epoch, expressed in minutes.
        self.epoch_durations = []
        self._start_time = None

    def start_new_epoch(self):
        """Record the instant the current epoch begins."""
        self._start_time = datetime.now()

    def end_epoch(self):
        """Close the running epoch and append its duration in minutes."""
        if self._start_time is None:
            raise ValueError("You should not end the epoch; it has not "
                             "started (or you haven't told the TimeMonitor).")
        elapsed = datetime.now() - self._start_time
        self.epoch_durations.append(elapsed.total_seconds() / 60)
        self._start_time = None
class BatchHistoryMonitor(object):
    """ History of some value for each iteration during training, and mean
    value for each epoch.

    Example of usage: History of the loss during training.
        loss_monitor = BatchHistoryMonitor()
        ...
        loss_monitor.start_new_epoch()

        # Call update at each iteration
        loss_monitor.update(2.3)
        ...
        loss_monitor.end_epoch()  # call at epoch end
        ...
        loss_monitor.average_per_epoch  # the loss curve, one mean per epoch
    """
    def __init__(self, weighted: bool = False):
        # When True, update() expects a weight and end_epoch() computes a
        # weighted mean instead of a plain mean.
        self.is_weighted = weighted

        # State:
        self.current_batch_values = []
        self.current_batch_weights = []
        self.average_per_epoch = []
        self.current_epoch = -1

    def update(self, value, weight=None):
        """
        Register one value for the current epoch.

        Note. Does not save the update if value is inf.

        Parameters
        ----------
        value: The value to be monitored.
        weight: The weight in the average (used only if this monitor is
            weighted). For instance, for a loss monitor, the batch size.
        """
        if np.isinf(value):
            return

        self.current_batch_values.append(value)
        if self.is_weighted:
            self.current_batch_weights.append(weight)

    def start_new_epoch(self):
        """Reset the per-batch buffers; the previous epoch must be ended."""
        assert len(self.average_per_epoch) == self.current_epoch + 1, \
            "Did you forget to end previous epoch? Number of epoch values " \
            "is {} but monitor's current epoch is {}" \
            .format(len(self.average_per_epoch), self.current_epoch)

        self.current_epoch += 1
        self.current_batch_values = []
        self.current_batch_weights = []

    def end_epoch(self):
        """
        Compute the (possibly weighted) mean of the current epoch and add it
        to the per-epoch averages.
        """
        if not self.is_weighted:
            mean_value = np.mean(self.current_batch_values)
        else:
            mean_value = sum(np.multiply(self.current_batch_values,
                                         self.current_batch_weights))
            mean_value /= sum(self.current_batch_weights)

        self.average_per_epoch.append(mean_value)

    def get_state(self):
        """Return a picklable snapshot of the per-epoch state."""
        # Not saving current batch values. Checkpoints should be saved only at
        # the end of epochs.
        return {'average_per_epoch': self.average_per_epoch,
                'current_epoch': self.current_epoch,
                'is_weighted': self.is_weighted,
                }

    def set_state(self, state):
        """Restore a snapshot produced by get_state()."""
        self.average_per_epoch = state['average_per_epoch']
        self.current_epoch = state['current_epoch']
        # Bug fix: get_state() saves 'is_weighted' but it was never restored,
        # so a reloaded weighted monitor silently became unweighted. Use
        # .get() so older checkpoints without the key still load unchanged.
        self.is_weighted = state.get('is_weighted', self.is_weighted)
class BestEpochMonitoring(object):
    """
    Tracks the lowest loss seen so far and counts consecutive epochs without
    sufficient improvement ("bad" epochs), to support stopping training early
    once `patience` bad epochs have accumulated.
    """
    def __init__(self, patience: int, min_eps: float = 1e-6):
        """
        Parameters
        -----------
        patience: int
            Maximal number of bad epochs we allow.
        min_eps: float, optional
            Precision term to define what we consider as "improving": when the
            loss is at least min_eps smaller than the previous best loss.
        """
        self.patience = patience
        self.min_eps = min_eps

        self.best_value = None
        self.best_epoch = None
        self.n_bad_epochs = None

    def update(self, loss, epoch):
        """
        Record the loss of a finished training epoch.

        Returns True when the epoch was "bad" (no sufficient improvement) and
        False when it set a new best value (or was the very first epoch).
        """
        never_updated = self.best_value is None
        if never_updated or loss < self.best_value - self.min_eps:
            # New best (or first observation): remember it and reset the
            # bad-epoch counter.
            self.best_value = loss
            self.best_epoch = epoch
            self.n_bad_epochs = 0
            return False

        # Any improvement smaller than min_eps counts as a bad epoch.
        self.n_bad_epochs += 1
        return True

    @property
    def is_patience_reached(self):
        """True once the number of bad epochs has reached the patience."""
        return self.n_bad_epochs >= self.patience

    def get_state(self):
        """ Get object state """
        return {'patience': self.patience,
                'min_eps': self.min_eps,
                'best_value': self.best_value,
                'best_epoch': self.best_epoch,
                'n_bad_epochs': self.n_bad_epochs}

    def set_state(self, state):
        """ Set object state """
        self.patience = state['patience']
        self.min_eps = state['min_eps']
        self.best_value = state['best_value']
        self.best_epoch = state['best_epoch']
        self.n_bad_epochs = state['n_bad_epochs']
class EarlyStoppingError(Exception):
    """Exception raised when an experiment is stopped by early-stopping

    Attributes
        message -- explanation of why early stopping occured"""
    def __init__(self, message):
        # Bug fix: pass the message to Exception so str(e), logging and
        # tracebacks show it (previously it was stored on self only, leaving
        # str(e) empty).
        super().__init__(message)
        self.message = message
class IterTimer(object):
    """
    Wrap an iterable and keep a rolling history of per-iteration durations.

    Hint: After each iteration, you can check that the maximum allowed time has
    not been reached by using:

    # Ex: To check that time remaining is less than one iter + 30 seconds
    time.time() + iter_timer.mean + 30 > max_time

    # Ex: To allow some incertainty. Ex: prevent continuing in the case the
    # next iter could be twice as long as usual:
    time.time() + iter_timer.mean * 2.0 + 30 > max_time
    """
    def __init__(self, history_len=5):
        # Rolling window of the most recent iteration durations (seconds).
        self.history = deque(maxlen=history_len)
        self.iterable = None
        self.start_time = None

    def __call__(self, iterable):
        """Bind an iterable and return self so it can be looped over."""
        self.iterable = iter(iterable)
        return self

    def __iter__(self):
        return self

    def __next__(self):
        # Record how long the previous iteration took, then hand out the
        # next element of the wrapped iterable.
        now = timeit.default_timer()
        if self.start_time is not None:
            self.history.append(now - self.start_time)
        self.start_time = now
        return next(self.iterable)

    @property
    def mean(self):
        """Average recorded iteration duration, or 0 before any completes."""
        return np.mean(self.history) if len(self.history) > 0 else 0
| [
"emmanuelle.renauld@usherbrooke.ca"
] | emmanuelle.renauld@usherbrooke.ca |
e2ad5a6ddf46c76e3e62446828c50a10623f0847 | 7a704e838d89f942a1099fec141f1fbe9828e528 | /hysia/core/monitor/monitor.py | edea0e3a6e17a14265ce4c732512112661249259 | [
"Apache-2.0"
] | permissive | cap-ntu/Video-to-Retail-Platform | 3ee00d22b7fd94925adac08c5ea733ee647f4574 | 757c68d9de0778e3da8bbfa678d89251a6955573 | refs/heads/hysia_v2 | 2023-02-14T05:22:16.792928 | 2021-01-10T02:31:43 | 2021-01-10T02:31:43 | 212,741,650 | 63 | 20 | Apache-2.0 | 2021-01-10T02:32:00 | 2019-10-04T05:22:08 | Python | UTF-8 | Python | false | false | 7,021 | py | # Desc: Monitor to keep track of system statistics including CPU, GPU, memory, process and network.
# Author: Zhou Shengsheng
# Date: 24/04/19
# References:
# (1) Get cpu info:
# * py-cpuinfo: https://github.com/workhorsy/py-cpuinfo
# * psutil: https://github.com/giampaolo/psutil
# (2) Get gpu info:
# * gputil: https://github.com/ZhouShengsheng/gputil
# install cmd: pip install git+git://github.com/ZhouShengsheng/gputil.git@master
# (3) Get memory, process and network stats:
# * psutil: https://github.com/giampaolo/psutil
import time
import os
import cpuinfo
import psutil
import GPUtil
from .sys_stat import SysStat
from .cpu_stat import CPUStat
from .memory_stat import MemoryStat
from .gpu_stat import GPUStat
from .process_stat import *
from .gpu_process_stat import GPUProcessStat
from .network_stat import *
class Monitor(object):
"""Monitor to keep track of system statistics including CPU, GPU, memory, process and network."""
def __init__(self):
# Query and cache cpu static stat
self.__cachedCPUStaticStat = self.__queryCPUStaticStat()
# Create sys stat
self.__sysStat = SysStat()
def getSysStat(self):
"""
Get system statistics. This function will always get the latest system stats.
Returns:
sysStat (SysStat): System statistics.
"""
sysStat = self.__sysStat
sysStat.upTime = time.time() - psutil.boot_time()
sysStat.cpuStat = self.__queryCPUStat()
sysStat.memoryStat = self.__queryMemoryStat()
sysStat.gpuStats = self.__queryGPUStats()
sysStat.gpuCount = len(sysStat.gpuStats)
sysStat.processStat, sysStat.processStats = self.__queryProcessStats()
sysStat.processCount = len(sysStat.processStats)
sysStat.gpuProcessStats = self.__queryGPUProcessStats()
sysStat.gpuProcessCount = len(sysStat.gpuProcessStats)
sysStat.networkStats = self.__queryNetworkStats()
sysStat.networkCount = len(sysStat.networkStats)
return self.__sysStat
def __queryCPUStaticStat(self):
"""
Query cpu static stat.
Returns:
cpuStaticStat (list): CPU static statistics including
model, count, freqs and cache.
"""
cpuInfo = cpuinfo.get_cpu_info()
model = cpuInfo['brand']
count = cpuInfo['count']
extractFloat = lambda s: float(s.split()[0])
cache = (extractFloat(cpuInfo['l1_data_cache_size']) +
extractFloat(cpuInfo['l1_instruction_cache_size']) +
extractFloat(cpuInfo['l2_cache_size']) +
extractFloat(cpuInfo['l3_cache_size']))
freqs = psutil.cpu_freq()
freqs = (freqs[0], freqs[1], freqs[2])
return (model, count, freqs, cache)
def __queryCPUStat(self):
"""
Query cpu stat.
Returns:
cpuStat (CPUStat): CPU statistics.
"""
cpuStaticStat = self.__cachedCPUStaticStat
loads = os.getloadavg()
utilization = psutil.cpu_percent() / 100.
cpuTimes = tuple(psutil.cpu_times())
cpuTimesRatio = tuple(x / 100. for x in psutil.cpu_times_percent())
return CPUStat(cpuStaticStat[0], cpuStaticStat[1], cpuStaticStat[2], cpuStaticStat[3],
loads, utilization, cpuTimes, cpuTimesRatio)
def __queryMemoryStat(self):
"""
Query memory stat.
Returns:
memoryStat (MemoryStat): Memory statistics.
"""
vm = psutil.virtual_memory()
swap = psutil.swap_memory()
return MemoryStat(vm[0], vm[1], vm[2] / 100., vm[3], vm[4], vm[7], vm[8], vm[9],
swap[0], swap[1], swap[2], swap[3] / 100.)
def __queryGPUStats(self):
"""
Query stats for all GPUs.
Returns:
gpuStats (list): GPU statistics.
"""
gpus = GPUtil.getGPUs()
if gpus:
return [GPUStat(gpu.id, gpu.uuid, gpu.name, gpu.memoryTotal, gpu.memoryUsed,
gpu.memoryFree, gpu.memoryUtil, gpu.temperature) for gpu in gpus]
return []
def __queryProcessStats(self):
"""
Query stats for all processes.
Returns:
processStats(list): Process statistics.
"""
selfStat = None
stats = []
# Get current pid
pid = os.getpid()
# Iterate over all processes
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict()
except psutil.NoSuchProcess:
pass
else:
cpuTimes = pinfo['cpu_times']
cpuTimes = (cpuTimes.user, cpuTimes.system,
cpuTimes.children_user, cpuTimes.children_system)
memoryInfo = pinfo['memory_info']
memoryInfo = (memoryInfo.rss, memoryInfo.vms, memoryInfo.shared,
memoryInfo.text, memoryInfo.lib, memoryInfo.data, memoryInfo.dirty)
status = ProcessStatus.fromPsutil(proc.status())
ctxSwitches = (pinfo['num_ctx_switches'].voluntary, pinfo['num_ctx_switches'].involuntary)
fdCount = pinfo['num_fds'] if pinfo['num_fds'] else 0
threadCount = pinfo['num_threads'] if pinfo['num_threads'] else 0
stat = ProcessStat(pinfo['pid'], pinfo['name'], pinfo['ppid'], cpuTimes,
pinfo['cpu_percent'] / 100., memoryInfo, status, pinfo['nice'],
pinfo['ionice'].value, ctxSwitches, fdCount, threadCount)
if not selfStat and pid == stat.pid:
selfStat = stat
stats.append(stat)
return selfStat, stats
def __queryGPUProcessStats(self):
"""
Query stats for all GPU processes.
Returns:
gpuProcessStats (list): GPU process statistics.
"""
processes = GPUtil.getGPUProcesses()
if processes:
return [GPUProcessStat(proc.pid, proc.processName, proc.gpuId, proc.gpuUuid,
proc.usedMemory) for proc in processes]
return []
def __queryNetworkStats(self):
ifStatDict = psutil.net_if_stats()
if not ifStatDict:
return []
ifAddrDict = psutil.net_if_addrs()
stats = []
for nic, ifStat in ifStatDict.items():
stat = NetworkStat()
stat.nic = nic
stat.isUp = ifStat.isup
stat.duplex = NicDuplexType.fromPsutil(ifStat.duplex)
stat.speed = ifStat.speed
stat.mtu = ifStat.mtu
ifAddrs = ifAddrDict[nic]
addrs = []
for ifAddr in ifAddrs:
addrs.append((ifAddr.family, ifAddr.address, ifAddr.netmask,
ifAddr.broadcast, ifAddr.ptp))
stat.addrs = addrs
stats.append(stat)
return stats
| [
"huaizhen001@e.ntu.edu.sg"
] | huaizhen001@e.ntu.edu.sg |
9193ed0b958e728f1231e0f2ded9824e6c27ba8e | a35dabf440ca3818ed816f8df86f7cd0f79cca4a | /regulations/tests/templatetags_in_context_tests.py | 082b89892bdf6d50c80749b47b87ee16e9ffbd4f | [
"CC0-1.0"
] | permissive | DalavanCloud/regulations-site | 8b7afba8d46c313a7ff06bb6b3778e8ad5516b11 | 0ed37754a8025b6e7d631cf482e987600a6c884b | refs/heads/master | 2020-04-25T06:42:15.255566 | 2018-12-03T21:08:07 | 2018-12-03T21:08:07 | 172,589,907 | 1 | 0 | NOASSERTION | 2019-02-25T21:43:45 | 2019-02-25T21:43:45 | null | UTF-8 | Python | false | false | 1,729 | py | from unittest import TestCase
from django.template import Context, Template
class TemplatetagsInContextTest(TestCase):
def test_in_context(self):
text = "{% load in_context %}"
text += "1. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% begincontext c1 %}\n"
text += "2. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% endcontext %}{% begincontext c1 c2 %}\n"
text += "3. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% begincontext c2a %}\n"
text += "4. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}\n"
text += "{% endcontext %}{% endcontext %}\n"
text += "5. {{ f1 }}{{ f2 }}{{ f3 }}{{ f4 }}"
context = {'f1': 'f1',
'c1': {'f2': 'c1.f2', 'f1': 'c1.f1'},
'c2': {'f2': 'c2.f2',
'f3': 'c2.f3', 'c2a': {'f4': 'c2a.f4'}}}
output = Template(text).render(Context(context))
lines = output.split("\n")
self.assertEqual("1. f1", lines[0])
self.assertEqual("2. c1.f1c1.f2", lines[2])
self.assertEqual("3. c1.f1c2.f2c2.f3", lines[4])
self.assertEqual("4. c2a.f4", lines[6])
self.assertEqual("5. f1", lines[8])
def test_in_context_cascade(self):
"""Make sure fields that are not dicts get passed along"""
text = "{% load in_context %}{% begincontext c1 f2 %}"
text += "{{ f1 }}{{ f2 }}\n"
text += "{% endcontext %}"
text += "{{ f1 }}{{ f2 }}"
context = {'f1': 'f1', 'f2': 'f2', 'c1': {'f1': 'c1.f1'}}
output = Template(text).render(Context(context))
lines = output.split("\n")
self.assertEqual("c1.f1f2", lines[0])
self.assertEqual("f1f2", lines[1])
| [
"cm.lubinski@gmail.com"
] | cm.lubinski@gmail.com |
c06eb32013963d9155ed9bfbf49eab85864be127 | 48e361837c24ea3def1d8ddbe6191368a03ae50e | /python/sandbox/test.py | 96b9d29047d21934cb78fd127384349c64c9bc7f | [] | no_license | paglenn/random | 7f2134d3eeed7aebbd8f20e3df7299df58d99704 | 30e2a8522c303b1518960d7bf44220996e66c6ea | refs/heads/master | 2021-06-14T11:57:36.827255 | 2017-04-10T04:27:35 | 2017-04-10T04:27:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # 2d self-avoiding random walk
#high rejection rate --> VERY SLOW...
import numpy as np
import matplotlib.pyplot as pp
import random
def SAW ( Nsteps):
goodPath = 0
while goodPath == 0:
X = [0 for i in range(Nsteps) ]
Y = [0 for i in range(Nsteps) ]
visited_sites = [(0,0) for i in range(Nsteps) ]
for step in range(1,Nsteps):
directions = [(0,1),(0,-1),(1,0),(-1,0)]
random_dir = random.choice(directions)
x = X[step-1] + random_dir[0]
y = Y[step-1] + random_dir[1]
if (x,y) in visited_sites:
goodPath = 0
break
else:
X[step] = x
Y[step] = y
visited_sites[step] = (x,y)
goodPath = 1
return visited_sites
import time
start_time = time.time()
for i in range(10000):J = SAW(10)
print("{0:e}".format(time.time()-start_time) )
| [
"nls.pglenn@gmail.com"
] | nls.pglenn@gmail.com |
7f082c545f267641e11c6aae37630408fd76f091 | ba88b66e61f0fd1ec0719b61568f0c883d02e534 | /inventory/urls.py | b2223228bde84132965119ec38b72aeb6a9e36c8 | [] | no_license | bnmng/spltcs | fbc9b5fb5342f5ee0a8bd080f957b4022509b3e9 | 5f19136d8a266b3d2094397cafe41b3ca1f45e78 | refs/heads/master | 2020-12-26T18:47:07.348996 | 2020-08-02T21:57:44 | 2020-08-02T21:57:44 | 237,602,374 | 0 | 0 | null | 2020-03-03T15:07:04 | 2020-02-01T11:07:46 | Python | UTF-8 | Python | false | false | 3,359 | py | from django.urls import path
from inventory.views import (CategoryAjaxMakeModels, CategoryAjaxSuccessMakeModels, CategoryCreate, CategoryDelete, CategoryDetail, CategoryList, CategoryUpdate, ItemAjaxEntity, ItemAjaxLocation, ItemAjaxMakeModel, ItemAjaxRole, ItemAjaxSuccessEntity, ItemAjaxSuccessLocation, ItemAjaxSuccessMakeModel, ItemAjaxSuccessRole, ItemCreate, ItemDelete, ItemDetail, ItemList, ItemUpdate, MakeModelCreate, MakeModelDelete, MakeModelDetail, MakeModelList, MakeModelUpdate, RoleAjaxItems, RoleAjaxSuccessItems, RoleCreate, RoleDelete, RoleDetail, RoleList, RoleUpdate)
urlpatterns = [
path('', ItemList.as_view(), name='inventory'),
path('items', ItemList.as_view(), name='item_list'),
path('create', ItemCreate.as_view(), name='item_create'),
path('<int:pk>', ItemDetail.as_view(), name='item_detail'),
path('<int:pk>/update', ItemUpdate.as_view(), name='item_update'),
path('<int:pk>/delete', ItemDelete.as_view(), name='item_delete'),
path('item_ajax_location', ItemAjaxLocation.as_view(), name='item_ajax_location'),
path('item_ajax_location/<int:pk>', ItemAjaxSuccessLocation.as_view(), name='item_ajaxsuccess_location'),
path('item_ajax_makemodel', ItemAjaxMakeModel.as_view(), name='item_ajax_makemodel'),
path('item_ajax_makemodel/<int:pk>', ItemAjaxSuccessMakeModel.as_view(), name='item_ajaxsuccess_makemodel'),
path('item_ajax_role', ItemAjaxRole.as_view(), name='item_ajax_role'),
path('item_ajax_role/<int:pk>', ItemAjaxSuccessRole.as_view(), name='item_ajaxsuccess_role'),
path('item_ajax_entity', ItemAjaxEntity.as_view(), name='item_ajax_entity'),
path('item_ajax_entity/<int:pk>', ItemAjaxSuccessEntity.as_view(), name='item_ajaxsuccess_entity'),
path('categories', CategoryList.as_view(), name='category_list'),
path('category/create', CategoryCreate.as_view(), name='category_create'),
path('category/<int:pk>', CategoryDetail.as_view(), name='category_detail'),
path('category/<int:pk>/update', CategoryUpdate.as_view(), name='category_update'),
path('category/<int:pk>/delete', CategoryDelete.as_view(), name='category_delete'),
path('category_ajax_makemodels', CategoryAjaxMakeModels.as_view(), name='category_ajax_makemodels'),
path('category_ajax_makemodels/<int:pk>', CategoryAjaxSuccessMakeModels.as_view(), name='category_ajaxsuccess_makemodels'),
path('roles', RoleList.as_view(), name='role_list'),
path('role/create', RoleCreate.as_view(), name='role_create'),
path('role/<int:pk>', RoleDetail.as_view(), name='role_detail'),
path('role/<int:pk>/update', RoleUpdate.as_view(), name='role_update'),
path('role/<int:pk>/delete', RoleDelete.as_view(), name='role_delete'),
path('role_ajax_items', RoleAjaxItems.as_view(), name='role_ajax_items'),
path('role_ajax_items/<int:pk>', RoleAjaxSuccessItems.as_view(), name='role_ajaxsuccess_items'),
path('makemodels', MakeModelList.as_view(), name='makemodel_list'),
path('makemodel/create', MakeModelCreate.as_view(), name='makemodel_create'),
path('makemodel/<int:pk>', MakeModelDetail.as_view(), name='makemodel_detail'),
path('makemodel/<int:pk>/update', MakeModelUpdate.as_view(), name='makemodel_update'),
path('makemodel/<int:pk>/delete', MakeModelDelete.as_view(), name='makemodel_delete'),
]
# vim: ai ts=4 sts=4 et sw=4
| [
"benjamin@bnmng.com"
] | benjamin@bnmng.com |
3fad5952bb69e1a9749fdb8c1ede20354824110b | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_create_project_option.py | d91bccd26bce8e2e0e7bc236f8a4a808d20d9140 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneCreateProjectOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'parent_id': 'str',
'domain_id': 'str',
'description': 'str'
}
attribute_map = {
'name': 'name',
'parent_id': 'parent_id',
'domain_id': 'domain_id',
'description': 'description'
}
def __init__(self, name=None, parent_id=None, domain_id=None, description=None):
"""KeystoneCreateProjectOption - a model defined in huaweicloud sdk"""
self._name = None
self._parent_id = None
self._domain_id = None
self._description = None
self.discriminator = None
self.name = name
self.parent_id = parent_id
if domain_id is not None:
self.domain_id = domain_id
if description is not None:
self.description = description
@property
def name(self):
"""Gets the name of this KeystoneCreateProjectOption.
项目名称。必须以存在的\"区域ID_\"开头,长度小于等于64字符。例如区域“华北-北京一”的区域ID为“cn-north-1”,在其下创建项目时,项目名应填“cn-north-1_IAMProject”
:return: The name of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this KeystoneCreateProjectOption.
项目名称。必须以存在的\"区域ID_\"开头,长度小于等于64字符。例如区域“华北-北京一”的区域ID为“cn-north-1”,在其下创建项目时,项目名应填“cn-north-1_IAMProject”
:param name: The name of this KeystoneCreateProjectOption.
:type: str
"""
self._name = name
@property
def parent_id(self):
"""Gets the parent_id of this KeystoneCreateProjectOption.
区域对应的项目ID,例如区域“华北-北京一”区域对应的项目ID为:04dd42abe48026ad2fa3c01ad7fa.....,获取方式请参见:[获取账号、IAM用户、项目、用户组、委托的名称和ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html)。
:return: The parent_id of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this KeystoneCreateProjectOption.
区域对应的项目ID,例如区域“华北-北京一”区域对应的项目ID为:04dd42abe48026ad2fa3c01ad7fa.....,获取方式请参见:[获取账号、IAM用户、项目、用户组、委托的名称和ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html)。
:param parent_id: The parent_id of this KeystoneCreateProjectOption.
:type: str
"""
self._parent_id = parent_id
@property
def domain_id(self):
"""Gets the domain_id of this KeystoneCreateProjectOption.
项目所属账号ID。
:return: The domain_id of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this KeystoneCreateProjectOption.
项目所属账号ID。
:param domain_id: The domain_id of this KeystoneCreateProjectOption.
:type: str
"""
self._domain_id = domain_id
@property
def description(self):
"""Gets the description of this KeystoneCreateProjectOption.
项目描述信息,长度小于等于255字符。
:return: The description of this KeystoneCreateProjectOption.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this KeystoneCreateProjectOption.
项目描述信息,长度小于等于255字符。
:param description: The description of this KeystoneCreateProjectOption.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneCreateProjectOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
955c42544a26f4c5add16a7771bd1becf290236c | 3e6bf1ba30707aacc6e16a84d93e449fcd4a32b7 | /joins/migrations/0009_auto_20160707_0613.py | 652d8b1db03734245d5f3a38d1df06aa86bf6df3 | [] | no_license | osbornetunde/Lauch | a4cddea55b1af341d1d894fc7635c17cddab5707 | fe05c1974ae06919b49c607e96b387a4da602bfa | refs/heads/master | 2021-01-19T05:29:55.148350 | 2016-07-20T06:30:11 | 2016-07-20T06:30:11 | 63,755,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-07 05:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the JoinFriends model to its singular form JoinFriend."""
    # Must run after the migration that created JoinFriends.
    dependencies = [
        ('joins', '0008_joinfriends'),
    ]
    operations = [
        # Rename only: JoinFriends -> JoinFriend.
        migrations.RenameModel(
            old_name='JoinFriends',
            new_name='JoinFriend',
        ),
    ]
| [
"osbornetunde@gmail.com"
] | osbornetunde@gmail.com |
c9bf084657308a4de0a109a873e9eae451c3a16a | c5388342c19c5605f2113f327c1023ee74eb7144 | /03-Multidimensional-Lists/Exercise/07_bombs.py | e885932235d36fd249041f58b88425c2d2cabcc7 | [] | no_license | zhyordanova/Python-Advanced | 8b0cd3f31c12e726b7846d54c6ee7bfb602a07a9 | ae2a9416e89eae6c40ae965a3ad65af54b36e333 | refs/heads/main | 2023-05-04T03:17:46.883246 | 2021-05-24T22:49:31 | 2021-05-24T22:49:31 | 349,577,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | def is_valid(row, col, size):
return 0 <= row < size and 0 <= col < size
def explode(row, col, size, matrix):
    """Detonate the bomb at (row, col) in the size x size matrix.

    Every alive cell (value > 0) in the surrounding 3x3 neighbourhood --
    including the bomb cell itself -- loses the bomb's value.  The matrix
    is modified in place.
    """
    power = matrix[row][col]
    neighbourhood = (
        (r, c)
        for r in range(row - 1, row + 2)
        for c in range(col - 1, col + 2)
    )
    for r, c in neighbourhood:
        # Bounds check inlined from is_valid(); skip already-dead cells.
        if 0 <= r < size and 0 <= c < size and matrix[r][c] > 0:
            matrix[r][c] -= power
n = int(input())
def init_matrix(n):
    """Read n rows of whitespace-separated integers from stdin."""
    return [[int(token) for token in input().split()] for _ in range(n)]
def print_result(matrix):
    """Print the matrix, one space-separated row per line."""
    for current_row in matrix:
        print(' '.join(str(cell) for cell in current_row))
matrix = init_matrix(n)
# Bombs arrive on one line as "row,col" pairs separated by spaces.
bomb_coordinates = input().split()
for bomb in bomb_coordinates:
    tokens = [int(el) for el in bomb.split(',')]
    bomb_row = tokens[0]
    bomb_col = tokens[1]
    # Only a still-alive (> 0) cell can detonate.
    if matrix[bomb_row][bomb_col] > 0:
        explode(bomb_row, bomb_col, n, matrix)
# Count and sum the cells that survived all detonations.
alive_cells_count = 0
alive_cells_sum = 0
for row in range(n):
    for col in range(n):
        cell = matrix[row][col]
        if cell > 0:
            alive_cells_count += 1
            alive_cells_sum += cell
print(f"Alive cells: {alive_cells_count}")
print(f"Sum: {alive_cells_sum}")
print_result(matrix)
| [
"zhivka.yordanova@mentormate.com"
] | zhivka.yordanova@mentormate.com |
e02e07efc8781878d94ad37a562af666faad436d | 87140007e96872d3611f0778eb0eebe5799616d7 | /runs/seq-nobro-iter05000.cfg.py | a151e8050cd85bb2c0c524781a03097f9710d643 | [
"MIT"
] | permissive | janpawellek/broeval | 49499fa302abff916ffced201034d3b9394503cd | 57e31aa6e354d0bba88103b44910483e8d982d00 | refs/heads/master | 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py |
# Write results to this file
OUTFILE = 'runs/seq-nobro-iter05000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 5000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
| [
"pawellek@stud.uni-heidelberg.de"
] | pawellek@stud.uni-heidelberg.de |
b993cb6e90864558f76aefb296699a34f5fea66e | 85a0ee08b54b2c5e3154e3727b92c37915b4c1de | /Sample/Python_Future_Sample/實單交易/63.py | 62772c75e90765557034eb58bfeb589df05361f8 | [] | no_license | yaotony/sandbox-empty | 877da9b9ba0ec658bbdc8acc79a97267f96408b9 | 85f04e5db5d26a04fad9ae4ad6d3c86977a9f865 | refs/heads/master | 2022-05-20T17:14:51.348759 | 2022-05-04T08:06:56 | 2022-05-04T08:06:56 | 35,472,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # -*- coding: UTF-8 -*-
# Load the required packages.
import sys,indicator,datetime,haohaninfo
# Broker identifier.
Broker = 'Masterlink_Future'
# Data table/class to subscribe to (tick matches).
Table = 'match'
# Product (contract) name, taken from the command line.
Prod = sys.argv[1]
# Today's date, formatted YYYYMMDD.
Date = datetime.datetime.now().strftime("%Y%m%d")
# K-bar (candlestick) object aggregated by volume
# (granularity 100 -- see indicator.KBar for the exact meaning).
KBar = indicator.KBar(Date,'volume',100)
# Build volume-based K-bars from the live quote stream.
GO = haohaninfo.GOrder.GOQuote()
for i in GO.Describe(Broker, Table, Prod):
    # NOTE(review): field indices inferred from usage (i[2] = matched price,
    # i[4] = matched quantity) -- confirm against the haohaninfo quote format.
    price = int(i[2])
    amount = int(i[4])
    KBar.VolumeAdd(price,amount)
    print(KBar.GetOpen(),KBar.GetHigh(),KBar.GetLow(),KBar.GetClose())
| [
"tony.exmail@gmail.com"
] | tony.exmail@gmail.com |
d9881b41072708753168c8ca9fc7b2c4cd8b81d1 | fd8a4f9cc859f5edf468c1a74f6a183a4c1db508 | /shoulder/transform/abstract_transform.py | 34606b3276cdaca2fb9da5ebf347ebbd4c9cce8d | [
"MIT"
] | permissive | ainfosec/shoulder | e84d8c2b0ac8e049a21dabc7d221062edcb305d6 | 8dfb059701910a1cbe57e14c676ac8930f71b7c4 | refs/heads/master | 2020-09-08T07:43:43.418527 | 2019-09-25T17:48:08 | 2019-09-25T17:48:08 | 221,065,767 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | #
# Shoulder
# Copyright (C) 2018 Assured Information Security, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
from shoulder.logger import logger
class AbstractTransform(abc.ABC):
    """Base class for register transforms.

    Concrete subclasses supply a human-readable ``description`` and a
    ``do_transform`` that rewrites a single register object; ``transform``
    applies that operation across a whole list of registers.
    """

    @property
    @abc.abstractmethod
    def description(self):
        """ Description of what this transform does """

    @abc.abstractmethod
    def do_transform(self, reg):
        """ Transform the given register object """
        return

    def transform(self, registers):
        """
        Transform the given list of registers

        :param registers: iterable of register objects to rewrite
        :return: new list with ``do_transform`` applied to every register
        """
        # Lazy %-style logging args: the message is only formatted when the
        # logger is actually enabled for INFO (str(self) may be non-trivial).
        logger.info("Applying transform: %s", self)
        return list(map(self.do_transform, registers))

    def __str__(self):
        # A transform prints as its own description.
        return self.description
| [
"jared.wright12@gmail.com"
] | jared.wright12@gmail.com |
b52409a23c489ce692b34cdd25b4c4737f26213e | e7e34e2726790686a1f239e22487fe7c957e179f | /tests/components/juicenet/test_config_flow.py | abda068b622bf9d45d9575b147cd892f53c3e702 | [
"Apache-2.0"
] | permissive | AlexxIT/home-assistant | 68a17b49644c5d943b204dc75e1f11fe3b701161 | 8de7966104911bca6f855a1755a6d71a07afb9de | refs/heads/dev | 2022-03-22T14:37:18.774214 | 2021-10-09T16:10:43 | 2021-10-09T16:10:43 | 100,278,871 | 9 | 0 | Apache-2.0 | 2022-01-31T06:18:02 | 2017-08-14T14:50:46 | Python | UTF-8 | Python | false | false | 4,171 | py | """Test the JuiceNet config flow."""
from unittest.mock import MagicMock, patch
import aiohttp
from pyjuicenet import TokenError
from homeassistant import config_entries
from homeassistant.components.juicenet.const import DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN
def _mock_juicenet_return_value(get_devices=None):
juicenet_mock = MagicMock()
type(juicenet_mock).get_devices = MagicMock(return_value=get_devices)
return juicenet_mock
async def test_form(hass):
    """Test we get the form."""
    # Start the user-initiated config flow; with no input yet it must
    # render the form without errors.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Patch the JuiceNet cloud call so no network I/O happens, and patch
    # both setup entry points to verify they fire exactly once.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
        await hass.async_block_till_done()
    # A valid token completes the flow and creates the config entry.
    assert result2["type"] == "create_entry"
    assert result2["title"] == "JuiceNet"
    assert result2["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A TokenError from the cloud API must surface as an invalid_auth
    # form error rather than aborting the flow.
    api_patch = patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=TokenError,
    )
    with api_patch:
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Network-level failures (aiohttp.ClientError) must be mapped onto the
    # cannot_connect form error.
    api_patch = patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=aiohttp.ClientError,
    )
    with api_patch:
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "cannot_connect"}
async def test_form_catch_unknown_errors(hass):
    """Test we handle cannot connect error."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any unexpected exception from the API call must fall back to the
    # generic "unknown" form error instead of crashing the flow.
    api_patch = patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        side_effect=Exception,
    )
    with api_patch:
        outcome = await hass.config_entries.flow.async_configure(
            flow["flow_id"], {CONF_ACCESS_TOKEN: "access_token"}
        )
    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "unknown"}
async def test_import(hass):
    """Test that import works as expected."""
    # YAML-import path: the flow is seeded via SOURCE_IMPORT data instead
    # of interactive user input; the cloud API and both setup hooks are
    # patched out so nothing real runs.
    with patch(
        "homeassistant.components.juicenet.config_flow.Api.get_devices",
        return_value=MagicMock(),
    ), patch(
        "homeassistant.components.juicenet.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.juicenet.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={CONF_ACCESS_TOKEN: "access_token"},
        )
        await hass.async_block_till_done()
    # The imported token must produce a finished entry and fire both setup
    # hooks exactly once each.
    assert result["type"] == "create_entry"
    assert result["title"] == "JuiceNet"
    assert result["data"] == {CONF_ACCESS_TOKEN: "access_token"}
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
| [
"noreply@github.com"
] | AlexxIT.noreply@github.com |
f8d1fea9546441f77be588dc74e9b5e244b85d87 | 612d763da9594102993a61cb9337f5f5fd8da15e | /Simple/SimpleNeuroNet_Binary.py | beea5b3545e25e9b9fa94c0ceff35d4680559f23 | [] | no_license | sathayas/DeepLearningTestground | 484abd623256045a7b5dc5e435acabe8b87415b9 | 2ad496c73a3be883119702522ffcb217c2b5eb55 | refs/heads/master | 2020-03-17T12:08:19.293404 | 2020-01-19T04:09:18 | 2020-01-19T04:09:18 | 133,575,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
import sklearn.metrics
import pylab
# Generate Dataset
# Synthetic binary-classification data: `examples` rows of standard-normal
# features; labels are independent coin flips (so there is no real signal
# to learn - accuracy gains reflect memorisation of this sample).
examples = 1000
features = 100
X = npr.randn(examples, features) # scalar features
Y = (npr.randn(examples)>0).astype(int) # binary labels
D = (X, Y)
# Specify the network
# One hidden layer of 10 sigmoid units feeding a single sigmoid output;
# weights and biases start uniform in [0, 1).
layer1_units = 10
layer2_units = 1
w1 = npr.rand(features, layer1_units)
b1 = npr.rand(layer1_units)
w2 = npr.rand(layer1_units, layer2_units)
b2 = npr.rand(layer2_units)
theta = (w1, b1, w2, b2)
# Define the loss function (binary cross entropy)
def binary_cross_entropy(y, y_hat):
    """Total binary cross-entropy between labels y and predictions y_hat."""
    per_example = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    return -np.sum(per_example)
def sigmoid(x):
    """Logistic function mapping any real input into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# Wraper around the Neural Network
def neural_network(x, theta):
    """Forward pass: one sigmoid hidden layer into one sigmoid output unit."""
    w1, b1, w2, b2 = theta
    hidden = sigmoid(np.dot(x, w1) + b1)
    return sigmoid(np.dot(hidden, w2) + b2)
# Wrapper around the objective function to be optimised
def objective(theta, idx):
    """Loss of the network on training example `idx` of the global dataset D."""
    x_i, y_i = D[0][idx], D[1][idx]
    return binary_cross_entropy(y_i, neural_network(x_i, theta))
# Update
def update_theta(theta, delta, alpha):
    """Gradient-descent step: subtract alpha * delta from each parameter.

    Returns a fresh (w1, b1, w2, b2) tuple; inputs are not mutated.
    """
    return tuple(param - alpha * step for param, step in zip(theta, delta))
# Compute Gradient
# autograd differentiates `objective` with respect to its first argument
# (theta), yielding a function of (theta, example index).
grad_objective = grad(objective)
# Train the Neural Network
# Plain per-example stochastic gradient descent, learning rate 0.1, for 10
# passes over the data, tracking accuracy once per epoch for the plot.
epochs = 10
Y_pred = (neural_network(D[0],theta)>0.5).astype(int)
print("Accuracy score before training:",
      sklearn.metrics.accuracy_score(D[1],Y_pred))
accuScore = []
for i in range(0, epochs):
    for j in range(0, examples):
        delta = grad_objective(theta, j)
        theta = update_theta(theta,delta, 0.1)
    Y_pred = (neural_network(D[0],theta)>0.5).astype(int)
    accuScore.append(sklearn.metrics.accuracy_score(D[1],Y_pred))
print("Accuracy score after training:",
      sklearn.metrics.accuracy_score(D[1],Y_pred))
pylab.plot(accuScore)
pylab.show()
| [
"hayasaka@utexas.edu"
] | hayasaka@utexas.edu |
90ca2e895d9d77031144a857d40d49e108401fa4 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/akPu6PFJetSequence_pp_jec_cff.py | d3ab48c908bd41a4600658e58932bbd4da9d71df | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,497 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
# Match the akPu6PF jets to generator-level jets.
akPu6PFmatch = patJetGenJetMatch.clone(
    src = cms.InputTag("akPu6PFJets"),
    matched = cms.InputTag("ak6HiGenJets")
    )
# Match the same jets to their originating generator partons.
akPu6PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu6PFJets"),
                                        matched = cms.InputTag("genParticles")
                                        )
# Jet-energy-correction factors (L2Relative + L3Absolute) using the
# pp generalTracks payload; NPV-based corrections disabled.
akPu6PFcorr = patJetCorrFactors.clone(
    useNPV = False,
#    primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels   = cms.vstring('L2Relative','L3Absolute'),
    src = cms.InputTag("akPu6PFJets"),
    payload = "AKPu6PF_generalTracks"
    )
# PAT jet producer: only gen-level matching is kept; b-tagging, jet ID,
# charge, associated tracks and embedded constituents are all disabled
# for this JEC configuration.
akPu6PFpatJets = patJets.clone(jetSource = cms.InputTag("akPu6PFJets"),
        jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu6PFcorr")),
        genJetMatch = cms.InputTag("akPu6PFmatch"),
        genPartonMatch = cms.InputTag("akPu6PFparton"),
        jetIDMap = cms.InputTag("akPu6PFJetID"),
        addBTagInfo         = False,
        addTagInfos = False,
        addDiscriminators   = False,
        addAssociatedTracks = False,
        addJetCharge = False,
        addJetID = False,
        getJetMCFlavour = False,
        addGenPartonMatch = True,
        addGenJetMatch = True,
        embedGenJetMatch = True,
        embedGenPartonMatch = True,
        embedCaloTowers = False,
        embedPFCandidates = False
        )
# Ntuple-filling analyzer for the PAT jets above (R parameter 0.6,
# MC truth enabled, gen jets filled).
akPu6PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu6PFpatJets"),
                                               genjetTag = 'ak6HiGenJets',
                                               rParam = 0.6,
                                               matchJets = cms.untracked.bool(False),
                                               matchTag = 'patJets',
                                               pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
                                               trackTag = cms.InputTag("generalTracks"),
                                               fillGenJets = True,
                                               isMC = True,
                                               genParticles = cms.untracked.InputTag("genParticles"),
                                               eventInfoTag = cms.InputTag("generator")
                                               )
# MC sequence: gen matching -> corrections -> PAT jets -> analyzer.
akPu6PFJetSequence_mc = cms.Sequence(
                                                  akPu6PFmatch
                                                  *
                                                  akPu6PFparton
                                                  *
                                                  akPu6PFcorr
                                                  *
                                                  akPu6PFpatJets
                                                  *
                                                  akPu6PFJetAnalyzer
                                                  )
# Data sequence drops the gen-matching steps.
akPu6PFJetSequence_data = cms.Sequence(akPu6PFcorr
                                                  *
                                                  akPu6PFpatJets
                                                  *
                                                  akPu6PFJetAnalyzer
                                                  )
# The jec and mix variants reuse the MC sequence; this _jec config exports
# the jec variant as the module-level sequence.
akPu6PFJetSequence_jec = akPu6PFJetSequence_mc
akPu6PFJetSequence_mix = akPu6PFJetSequence_mc
akPu6PFJetSequence = cms.Sequence(akPu6PFJetSequence_jec)
akPu6PFJetAnalyzer.genPtMin = cms.untracked.double(1)
| [
"yaxian.mao@cern.ch"
] | yaxian.mao@cern.ch |
95fc4ecbd21d1eb95f4455306dc5dbf5f3b81498 | fb5c5d50d87a6861393d31911b9fae39bdc3cc62 | /Scripts/sims4communitylib/dialogs/custom_dialogs/picker_dialogs/common_ui_object_category_picker.py | 406e9c564ab5c122dee6554869bee97de57477a2 | [
"CC-BY-4.0"
] | permissive | ColonolNutty/Sims4CommunityLibrary | ee26126375f2f59e5567b72f6eb4fe9737a61df3 | 58e7beb30b9c818b294d35abd2436a0192cd3e82 | refs/heads/master | 2023-08-31T06:04:09.223005 | 2023-08-22T19:57:42 | 2023-08-22T19:57:42 | 205,197,959 | 183 | 38 | null | 2023-05-28T16:17:53 | 2019-08-29T15:48:35 | Python | UTF-8 | Python | false | false | 2,435 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from distributor.rollback import ProtocolBufferRollback
from interactions.utils.tunable_icon import TunableIconFactory
from sims4.localization import TunableLocalizedString
from sims4.tuning.tunable import TunableTuple, TunableList, Tunable
from ui.ui_dialog_picker import UiObjectPicker
from distributor.shared_messages import build_icon_info_msg
class CommonUiObjectCategoryPicker(UiObjectPicker):
    """An ObjectPicker with categories listed in a drop down.

    Each entry of `object_categories` becomes one dropdown filter carrying
    a numeric tag, an icon and a localized display name.
    """
    FACTORY_TUNABLES = {
        'object_categories': TunableList(
            description='\n            The categories to display in the drop down for this picker.\n            ',
            tunable=TunableTuple(
                object_category=Tunable(
                    tunable_type=str,
                    default='ALL'
                ),
                icon=TunableIconFactory(),
                category_name=TunableLocalizedString()
            )
        )
    }

    def _build_customize_picker(self, picker_data) -> None:
        try:
            with ProtocolBufferRollback(picker_data.filter_data) as filter_data_list:
                for category in self.object_categories:
                    with ProtocolBufferRollback(filter_data_list.filter_data) as category_data:
                        # NOTE(review): abs(hash(str)) depends on Python's
                        # per-process hash randomization, so these tags are
                        # only stable within a single game session - confirm
                        # that nothing persists them across sessions.
                        category_data.tag_type = abs(hash(category.object_category)) % (10 ** 8)
                        build_icon_info_msg(category.icon(None), None, category_data.icon_info)
                        category_data.description = category.category_name
                filter_data_list.use_dropdown_filter = self.use_dropdown_filter
            super()._build_customize_picker(picker_data)
        except Exception:
            # BUG FIX: the original used a bare `except:`, which also
            # swallows SystemExit and KeyboardInterrupt; only ordinary
            # errors should trigger this fallback, which writes the
            # categories without the nested rollback / dropdown flag.
            with ProtocolBufferRollback(picker_data.filter_data) as category_data:
                for category in self.object_categories:
                    category_data.tag_type = abs(hash(category.object_category)) % (10 ** 8)
                    build_icon_info_msg(category.icon(None), None, category_data.icon_info)
                    category_data.description = category.category_name
            super()._build_customize_picker(picker_data)
| [
"ColonolNutty@hotmail.com"
] | ColonolNutty@hotmail.com |
942a63ed2268cec19d5f3a0790d7570a508c5463 | 1bdb0da31d14102ca03ee2df44f0ec522b0701a4 | /EmiliaRomagna/EmiliAmbiente/5-LabelsPublishing.py | 9ecbca232ba7c06cfe066148d873a8d4ffcce2a6 | [] | no_license | figuriamoci/Acqua | dc073d90c3c5e5899b22005685847916de1dfd95 | aef22fcd0c80c92441e0e3df2468d7a2f23a848a | refs/heads/master | 2020-12-15T04:00:26.855139 | 2020-06-08T21:17:55 | 2020-06-08T21:17:55 | 234,986,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import acqua.labelCollection as lc
import logging
import acqua.aqueduct as aq
# Target water utility for this publishing run.
gestore = "EmiliAmbiente"
aq.setEnv('EmiliaRomagna//'+gestore)
geoJsonFile = gestore+'.geojson'
# Remove the existing labels before regenerating them.  The return value
# is not needed (the original bound it to `ll` only to overwrite it on
# the very next line - dead assignment removed).
lc.removeEtichette(gestore)
ll = lc.to_mongoDBInsertMany(geoJsonFile)
# BUG FIX: log message typo "Safe" -> "Saved".
logging.info("Saved %s record(s) to MongoDB.", len(ll))
| [
"an.fantini@gmail.com"
] | an.fantini@gmail.com |
67e82f1a3eced602d9fbdf7d700faba6612cfb3e | 76799ea50d7b0b9cf8dc38f52e2516b0684fa010 | /py2win/testdata/sampleproject/setup.py | 679189dc9682ffd2832e8b5b456177fd6a753b5e | [
"MIT"
] | permissive | trollfred/py2win | 67fc6cc78e5453c46258aff6ca28b1b91b5bd8ea | 82158e7f5530b65adfc7b3d434b037c592a5913f | refs/heads/master | 2020-07-01T17:32:49.746098 | 2019-11-03T16:17:00 | 2019-11-03T16:17:00 | 201,240,871 | 0 | 0 | MIT | 2019-08-08T11:06:14 | 2019-08-08T11:06:13 | null | UTF-8 | Python | false | false | 1,606 | py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, used to resolve the README path.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the sample project (modelled on pypa/sampleproject).
setup(
    name='sample',
    version='1.2.0',
    description='A sample Python project',
    long_description=long_description,
    url='https://github.com/pypa/sampleproject',
    author='The Python Packaging Authority',
    author_email='pypa-dev@googlegroups.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='sample setuptools development',
    # Ship every package except helper/test directories.
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['PyQt5'],
    # Optional dependency groups: `pip install sample[dev]` / `[test]`.
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    package_data={
        'sample': ['package_data.dat'],
    },
    data_files=[],
    # Console and GUI entry points generated at install time.
    entry_points={
        'gui_scripts': ['sample-gui=sample.gui:main'],
        'console_scripts': ['sample-console=sample.console:main'],
    },
)
| [
"philippe.pinard@gmail.com"
] | philippe.pinard@gmail.com |
4dedca42a28c6d0fdbb66223664cf42233f210a5 | 9099ed0407521ac40b88f3b92872307f66c57bf9 | /codes/contest/topcoder/SRM 734/TheSquareCityDiv2.py | f71d4afa51b229fa8ca462816c4f94ee8fd89523 | [] | no_license | jiluhu/dirtysalt.github.io | 0cea3f52d2c4adf2bbf5c23b74f4cb1070025816 | c026f2969c784827fac702b34b07a9268b70b62a | refs/heads/master | 2020-08-31T09:32:05.273168 | 2019-10-29T01:53:45 | 2019-10-29T01:53:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,217 | py | # -*- coding: utf-8 -*-
import collections
class TheSquareCityDiv2:
    """Solver for TopCoder SRM 734 div2 "TheSquareCity"."""

    def find(self, r, t):
        """Simulate citizens repeatedly moving to the warmest nearby cell.

        The city is an n*n grid whose temperatures are given row-major in
        ``t``; one citizen starts on every cell and, each round, every
        citizen jumps to the warmest cell within Manhattan distance ``r``
        (ties broken by row-major order) until nobody moves.

        :param r: maximum Manhattan distance scanned each round
        :param t: flat tuple/list of the n*n cell temperatures
        :return: (occupied cell count, largest single-cell population)
        """
        n = int(round(len(t) ** 0.5))
        # moves[k] = destination for a citizen standing on cell k.
        moves = []
        for i in range(n):
            for j in range(n):
                move = None
                warmest = -(1 << 30)
                for k0 in range(n):
                    for k1 in range(n):
                        dist = abs(k0 - i) + abs(k1 - j)
                        k = k0 * n + k1
                        if (dist <= r) and t[k] > warmest:
                            warmest = t[k]
                            move = k
                moves.append(move)
        # Iterate the move table until every citizen sits on a fixed point.
        states = [i * n + j for i in range(n) for j in range(n)]
        while True:
            changed = False
            next_states = []
            for s in states:
                ns = moves[s]
                if s != ns:
                    changed = True
                next_states.append(ns)
            if not changed:
                break
            states = next_states
        # Counter yields both answers directly: the number of distinct
        # occupied cells and the biggest population on any one cell.
        # (Replaces the original unused `t0` local and the manual
        # Counter-filling loop.)
        group = collections.Counter(states)
        ra = len(group)
        rb = group.most_common(1)[0][1]
        return ra, rb
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
    """Compare a received answer against the expected one.

    Coerces ``received`` to the type of ``expected``, recurses element-wise
    into lists/tuples, and compares floats with a 1e-9 relative tolerance.
    Any failure to coerce or compare counts as "not equal".
    """
    try:
        _t = type(expected)
        received = _t(received)
        if _t == list or _t == tuple:
            if len(expected) != len(received): return False
            return all(tc_equal(e, r) for (e, r) in zip(expected, received))
        elif _t == float:
            eps = 1e-9
            d = abs(received - expected)
            return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
        else:
            return expected == received
    except Exception:
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit, making the harness impossible to interrupt; only
        # ordinary errors (bad coercion etc.) should mean "not equal".
        return False
def pretty_str(x):
    """Format a value for test output: strings quoted, tuples recursed."""
    if type(x) is str:
        return '"{}"'.format(x)
    if type(x) is tuple:
        return '({})'.format(','.join(map(pretty_str, x)))
    return str(x)
def do_test(r, t, __expected):
    """Run one test case through TheSquareCityDiv2.find and report.

    Prints PASSED!/FAILED! with the elapsed time (or the traceback on a
    runtime error) and returns 1 for a pass, 0 otherwise.
    """
    startTime = time.time()
    instance = TheSquareCityDiv2()
    exception = None
    try:
        __result = instance.find(r, t);
    except:
        # Capture the full traceback so it can be printed after timing.
        import traceback
        exception = traceback.format_exc()
    elapsed = time.time() - startTime # in sec
    if exception is not None:
        sys.stdout.write("RUNTIME ERROR: \n")
        sys.stdout.write(exception + "\n")
        return 0
    if tc_equal(__expected, __result):
        sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
        return 1
    else:
        sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
        sys.stdout.write("    Expected: " + pretty_str(__expected) + "\n")
        sys.stdout.write("    Received: " + pretty_str(__result) + "\n")
        return 0
def run_tests():
    """Parse TheSquareCityDiv2.sample and run each case through do_test.

    Command-line arguments are 0-based case indices; listed cases are
    skipped (the `in case_set: continue` below).  Prints a pass count and
    a TopCoder-style score based on elapsed wall time.
    """
    sys.stdout.write("TheSquareCityDiv2 (500 Points)\n\n")
    passed = cases = 0
    case_set = set()
    for arg in sys.argv[1:]:
        case_set.add(int(arg))
    with open("TheSquareCityDiv2.sample", "r") as f:
        while True:
            label = f.readline()
            if not label.startswith("--"): break
            # Case layout in the sample file: r, then len(t) followed by
            # t's values one per line, a separator line, then the expected
            # answer tuple in the same length-prefixed format.
            r = int(f.readline().rstrip())
            t = []
            for i in range(0, int(f.readline())):
                t.append(int(f.readline().rstrip()))
            t = tuple(t)
            f.readline()
            __answer = []
            for i in range(0, int(f.readline())):
                __answer.append(int(f.readline().rstrip()))
            __answer = tuple(__answer)
            cases += 1
            if len(case_set) > 0 and (cases - 1) in case_set: continue
            sys.stdout.write("  Testcase #%d ... " % (cases - 1))
            passed += do_test(r, t, __answer)
    sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
    # TopCoder scoring formula; the constant is the contest start epoch.
    T = time.time() - 1526551561
    PT, TT = (T / 60.0, 75.0)
    points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
    sys.stdout.write("Time : %d minutes %d secs\n" % (int(T / 60), T % 60))
    sys.stdout.write("Score : %.2f points\n" % points)
# Script entry point: run the bundled sample test cases.
if __name__ == '__main__':
    run_tests()
# }}}
# CUT end
| [
"dirtysalt1987@gmail.com"
] | dirtysalt1987@gmail.com |
05496927557001dd893bc06afe133f220458c9de | 661d3fb1f4880ff6efb7638bf066f63397c26ef0 | /Final Implementation/client.py | 8bc748c34af443547c201831f4ace27e17d1538e | [] | no_license | jamesfallon99/CA314 | 6048020bccf8f119b9eb847a8b610aef090130b0 | fb78429fe206ae2fd6cd413e4b119e84e1bae1ea | refs/heads/main | 2023-07-05T11:03:13.470699 | 2021-08-24T21:31:42 | 2021-08-24T21:31:42 | 399,609,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,126 | py | from game import Board, Tile
from player import Player
import threading
import socket, pickle
import copy
import json
from json import JSONEncoder
# NOTE: in our presentation we can mention that the changes we have made i.e: adding threading was necessary in order to making it run locally for the demo.
# NOTE: because user input and GUI are yet to be implemented we are testing using threads and the terminal.
class Client(threading.Thread):
    """Threaded game client: owns the socket, the local Player state and
    the background receive loop that applies server updates."""

    def __init__(self, socket, data_out=None, data_in=None, player=None):
        """
        Initializes an instance of the client class.

        :param socket: socket object for the client.
        :param data_out: JSON object
        :param data_in: JSON object
        :param player: Player object
        """
        super().__init__()
        self.socket = socket
        self.data_out = data_out
        self.data_in = data_in
        self.player = player

    def con(self, host, port):
        """Connect the underlying socket to the server at (host, port)."""
        self.socket.connect((host, port))

    def run(self):
        """Background receive loop (started via Thread.start()).

        Unpickles each server message and applies it to the local player:
        Board -> replace the board, int -> add to the score, str -> either
        the 'invalid word' notice or the client/host id, tuple -> printed.

        SECURITY NOTE: pickle over a socket executes arbitrary code if the
        peer is untrusted; acceptable only for this local demo.
        NOTE(review): assumes one recv(8192) yields one whole pickled
        message - large payloads could be split across packets.
        """
        while True:
            received = pickle.loads(self.socket.recv(8192))
            if type(received) == Board:
                self.player.board = received
            elif type(received) == int:
                self.player.score += received
            elif type(received) == str:
                if received == 'invalid word':
                    print(received)
                else:
                    # presumably any other string carries the numeric host
                    # id; int() raises on anything else - TODO confirm.
                    self.player.client_host_id = int(received)
            elif isinstance(received, tuple):
                # BUG FIX: the original wrote `elif type(tuple):`, which is
                # always truthy, so this branch fired for every message not
                # handled above rather than only for tuples.
                print(received)

    def send_game_data(self, data, target):
        """
        Sends updated and encoded game state data to the server,
        packaged/structured correctly for being interpreted by the
        Server.  (Stub - not implemented yet.)

        :param data: JSON object
        :param target: server socket object
        """
        return  # void

    def encode(self, data):
        """
        Convert *data* into the wire format expected by the server.

        :param data: payload to encode.
        :raises NotImplementedError: always, until encoding is implemented.
        """
        # BUG FIX: the original returned the undefined name `encoded_data`,
        # so any call raised NameError; fail loudly and explicitly instead.
        raise NotImplementedError("Client.encode() is not implemented yet")

    def decode(self, data):
        """
        Convert wire-format *data* back into a usable structure.

        :param data: payload to decode.
        :raises NotImplementedError: always, until decoding is implemented.
        """
        # BUG FIX: `decoded_data` was never defined (guaranteed NameError).
        raise NotImplementedError("Client.decode() is not implemented yet")

    def input_name(self, name):
        """
        Updates player object's name.  (Stub.)

        :param name: player inputted name
        """
        return  # void

    def input_game_code(self, code):
        """
        Return the player-entered game code unchanged.

        :param code: player inputted code
        """
        return code

    def display(self):
        """
        Render board object to users screen.  (Stub.)
        """
        return  # void

    def join_game(self, code, target):
        """
        Passes player name and code to the server.  (Stub.)

        :param code: player inputted code
        :param target: object for code to be sent to
        """
        return  # void

    def start_game(self):
        """
        Send game creation request to server.  (Stub.)
        """
        return  # void

    def create_game(self, name):
        """
        Creates a game.  (Stub.)

        :param name: player name to be set
        """
        return  # void
# substitution for GUI due to time constraint
# function runs inside a thread
# use the commands to test various tasks that would normally require an GUI
def handle_input(client):
    """Terminal REPL standing in for the missing GUI.

    Reads `!` commands from stdin forever and drives the given Client:
    placing tiles, printing the board/score/tiles, and pickling state to
    the server socket.  Intended to run inside its own thread.
    """
    commands = [
        '!help',
        '!place_tile',
        '!display',
        '!end_turn',
    ]
    print(f'For a list of commands use !help')
    while True:
        player_input = input('')
        if player_input == '!help':
            print('Here are all the commands available:')
            print(commands)
        elif player_input == '!place_tile':
            print(f'Format for inserting is => letter y x e.g: a 4 1')
            print('Please type the move you wish to make using the above format')
            tile_placement_input = input('Move: ')
            letter, y, x = tile_placement_input.split(' ')
            # Pick the rack tiles whose letter matches the typed one and
            # place the first on the board at (y, x).
            corresponding_tile = [tile for tile in client.player.tiles if tile.letter == letter]
            client.player.board.place_tile(corresponding_tile[0], int(y), int(x))
            print(f'You inserted {letter} into position [{y}][{x}] on the board')
        elif player_input == '!display':
            client.player.board.printing()
        elif player_input == '!end_turn':
            # Ship the whole board to the server to finish the turn.
            client.socket.send(pickle.dumps(client.player.board))
        elif player_input == '!send_test':
            client.socket.send(pickle.dumps(client.socket.getsockname()))
        elif player_input == '!see_players':
            client.socket.send(pickle.dumps('see_players'))
        elif player_input == '!see_score':
            print(client.player.score)
        elif player_input == '!join_game':
            client.socket.send(pickle.dumps('join_game'))
            client.socket.send(pickle.dumps(client.player))
        elif player_input == '!see_tiles':
            print([tile.letter for tile in client.player.tiles])
def main():
    """Wire up a demo client against a local server at 127.0.0.1:8000 and
    start both the receive thread and the terminal-input thread."""
    # client socket
    # make socket use TCP for reliable communication
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # player board
    player_board = Board()
    # player_board.board[4][1] = Tile('c', 2, (None, 0))
    # player_board.board[4][2] = Tile('a', 4, ('L', 2))
    # player_board.board[4][3] = Tile('t', 5, ('W', 3))
    # tiles
    player_tiles = [
        Tile("c", 2, (None, 0)),
        Tile("a", 4, ("L", 2)),
        Tile("t", 5, ("W", 3)),
        Tile("s", 9, ("L", 2)),
        Tile("d", 2, (None, 0)),
        Tile("o", 7, ("L", 2)),
        Tile("g", 3, ("W", 3)),
    ]
    client = Client(sock)
    # NOTE(review): the trailing comment suggests Player's signature is
    # (id, name, tiles, score, client_socket, board), but this call passes
    # name first and no board - verify against player.py.
    ligma = Player("ligma", player_tiles, 0, client.socket.getsockname()) #(self, id, name, tiles, score, client_socket, board):
    client.player = ligma
    client.con('127.0.0.1', 8000)
    client.start()
    terminal_input = threading.Thread(target=handle_input, args=(client, )) # please note player_board is the "server" board at the moment ofr testing purposes
    terminal_input.start()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | jamesfallon99.noreply@github.com |
3432242cb6953a1399e20569611c0388973804c9 | 268b22da698310c1fd0471f94d61e02782cbaf37 | /Week6/week6work/test/users.py | 0107e56820ae81040fc0462445cadda5f71ba73f | [] | no_license | jayquake/DI-Excersises | 0c1147863753fb29a6f688bd73bdd9acc047c180 | 02cb0ee9baed7fd7736273e8fc68317ba4356e39 | refs/heads/master | 2020-12-10T11:38:12.225341 | 2020-05-06T08:34:35 | 2020-05-06T08:34:35 | 233,582,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import json
def create_database(dst_file='my_file.json'):
    """Create the sample user database and persist it to *dst_file* as JSON."""
    users = [
        {'username': 'Jason', 'password': 'horse', 'status': True},
        {'username': 'Mac', 'password': 'candy', 'status': False},
        {'username': 'Apple', 'password': 'monkeydog', 'status': True},
    ]
    with open(dst_file, 'w') as f:
        json.dump(users, f)
def load_database(src_file='my_file.json'):
    """Read and return the JSON records stored in *src_file*."""
    with open(src_file, 'r') as f:
        return json.load(f)
def write_database(src_file='my_file.json', data=None):
    """Overwrite *src_file* with *data* serialized as JSON.

    BUG FIX: the original called ``json.dump(f)`` - ``dump`` requires the
    object AND the file handle, so every call raised TypeError.  A ``data``
    parameter (appended with a default, keeping the signature
    backward-compatible) now supplies the object to write.

    :param src_file: path of the JSON file to overwrite.
    :param data: JSON-serializable object to write.
    :raises ValueError: if no data is given to write.
    :return: confirmation string.
    """
    if data is None:
        raise ValueError('write_database() needs the data to write')
    with open(src_file, 'w') as f:
        json.dump(data, f)
    return ('database rewritten')
"jayquake@gmail.com"
] | jayquake@gmail.com |
db5193711d8806da6294980ffafb537e5461f000 | fec622bc34957dd4d99f1ef0f23608eeb40ed609 | /internal/notes/builtin-SAVE/packages/xcompmgr/package.py | dc8aa398b1d218fc22d5bea149cd1d6e2eec658c | [] | no_license | scottkwarren/hpctest | 4d5ff18d00c5eb9b7da481c9aa0824aa7082062f | a8bb99b5f601a5d088ae56ab9886ab8079c081ba | refs/heads/master | 2022-09-07T19:36:18.544795 | 2022-08-18T20:26:42 | 2022-08-18T20:26:42 | 100,518,800 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xcompmgr(AutotoolsPackage):
    """xcompmgr is a sample compositing manager for X servers supporting the
    XFIXES, DAMAGE, RENDER, and COMPOSITE extensions. It enables basic
    eye-candy effects."""
    homepage = "http://cgit.freedesktop.org/xorg/app/xcompmgr"
    url = "https://www.x.org/archive/individual/app/xcompmgr-1.1.7.tar.gz"
    # Second argument is the md5 checksum of the 1.1.7 release tarball.
    version('1.1.7', '4992895c8934bbc99bb2447dfe5081f2')
    # X11 client libraries required at link time.
    depends_on('libxcomposite')
    depends_on('libxfixes')
    depends_on('libxdamage')
    depends_on('libxrender')
    depends_on('libxext')
    # Build-only tooling for the autotools configure step.
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')
| [
"scott@rice.edu"
] | scott@rice.edu |
36e40d84e3ec98b642358afd8af1a9a989c1fbdf | 565409a77f506cf834abe5ed2bdd83d221ab0c2d | /web_soluciones/migrations/0009_itemsolucionimagen.py | 9c13069e4d939910896769095ec06d1ab933fc01 | [] | no_license | odecsarrollo/04_odeco_web | 9f3c840fb03afb9bf25792a78829b611e1d67d2a | c87593e24be23bb6ef759a0eafac95e5a0649fe4 | refs/heads/master | 2023-01-22T23:46:17.209001 | 2022-04-13T03:40:38 | 2022-04-13T03:40:38 | 189,261,683 | 0 | 0 | null | 2022-12-27T16:44:24 | 2019-05-29T16:29:29 | JavaScript | UTF-8 | Python | false | false | 1,114 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-26 16:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import imagekit.models.fields
import web_soluciones.models
class Migration(migrations.Migration):
    # Applies on top of the previous web_soluciones migration.
    dependencies = [
        ('web_soluciones', '0008_auto_20170926_1126'),
    ]
    # Creates the ItemSolucionImagen table: an ordered, described image
    # attached to an ItemSolucion (PROTECT: images block parent deletion).
    operations = [
        migrations.CreateModel(
            name='ItemSolucionImagen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('orden', models.PositiveIntegerField(default=0)),
                ('descripcion', models.TextField()),
                ('imagen', imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to=web_soluciones.models.ItemSolucionImagen.imagen_upload_to, verbose_name='Imagen Item Solución')),
                ('item_solucion', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='mis_imagenes', to='web_soluciones.ItemSolucion')),
            ],
        ),
    ]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
6c5ceced731c19353e60b513d1631f8c919e755a | 40074020ae89350cbb012212fa1f66549167fb13 | /ch1_image_encryption_image.py | a276c338bfe85fef09d9d7755c2589da8edfdf5b | [] | no_license | jasonbrackman/classic_computer_science_problems | 8009acb8111118eb88b4affc3de153853ed0f81d | fee4e1f9796d9029a2cfd2253cfad863d7beb290 | refs/heads/master | 2020-05-25T14:46:26.711564 | 2019-05-27T02:27:53 | 2019-05-27T02:27:53 | 187,853,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import base64
from PIL import Image
from ch1_encryption import encrypt, decrypt
def introspection(obj):
    """Print every public attribute of *obj* with its type and value."""
    for name in dir(obj):
        if name.startswith("_"):
            continue
        value = getattr(obj, name)
        print(name, type(value), value)
def load_image_as_string(path):
    """Open the image at *path* and return (mode, size, raw pixel bytes)."""
    with Image.open(path) as im:
        raw_bytes = im.tobytes()
        mode, size = im.mode, im.size
        print(mode, size)
    return mode, size, raw_bytes
if __name__ == "__main__":
    # Round-trip demo: load an image, base64-encode its raw pixels,
    # encrypt/decrypt with the ch1_encryption cipher, and show that the
    # restored bytes rebuild the original image.
    path = "example.jpg"
    mode, size, data = load_image_as_string(path)
    idata = base64.encodebytes(data)
    key, encrypted = encrypt(idata)
    data = decrypt(key, encrypted)
    data = base64.decodebytes(data)
    s = Image.frombytes(mode=mode, size=size, data=data)
    s.show()
# print(type(im))
# print(type(im_b.encode()))
#
# print("Length IM: ", len(im))
# print("Length IM_B: ", len(im_b))
#
# assert im == im_b
# with open('output.bin', 'wb') as file:
# file.write(encrypted.to_bytes((encrypted.bit_length() + 7) // 8, "big"))
#
# with open('output.bin', 'rb') as file:
# bytes = file.read()
# num = int.from_bytes(bytes, byteorder='big')
# # print(num.bit_length())
#
# new_image = decrypt(key, num)
# with open('decrypted.jpg', 'wb') as x:
# x.write(new_image.encode())
#
# x = Image.open("decrypted.jpg")
# x.show()
| [
"brackman@gmail.com"
] | brackman@gmail.com |
a4ac2a1811dd7146877d5c706339874779260aa5 | dc3b25768cdc6c0c31a294a40796b51b185bc5ee | /BIP/Bayes/lhs.py | 8f40be560db3e2383c50a99048f012314340920d | [] | no_license | fccoelho/bayesian-inference | 5fa32936422aea8afca8d89272928e7f1aa0f74b | c274f398ea5dad760b7783f3eb1d343dacc6e591 | refs/heads/master | 2021-01-23T19:41:30.112936 | 2020-07-26T19:19:59 | 2020-07-26T19:19:59 | 32,230,235 | 6 | 3 | null | 2015-07-07T12:50:09 | 2015-03-14T20:19:26 | HTML | UTF-8 | Python | false | false | 6,475 | py | # !/usr/bin/python
# -*- coding:utf-8 -*-
# -----------------------------------------------------------------------------
# Name: lhs.py
# Project: Bayesian-Inference
# Purpose:
#
# Author: Flávio Codeço Coelho<fccoelho@gmail.com>
#
# Created: 2008-11-26
# Copyright: (c) 2008 by the Author
# Licence: GPL
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from six.moves import map
from six.moves import range
import scipy.stats as stats
import numpy
from numpy.linalg import cholesky, inv
from numpy.random import uniform, shuffle
import theano as T
def lhsFromSample(sample, siz=100):
"""
Latin Hypercube Sample from a set of values.
For univariate distributions only
:Parameters:
- `sample`: list, tuple of array
- `siz`: Number or shape tuple for the output sample
"""
# TODO: add support to correlation restricted multivariate samples
if not isinstance(sample, (list, tuple, numpy.ndarray)):
raise TypeError('sample is not a list, tuple or numpy vector')
n = siz
if isinstance(siz, (tuple, list)):
n = numpy.product(siz)
perc = numpy.arange(0, 100., 100. / n)
shuffle(perc)
smp = [stats.uniform(i, 100. / n).rvs() for i in perc]
v = numpy.array([stats.scoreatpercentile(sample, p) for p in smp])
if isinstance(siz, (tuple, list)):
v.shape = siz
return v
def lhsFromDensity(kde, siz=100):
'''
LHS sampling from a variable's Kernel density estimate.
:Parameters:
- `kde`: scipy.stats.kde.gaussian_kde object
- `siz`: Number or shape tuple for the output sample
'''
if not isinstance(kde, scipy.stats.kde.gaussian_kde):
raise TypeError("kde is not a density object")
if isinstance(siz, (tuple, list)):
n = numpy.product(siz)
s = kde.resample(n)
v = lhsFromSample(s, n)
if isinstance(siz, (tuple, list)):
v.shape = siz
return v
def lhs(dist, parms, siz=100, noCorrRestr=False, corrmat=None):
'''
Latin Hypercube sampling of any distribution.
dist is is a scipy.stats random number generator
such as stats.norm, stats.beta, etc
parms is a tuple with the parameters needed for
the specified distribution.
:Parameters:
- `dist`: random number generator from scipy.stats module or a list of them.
- `parms`: tuple of parameters as required for dist, or a list of them.
- `siz` :number or shape tuple for the output sample
- `noCorrRestr`: if true, does not enforce correlation structure on the sample.
- `corrmat`: Correlation matrix
'''
if not isinstance(dist, (list, tuple)):
dists = [dist]
parms = [parms]
else:
assert len(dist) == len(parms)
dists = dist
indices = rank_restr(nvars=len(dists), smp=siz, noCorrRestr=noCorrRestr, Corrmat=corrmat)
smplist = []
for j, d in enumerate(dists):
if not isinstance(d, (stats.rv_discrete, stats.rv_continuous)):
raise TypeError('dist is not a scipy.stats distribution object')
n = siz
if isinstance(siz, (tuple, list)):
n = numpy.product(siz)
# force type to float for sage compatibility
pars = tuple([float(k) for k in parms[j]])
# perc = numpy.arange(1.,n+1)/(n+1)
step = 1. / (n)
perc = numpy.arange(0, 1, step) # class boundaries
s_pos = [uniform(i, i + step) for i in perc[:]] # [i+ step/2. for i in perc[:]]
v = d(*pars).ppf(s_pos)
# print len(v), step, perc
index = list(map(int, indices[j] - 1))
v = v[index]
if isinstance(siz, (tuple, list)):
v.shape = siz
smplist.append(v)
if len(dists) == 1:
return smplist[0]
return smplist
def rank_restr(nvars=4, smp=100, noCorrRestr=False, Corrmat=None):
"""
Returns the indices for sampling variables with
the desired correlation structure.
:Parameters:
- `nvars`: number of variables
- `smp`: number of samples
- `noCorrRestr`: No correlation restriction if True
- `Corrmat`: Correlation matrix. If None, assure uncorrelated samples.
"""
if isinstance(smp, (tuple, list)):
smp = numpy.product(smp)
def shuf(s):
"""
Shuffle a vector, making shure to make a copy of the original
:param s: A vector of values
:return: a list of arrays
"""
s1 = []
for i in range(nvars):
shuffle(s)
s1.append(s.copy())
return s1
if noCorrRestr or nvars == 1:
inds = numpy.arange(smp)
x = shuf(inds)
else:
if Corrmat is None:
C = numpy.core.numeric.identity(nvars)
else:
if Corrmat.shape[0] != nvars:
raise TypeError('Correlation matrix must be of rank %s' % nvars)
C = numpy.matrix(Corrmat)
s0 = numpy.arange(1., smp + 1) / (smp + 1.)
s = stats.norm().ppf(s0)
s1 = shuf(s)
S = numpy.matrix(s1)
P = cholesky(C)
Q = cholesky(numpy.corrcoef(S))
Final = S.transpose() * inv(Q).transpose() * P.transpose()
x = [stats.stats.rankdata(Final.transpose()[i,]) for i in range(nvars)]
return x
if __name__ == '__main__':
    # Ad-hoc visual/smoke test: draws LHS samples from uniform and beta
    # distributions, with and without a correlation restriction, and
    # scatter-plots the correlated pair (requires pylab/matplotlib).
    dist = stats.uniform, stats.uniform
    parms = (0, 1.), (0, 1.)
    print(lhs(dist, parms, siz=4))
    import pylab as P
    # dist = stats.norm
    dist = stats.beta
    # pars = (50,2)
    pars = (1, 5) # beta
    b = lhs(dist, pars, 1000)
    # Two identically distributed variables with 0.8 target correlation.
    cm = numpy.array([[1, .8], [.8, 1]])
    c = lhs([dist, dist], [pars, pars], 2000, False, cm)
    # print stats.pearsonr(c[0],c[1]), stats.spearmanr(c[0],c[1])
    # P.hist(c[0],normed=1)#, label='c0 sample')
    P.scatter(c[0], c[1])
    # P.hist(c[1],normed=1)#, label='c1 sample')
    # print c[0].shape,c[1].shape
    n = dist(*pars).rvs(size=20)
    # hist(n.ravel(),facecolor='r',alpha =0.3,normed=1, label='Regular sample')
    # plot(numpy.arange(min(min(c),min(n)),max(max(c),max(n)),.1),dist(*pars).pdf(numpy.arange(min(min(c),min(n)),max(max(c),max(n)),.1)),label='PDF')
    # legend()
    # savefig('lhs.png',dpi=400)
    # lhs([stats.norm]*19,[(0,1)]*19,17,False,numpy.identity(19))
    P.show()
    # TODO: Extend lhsFromSample to allow multivariate correlated sampling
| [
"fccoelho@gmail.com"
] | fccoelho@gmail.com |
dfea2d32f870bfc8e2edd99da132c2ff2f27ed08 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/pyopenssl-0.15.1-py27_2/lib/python2.7/site-packages/OpenSSL/rand.py | 3adf69369a4a58b01926fe58b7e3bf322ddaedb9 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 4,528 | py | """
PRNG management routines, thin wrappers.
See the file RATIONALE for a short explanation of why this module was written.
"""
from functools import partial
from six import integer_types as _integer_types
from OpenSSL._util import (
ffi as _ffi,
lib as _lib,
exception_from_error_queue as _exception_from_error_queue,
path_string as _path_string)
class Error(Exception):
"""
An error occurred in an `OpenSSL.rand` API.
"""
_raise_current_error = partial(_exception_from_error_queue, Error)
_unspecified = object()
_builtin_bytes = bytes
def bytes(num_bytes):
"""
Get some random bytes as a string.
:param num_bytes: The number of bytes to fetch
:return: A string of random bytes
"""
if not isinstance(num_bytes, _integer_types):
raise TypeError("num_bytes must be an integer")
if num_bytes < 0:
raise ValueError("num_bytes must not be negative")
result_buffer = _ffi.new("char[]", num_bytes)
result_code = _lib.RAND_bytes(result_buffer, num_bytes)
if result_code == -1:
# TODO: No tests for this code path. Triggering a RAND_bytes failure
# might involve supplying a custom ENGINE? That's hard.
_raise_current_error()
return _ffi.buffer(result_buffer)[:]
def add(buffer, entropy):
"""
Add data with a given entropy to the PRNG
:param buffer: Buffer with random data
:param entropy: The entropy (in bytes) measurement of the buffer
:return: None
"""
if not isinstance(buffer, _builtin_bytes):
raise TypeError("buffer must be a byte string")
if not isinstance(entropy, int):
raise TypeError("entropy must be an integer")
# TODO Nothing tests this call actually being made, or made properly.
_lib.RAND_add(buffer, len(buffer), entropy)
def seed(buffer):
"""
Alias for rand_add, with entropy equal to length
:param buffer: Buffer with random data
:return: None
"""
if not isinstance(buffer, _builtin_bytes):
raise TypeError("buffer must be a byte string")
# TODO Nothing tests this call actually being made, or made properly.
_lib.RAND_seed(buffer, len(buffer))
def status():
"""
Retrieve the status of the PRNG
:return: True if the PRNG is seeded enough, false otherwise
"""
return _lib.RAND_status()
def egd(path, bytes=_unspecified):
"""
Query an entropy gathering daemon (EGD) for random data and add it to the
PRNG. I haven't found any problems when the socket is missing, the function
just returns 0.
:param path: The path to the EGD socket
:param bytes: (optional) The number of bytes to read, default is 255
:returns: The number of bytes read (NB: a value of 0 isn't necessarily an
error, check rand.status())
"""
if not isinstance(path, _builtin_bytes):
raise TypeError("path must be a byte string")
if bytes is _unspecified:
bytes = 255
elif not isinstance(bytes, int):
raise TypeError("bytes must be an integer")
return _lib.RAND_egd_bytes(path, bytes)
def cleanup():
"""
Erase the memory used by the PRNG.
:return: None
"""
# TODO Nothing tests this call actually being made, or made properly.
_lib.RAND_cleanup()
def load_file(filename, maxbytes=_unspecified):
"""
Seed the PRNG with data from a file
:param filename: The file to read data from (``bytes`` or ``unicode``).
:param maxbytes: (optional) The number of bytes to read, default is to read
the entire file
:return: The number of bytes read
"""
filename = _path_string(filename)
if maxbytes is _unspecified:
maxbytes = -1
elif not isinstance(maxbytes, int):
raise TypeError("maxbytes must be an integer")
return _lib.RAND_load_file(filename, maxbytes)
def write_file(filename):
"""
Save PRNG state to a file
:param filename: The file to write data to (``bytes`` or ``unicode``).
:return: The number of bytes written
"""
filename = _path_string(filename)
return _lib.RAND_write_file(filename)
# TODO There are no tests for screen at all
def screen():
"""
Add the current contents of the screen to the PRNG state. Availability:
Windows.
:return: None
"""
_lib.RAND_screen()
if getattr(_lib, 'RAND_screen', None) is None:
del screen
# TODO There are no tests for the RAND strings being loaded, whatever that
# means.
_lib.ERR_load_RAND_strings()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
c15621eb6087d07b6fdc7af9d27ff60cd29a03e5 | a8289cb7273245e7ec1e6079c7f266db4d38c03f | /Django_Attendance_mongo/mongos/migrations/0004_listentry.py | 5047c29886d29cadce0fc9b56b4c4e47af18ea51 | [] | no_license | palmarytech/Python_Snippet | 6acbd572d939bc9d5d765800f35a0204bc044708 | 41b4ebe15509d166c82edd23b713a1f3bf0458c5 | refs/heads/master | 2022-10-06T22:51:00.469383 | 2020-03-13T08:32:11 | 2020-03-13T08:32:11 | 272,350,189 | 1 | 0 | null | 2020-06-15T05:30:44 | 2020-06-15T05:30:44 | null | UTF-8 | Python | false | false | 600 | py | # Generated by Django 2.1.3 on 2018-12-10 07:13
from django.db import migrations, models
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('mongos', '0003_contact'),
]
operations = [
migrations.CreateModel(
name='ListEntry',
fields=[
('_id', djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False)),
('headline', models.CharField(max_length=255)),
('authors', djongo.models.fields.ListField()),
],
),
]
| [
"leamon.lee13@gmail.com"
] | leamon.lee13@gmail.com |
481e1c44102267c893fdaa4e3adb75a0817ecad1 | 43f3b7e4a5b7a1210ffa72c5a855d7542d68290d | /Results/Python/Series/20.py | 06a2816df4f06bbe10f7f611c865493301a6582a | [] | no_license | bar2104y/Abramyan_1000_tasks | 38e86e119245db4bac0483583cc16d8793d5689c | e0bf9f5e73d90b8eca3fe5ba7913ed12f18d989a | refs/heads/master | 2021-06-05T18:05:09.788453 | 2020-06-30T19:52:31 | 2020-06-30T19:52:31 | 150,898,700 | 5 | 2 | null | 2018-10-02T17:16:28 | 2018-09-29T20:01:33 | Python | UTF-8 | Python | false | false | 206 | py | n = int(input("N: "))
a = int(input())
k = 0
m = []
for i in range(1, n):
tmp = int(input())
if a < tmp:
m.append(a)
k += 1
a = tmp
print("K:", k)
for tmp in m:
print(tmp)
| [
"bar2104y@yandex.ru"
] | bar2104y@yandex.ru |
74d3e5746f18ab0ddc2199e6b991a399e16ae4e0 | 6bce631b869a8717eed29eae186688a7fdb7f5c8 | /venv/Lib/site-packages/test/test_municipality_financial.py | c547f63265265c80b1143a8fdf6dbd8c2d205aa5 | [] | no_license | singhd3101/CS5100-Stock-Market-Prediction | 6d43bd39633dd80bb1141dc550302874a5bc0939 | 2804a6270a05155e168d0f2518bcd97f1c9bcb3e | refs/heads/master | 2020-11-26T03:56:02.613630 | 2019-12-19T02:22:13 | 2019-12-19T02:22:13 | 228,958,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | # coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import intrinio_sdk
from intrinio_sdk.models.municipality_financial import MunicipalityFinancial # noqa: E501
from intrinio_sdk.rest import ApiException
class TestMunicipalityFinancial(unittest.TestCase):
"""MunicipalityFinancial unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMunicipalityFinancial(self):
"""Test MunicipalityFinancial"""
# FIXME: construct object with mandatory attributes with example values
# model = intrinio_sdk.models.municipality_financial.MunicipalityFinancial() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"singh3101div@gmail.com"
] | singh3101div@gmail.com |
2c86d8433b2281fa239c52a7f91f0908b32756d8 | 759f52976ad2cd9236da561ca254e11e08003487 | /part7/ex45/v2-replace-config/replacement_matcher.py | ff35c60a384c597782c1732e5777c6ea8757cb0e | [] | no_license | mbaeumer/fiftyseven | 57b571c3e09640a2ab0ed41e5d06643c12b48001 | d79b603d5b37bf1f4127d9253f8526ea3897dc08 | refs/heads/master | 2020-06-10T20:52:25.311992 | 2017-11-15T18:28:38 | 2017-11-15T18:28:38 | 75,877,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | #!/usr/bin/python
class Replacement:
def __init__(self, to_replace, replaced_by, occurence):
self.to_replace = to_replace
self.replaced_by = replaced_by
self.occurence = occurence
| [
"martin.baeumer@gmail.com"
] | martin.baeumer@gmail.com |
3bc809c70b62cdfeff7724102903e37193402733 | 2d923980f8c3a5d450cd2435dcb96fff27e407bf | /unittests/test_search.py | 62c0e1d5efb91437da88b333d1dbbd806866b579 | [] | no_license | SHAKOTN/songs_service | 2f0cc9bfdee5138042ea82477ec0fa40e8a4c2f7 | be0a5875ee6106b35966daef4337d56ec6cf2f10 | refs/heads/master | 2021-06-24T00:27:58.876953 | 2017-08-26T13:45:25 | 2017-08-26T13:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | import json
from unittest import TestCase
from app import app, mongo
from bson import ObjectId
class TestSearch(TestCase):
def setUp(self):
self.app = app.test_client()
with app.app_context():
self.songs = mongo.db.songs
self.song_id = None
def test_search(self):
payload = dict(
artist='Despised Icon',
title='Furtive Monologue',
difficulty=10,
level=1,
released='2017-05-01'
)
response = self.app.put(
'/songs/',
data=payload
)
self.song_id = json.loads(response.get_data().decode())['_id']
# Test case sensitivity
response_search = self.app.get(
'/songs/search',
query_string={
'message': 'Despised'
}
)
expected_song_data = payload.copy()
expected_song_data['_id'] = self.song_id
matched_songs = json.loads(response_search.get_data().decode())
assert expected_song_data == matched_songs[0]
# Test case insensitivity
response_search = self.app.get(
'/songs/search',
query_string={
'message': 'dESpIsEd'
}
)
matched_songs = json.loads(response_search.get_data().decode())
assert expected_song_data == matched_songs[0]
response_search = self.app.get(
'/songs/search',
query_string={
'message': 'Monologue'
}
)
matched_songs = json.loads(response_search.get_data().decode())
assert expected_song_data == matched_songs[0]
def tearDown(self):
self.songs.delete_one({
'_id': ObjectId(self.song_id)
})
self.songs.delete_many({
'artist': 'Despised Icon'
})
| [
"jadecoresky@gmail.com"
] | jadecoresky@gmail.com |
521615db251668aff2124d314c0e6e40af1e94cb | 04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4 | /Lib/objc/_WebCore.py | 152496dd1dfac3e08030c3233439e39cab19406a | [
"MIT"
] | permissive | ColdGrub1384/Pyto | 64e2a593957fd640907f0e4698d430ea7754a73e | 7557485a733dd7e17ba0366b92794931bdb39975 | refs/heads/main | 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 | MIT | 2023-02-26T21:34:04 | 2018-09-15T22:29:07 | C | UTF-8 | Python | false | false | 6,080 | py | """
Classes from the 'WebCore' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
WebVideoFullscreenController = _Class("WebVideoFullscreenController")
WebUndefined = _Class("WebUndefined")
WebItemProviderPasteboard = _Class("WebItemProviderPasteboard")
WebItemProviderLoadResult = _Class("WebItemProviderLoadResult")
WebItemProviderRegistrationInfoList = _Class("WebItemProviderRegistrationInfoList")
WebItemProviderPromisedFileRegistrar = _Class("WebItemProviderPromisedFileRegistrar")
WebItemProviderWritableObjectRegistrar = _Class(
"WebItemProviderWritableObjectRegistrar"
)
WebItemProviderDataRegistrar = _Class("WebItemProviderDataRegistrar")
WebCoreResourceHandleAsOperationQueueDelegate = _Class(
"WebCoreResourceHandleAsOperationQueueDelegate"
)
WebCoreResourceHandleWithCredentialStorageAsOperationQueueDelegate = _Class(
"WebCoreResourceHandleWithCredentialStorageAsOperationQueueDelegate"
)
WebCoreNSURLSessionDataTask = _Class("WebCoreNSURLSessionDataTask")
WebCoreNSURLSession = _Class("WebCoreNSURLSession")
WebCoreNSURLSessionTaskMetrics = _Class("WebCoreNSURLSessionTaskMetrics")
WebCoreNSURLSessionTaskTransactionMetrics = _Class(
"WebCoreNSURLSessionTaskTransactionMetrics"
)
WebAVPlayerViewController = _Class("WebAVPlayerViewController")
WebAVPlayerViewControllerDelegate = _Class("WebAVPlayerViewControllerDelegate")
WebCoreRenderThemeBundle = _Class("WebCoreRenderThemeBundle")
WebCoreAuthenticationClientAsChallengeSender = _Class(
"WebCoreAuthenticationClientAsChallengeSender"
)
WebCookieObserverAdapter = _Class("WebCookieObserverAdapter")
WebNSHTTPCookieStorageDummyForInternalAccess = _Class(
"WebNSHTTPCookieStorageDummyForInternalAccess"
)
WebAVAssetWriterDelegate = _Class("WebAVAssetWriterDelegate")
WebDatabaseTransactionBackgroundTaskController = _Class(
"WebDatabaseTransactionBackgroundTaskController"
)
WebCoreMotionManager = _Class("WebCoreMotionManager")
WebAVMediaSelectionOption = _Class("WebAVMediaSelectionOption")
WebAVPlayerController = _Class("WebAVPlayerController")
WebValidationBubbleDelegate = _Class("WebValidationBubbleDelegate")
WebValidationBubbleTapRecognizer = _Class("WebValidationBubbleTapRecognizer")
WebPreviewConverterDelegate = _Class("WebPreviewConverterDelegate")
LegacyTileCacheTombstone = _Class("LegacyTileCacheTombstone")
WebCoreBundleFinder = _Class("WebCoreBundleFinder")
WebDisplayLinkHandler = _Class("WebDisplayLinkHandler")
WebCoreTextTrackRepresentationCocoaHelper = _Class(
"WebCoreTextTrackRepresentationCocoaHelper"
)
WebAnimationDelegate = _Class("WebAnimationDelegate")
WebCoreAudioBundleClass = _Class("WebCoreAudioBundleClass")
WebEventRegion = _Class("WebEventRegion")
WebArchiveResourceWebResourceHandler = _Class("WebArchiveResourceWebResourceHandler")
WebArchiveResourceFromNSAttributedString = _Class(
"WebArchiveResourceFromNSAttributedString"
)
WebAccessibilityObjectWrapperBase = _Class("WebAccessibilityObjectWrapperBase")
WebAccessibilityObjectWrapper = _Class("WebAccessibilityObjectWrapper")
WebAccessibilityTextMarker = _Class("WebAccessibilityTextMarker")
WebAVSampleBufferErrorListener = _Class("WebAVSampleBufferErrorListener")
WebAVStreamDataParserListener = _Class("WebAVStreamDataParserListener")
WebSpeechSynthesisWrapper = _Class("WebSpeechSynthesisWrapper")
WebMediaSessionHelper = _Class("WebMediaSessionHelper")
WebRootSampleBufferBoundsChangeListener = _Class(
"WebRootSampleBufferBoundsChangeListener"
)
WebCoreAVFPullDelegate = _Class("WebCoreAVFPullDelegate")
WebCoreAVFLoaderDelegate = _Class("WebCoreAVFLoaderDelegate")
WebCoreAVFMovieObserver = _Class("WebCoreAVFMovieObserver")
WebAVSampleBufferStatusChangeListener = _Class("WebAVSampleBufferStatusChangeListener")
WebCoreSharedBufferResourceLoaderDelegate = _Class(
"WebCoreSharedBufferResourceLoaderDelegate"
)
WebCoreAudioCaptureSourceIOSListener = _Class("WebCoreAudioCaptureSourceIOSListener")
WebCDMSessionAVContentKeySessionDelegate = _Class(
"WebCDMSessionAVContentKeySessionDelegate"
)
WebCoreFPSContentKeySessionDelegate = _Class("WebCoreFPSContentKeySessionDelegate")
WebCoreAVVideoCaptureSourceObserver = _Class("WebCoreAVVideoCaptureSourceObserver")
WebCoreAVCaptureDeviceManagerObserver = _Class("WebCoreAVCaptureDeviceManagerObserver")
WebAVAudioSessionAvailableInputsListener = _Class(
"WebAVAudioSessionAvailableInputsListener"
)
WebActionDisablingCALayerDelegate = _Class("WebActionDisablingCALayerDelegate")
WebScriptObjectPrivate = _Class("WebScriptObjectPrivate")
WebInterruptionObserverHelper = _Class("WebInterruptionObserverHelper")
WebNetworkStateObserver = _Class("WebNetworkStateObserver")
WebLowPowerModeObserver = _Class("WebLowPowerModeObserver")
WebBackgroundTaskController = _Class("WebBackgroundTaskController")
WAKResponder = _Class("WAKResponder")
WAKWindow = _Class("WAKWindow")
WAKView = _Class("WAKView")
WAKClipView = _Class("WAKClipView")
WAKScrollView = _Class("WAKScrollView")
WebViewVisualIdentificationOverlay = _Class("WebViewVisualIdentificationOverlay")
WebEvent = _Class("WebEvent")
WebScriptObject = _Class("WebScriptObject")
WebAVPlayerLayer = _Class("WebAVPlayerLayer")
LegacyTileLayer = _Class("LegacyTileLayer")
LegacyTileHostLayer = _Class("LegacyTileHostLayer")
WebSimpleLayer = _Class("WebSimpleLayer")
WebLayer = _Class("WebLayer")
WebGLLayer = _Class("WebGLLayer")
WebVideoContainerLayer = _Class("WebVideoContainerLayer")
WebTiledBackingLayer = _Class("WebTiledBackingLayer")
WebSystemBackdropLayer = _Class("WebSystemBackdropLayer")
WebDarkSystemBackdropLayer = _Class("WebDarkSystemBackdropLayer")
WebLightSystemBackdropLayer = _Class("WebLightSystemBackdropLayer")
WebResourceUsageOverlayLayer = _Class("WebResourceUsageOverlayLayer")
WebGPULayer = _Class("WebGPULayer")
WebSwapLayer = _Class("WebSwapLayer")
WebCustomNSURLError = _Class("WebCustomNSURLError")
WebCoreSharedBufferData = _Class("WebCoreSharedBufferData")
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
dd42d3085377f8cc1ae75a67ff9d0dd9b8e968a9 | 5b711d9d1c71eb8a7c253a17b2a7f319163d2fdc | /tests/providers/amazon/aws/operators/test_emr_containers.py | 3a7dd400d8fef5fa90ab2fa896a6fb1d7ba56364 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | waleedsamy/airflow | 8289465af0ef8199bf82e0696115bb5f83f9b667 | b19ccf8ead027d9eaf53b33305be5873f2711699 | refs/heads/main | 2023-03-17T06:29:20.695168 | 2022-08-29T16:59:13 | 2022-08-29T16:59:13 | 251,581,666 | 0 | 0 | Apache-2.0 | 2020-03-31T11:21:23 | 2020-03-31T11:21:22 | null | UTF-8 | Python | false | false | 7,407 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.emr import EmrContainerHook
from airflow.providers.amazon.aws.operators.emr import EmrContainerOperator, EmrEksCreateClusterOperator
SUBMIT_JOB_SUCCESS_RETURN = {
'ResponseMetadata': {'HTTPStatusCode': 200},
'id': 'job123456',
'virtualClusterId': 'vc1234',
}
CREATE_EMR_ON_EKS_CLUSTER_RETURN = {'ResponseMetadata': {'HTTPStatusCode': 200}, 'id': 'vc1234'}
GENERATED_UUID = '800647a9-adda-4237-94e6-f542c85fa55b'
class TestEmrContainerOperator(unittest.TestCase):
@mock.patch('airflow.providers.amazon.aws.hooks.emr.EmrContainerHook')
def setUp(self, emr_hook_mock):
configuration.load_test_config()
self.emr_hook_mock = emr_hook_mock
self.emr_container = EmrContainerOperator(
task_id='start_job',
name='test_emr_job',
virtual_cluster_id='vzw123456',
execution_role_arn='arn:aws:somerole',
release_label='6.3.0-latest',
job_driver={},
configuration_overrides={},
poll_interval=0,
client_request_token=GENERATED_UUID,
tags={},
)
@mock.patch.object(EmrContainerHook, 'submit_job')
@mock.patch.object(EmrContainerHook, 'check_query_status')
def test_execute_without_failure(
self,
mock_check_query_status,
mock_submit_job,
):
mock_submit_job.return_value = "jobid_123456"
mock_check_query_status.return_value = 'COMPLETED'
self.emr_container.execute(None)
mock_submit_job.assert_called_once_with(
'test_emr_job', 'arn:aws:somerole', '6.3.0-latest', {}, {}, GENERATED_UUID, {}
)
mock_check_query_status.assert_called_once_with('jobid_123456')
assert self.emr_container.release_label == '6.3.0-latest'
@mock.patch.object(
EmrContainerHook,
'check_query_status',
side_effect=['PENDING', 'PENDING', 'SUBMITTED', 'RUNNING', 'COMPLETED'],
)
def test_execute_with_polling(self, mock_check_query_status):
# Mock out the emr_client creator
emr_client_mock = MagicMock()
emr_client_mock.start_job_run.return_value = SUBMIT_JOB_SUCCESS_RETURN
emr_session_mock = MagicMock()
emr_session_mock.client.return_value = emr_client_mock
boto3_session_mock = MagicMock(return_value=emr_session_mock)
with patch('boto3.session.Session', boto3_session_mock):
assert self.emr_container.execute(None) == 'job123456'
assert mock_check_query_status.call_count == 5
@mock.patch.object(EmrContainerHook, 'submit_job')
@mock.patch.object(EmrContainerHook, 'check_query_status')
@mock.patch.object(EmrContainerHook, 'get_job_failure_reason')
def test_execute_with_failure(
self, mock_get_job_failure_reason, mock_check_query_status, mock_submit_job
):
mock_submit_job.return_value = "jobid_123456"
mock_check_query_status.return_value = 'FAILED'
mock_get_job_failure_reason.return_value = (
"CLUSTER_UNAVAILABLE - Cluster EKS eks123456 does not exist."
)
with pytest.raises(AirflowException) as ctx:
self.emr_container.execute(None)
assert 'EMR Containers job failed' in str(ctx.value)
assert 'Error: CLUSTER_UNAVAILABLE - Cluster EKS eks123456 does not exist.' in str(ctx.value)
@mock.patch.object(
EmrContainerHook,
'check_query_status',
side_effect=['PENDING', 'PENDING', 'SUBMITTED', 'RUNNING', 'COMPLETED'],
)
def test_execute_with_polling_timeout(self, mock_check_query_status):
# Mock out the emr_client creator
emr_client_mock = MagicMock()
emr_client_mock.start_job_run.return_value = SUBMIT_JOB_SUCCESS_RETURN
emr_session_mock = MagicMock()
emr_session_mock.client.return_value = emr_client_mock
boto3_session_mock = MagicMock(return_value=emr_session_mock)
timeout_container = EmrContainerOperator(
task_id='start_job',
name='test_emr_job',
virtual_cluster_id='vzw123456',
execution_role_arn='arn:aws:somerole',
release_label='6.3.0-latest',
job_driver={},
configuration_overrides={},
poll_interval=0,
max_tries=3,
)
with patch('boto3.session.Session', boto3_session_mock):
with pytest.raises(AirflowException) as ctx:
timeout_container.execute(None)
assert mock_check_query_status.call_count == 3
assert 'Final state of EMR Containers job is SUBMITTED' in str(ctx.value)
assert 'Max tries of poll status exceeded' in str(ctx.value)
class TestEmrEksCreateClusterOperator(unittest.TestCase):
@mock.patch('airflow.providers.amazon.aws.hooks.emr.EmrContainerHook')
def setUp(self, emr_hook_mock):
configuration.load_test_config()
self.emr_hook_mock = emr_hook_mock
self.emr_container = EmrEksCreateClusterOperator(
task_id='start_cluster',
virtual_cluster_name="test_virtual_cluster",
eks_cluster_name="test_eks_cluster",
eks_namespace="test_eks_namespace",
tags={},
)
@mock.patch.object(EmrContainerHook, 'create_emr_on_eks_cluster')
def test_emr_on_eks_execute_without_failure(self, mock_create_emr_on_eks_cluster):
mock_create_emr_on_eks_cluster.return_value = "vc1234"
self.emr_container.execute(None)
mock_create_emr_on_eks_cluster.assert_called_once_with(
'test_virtual_cluster', 'test_eks_cluster', 'test_eks_namespace', {}
)
assert self.emr_container.virtual_cluster_name == 'test_virtual_cluster'
@mock.patch.object(EmrContainerHook, 'create_emr_on_eks_cluster')
def test_emr_on_eks_execute_with_failure(self, mock_create_emr_on_eks_cluster):
expected_exception_msg = (
"An error occurred (ValidationException) when calling the "
"CreateVirtualCluster "
"operation:"
"A virtual cluster already exists in the given namespace"
)
mock_create_emr_on_eks_cluster.side_effect = AirflowException(expected_exception_msg)
with pytest.raises(AirflowException) as ctx:
self.emr_container.execute(None)
assert expected_exception_msg in str(ctx.value)
| [
"noreply@github.com"
] | waleedsamy.noreply@github.com |
288a820b0ef15cd603354387b1bf9118f4bbac0c | 7a6fd34ad06e73a8ef4c1f77df344b79fc3125a8 | /zeus/datasets/common/cityscapes.py | d15c14a688e58244905b9fbed1b117da01c8f46d | [
"MIT"
] | permissive | lulilulilalala/vega | 3e105b499f921f07176f0230afdbd6a45209c242 | 977054e12dd3bc1c96bbe35f18d5db4bc82d0522 | refs/heads/master | 2023-05-14T12:59:35.125859 | 2021-06-07T12:27:16 | 2021-06-07T12:27:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,609 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is the class of Cityscapes dataset."""
import os.path as osp
import cv2
import numpy as np
import glob
import pickle
from .utils.dataset import Dataset
from zeus.common import ClassFactory, ClassType
from zeus.common import FileOps
from zeus.datasets.conf.city_scapes import CityscapesConfig
@ClassFactory.register(ClassType.DATASET)
class Cityscapes(Dataset):
    """Class of Cityscapes dataset, which is a subclass of Dataset.

    Two types of data are supported:
    1) Image with extensions in 'jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG', 'ppm', 'PPM', 'bmp', 'BMP'
    2) pkl with extensions in 'pkl', 'pt', 'pth'. Image pkl should be in format of HWC, with bgr as the channels
    To use this dataset, provide either: 1) data_dir and label_dir; or 2) data_path and list_file

    :param train: if the mode is train or false, defaults to True
    :type train: bool, optional
    :param cfg: the config the dataset need, defaults to None, and if the cfg is None,
        the default config will be used, the default config file is a yml file with the same name of the class
    :type cfg: yml, py or dict
    """
    # Class-level default configuration shared by all instances.
    config = CityscapesConfig()
    def __init__(self, **kwargs):
        """Construct the Cityscapes class and resolve the data/label file lists."""
        super(Cityscapes, self).__init__(**kwargs)
        self.dataset_init()
    def _init_transforms(self):
        """Initialize the (image, mask) pair transforms.

        Transforms are appended in a fixed order; each one is enabled only
        when its key is present in ``self.args``.

        :return: list of instantiated pair transforms
        :rtype: list
        """
        result = list()
        if "Rescale" in self.args:
            # Local import: logging is only needed when Rescale is configured.
            import logging
            logging.info(str(dict(**self.args.Rescale)))
            result.append(self._get_cls("Rescale_pair")(**self.args.Rescale))
        if "RandomMirror" in self.args and self.args.RandomMirror:
            result.append(self._get_cls("RandomHorizontalFlip_pair")())
        if "RandomColor" in self.args:
            result.append(self._get_cls("RandomColor_pair")(**self.args.RandomColor))
        if "RandomGaussianBlur" in self.args:
            result.append(self._get_cls("RandomGaussianBlur_pair")(**self.args.RandomGaussianBlur))
        if "RandomRotation" in self.args:
            result.append(self._get_cls("RandomRotate_pair")(**self.args.RandomRotation))
        if "Normalization" in self.args:
            result.append(self._get_cls("Normalize_pair")(**self.args.Normalization))
        if "RandomCrop" in self.args:
            result.append(self._get_cls("RandomCrop_pair")(**self.args.RandomCrop))
        return result
    def _get_cls(self, _name):
        """Resolve a transform class by name from the TRANSFORM registry."""
        return ClassFactory.get_cls(ClassType.TRANSFORM, _name)
    def dataset_init(self):
        """Construct method.

        If both data_dir and label_dir are provided, then use data_dir and label_dir
        Otherwise use data_path and list_file.
        """
        if "data_dir" in self.args and "label_dir" in self.args:
            self.args.data_dir = FileOps.download_dataset(self.args.data_dir)
            self.args.label_dir = FileOps.download_dataset(self.args.label_dir)
            # NOTE(review): pairing relies on the sorted data and label file
            # names lining up one-to-one -- confirm the directory layout.
            self.data_files = sorted(glob.glob(osp.join(self.args.data_dir, "*")))
            self.label_files = sorted(glob.glob(osp.join(self.args.label_dir, "*")))
        else:
            if "data_path" not in self.args or "list_file" not in self.args:
                raise Exception("You must provide a data_path and a list_file!")
            self.args.data_path = FileOps.download_dataset(self.args.data_path)
            # Each line of the list file holds "<data_file> <label_file>",
            # both paths relative to data_path.
            with open(osp.join(self.args.data_path, self.args.list_file)) as f:
                lines = f.readlines()
            self.data_files = [None] * len(lines)
            self.label_files = [None] * len(lines)
            for i, line in enumerate(lines):
                data_file_name, label_file_name = line.strip().split()
                self.data_files[i] = osp.join(self.args.data_path, data_file_name)
                self.label_files[i] = osp.join(self.args.data_path, label_file_name)
        # Pick the item reader once, based on the file extensions found above.
        datatype = self._get_datatype()
        if datatype == "image":
            self.read_fn = self._read_item_image
        else:
            self.read_fn = self._read_item_pickle
    def __len__(self):
        """Get the length of the dataset.

        :return: the length of the dataset
        :rtype: int
        """
        return len(self.data_files)
    def __getitem__(self, index):
        """Get an item of the dataset according to the index.

        :param index: index
        :type index: int
        :return: (image, mask); image is float32 CHW, mask is int64 HW
        :rtype: tuple of np.array
        """
        image, label = self.read_fn(index)
        # image_name = self.data_files[index].split("/")[-1].split(".")[0]
        image, label = self.transforms(image, label)
        # Convert the image from HWC to CHW for the network input.
        image = np.transpose(image, [2, 0, 1]).astype(np.float32)
        mask = label.astype(np.int64)
        return image, mask
    @staticmethod
    def _get_datatype_files(file_paths):
        """Check file extensions in file_paths to decide whether they are images or pkl.

        :param file_paths: a list of file names
        :type file_paths: list of str
        :return: "image" or "pkl" according to the type of files
        :rtype: str
        :raises Exception: if any extension is neither an image nor a pickle
        """
        IMG_EXTENSIONS = {'jpg', 'JPG', 'jpeg', 'JPEG',
                          'png', 'PNG', 'ppm', 'PPM', 'bmp', 'BMP'}
        PKL_EXTENSIONS = {'pkl', 'pt', 'pth'}
        file_extensions = set(data_file.split('.')[-1] for data_file in file_paths)
        if file_extensions.issubset(IMG_EXTENSIONS):
            return "image"
        elif file_extensions.issubset(PKL_EXTENSIONS):
            return "pkl"
        else:
            raise Exception("Invalid file extension")
    def _get_datatype(self):
        """Check the datatype of all data.

        :return: "image" or "pkl"
        :rtype: str
        :raises Exception: if data files and label files disagree on type
        """
        type_data = self._get_datatype_files(self.data_files)
        type_labels = self._get_datatype_files(self.label_files)
        if type_data == type_labels:
            return type_data
        else:
            raise Exception("Images and masks must be both image or pkl!")
    def _read_item_image(self, index):
        """Read image and label in "image" format.

        :param index: index
        :type index: int
        :return: image in np.array, HWC, bgr; label in np.array, HW
        :rtype: tuple of np.array
        """
        image = cv2.imread(self.data_files[index], cv2.IMREAD_COLOR)
        label = cv2.imread(self.label_files[index], cv2.IMREAD_GRAYSCALE)
        return image, label
    def _read_item_pickle(self, index):
        """Read image and label in "pkl" format.

        :param index: index
        :type index: int
        :return: image in np.array, HWC, bgr; label in np.array, HW
        :rtype: tuple of np.array
        """
        with open(self.data_files[index], "rb") as file:
            image = pickle.load(file)
        with open(self.label_files[index], "rb") as file:
            label = pickle.load(file)
        return image, label
    @property
    def input_size(self):
        """Input size of Cityspace.

        :return: the input size
        :rtype: int
        """
        # NOTE(review): ``self.data`` is not assigned anywhere in this class;
        # presumably it is provided by the Dataset base class -- confirm.
        _shape = self.data.shape
        return _shape[1]
| [
"zhangjiajin@huawei.com"
] | zhangjiajin@huawei.com |
940ebcb6548fcf7493b3b0290cbd312e45cf65fd | ac6f3ab88c67b09e187c92652e29fabd5bf5ffd5 | /code/15_solution.py | 0d5e9f19093d8753b86315eb89915727f382747f | [] | no_license | YanjiaSun/leetcode-3 | f3c87ef6961220c39d48094ef65db921f34d070f | 59d323161dba8d250d6dd7f31c40731845356f21 | refs/heads/master | 2022-12-05T17:15:55.601506 | 2020-08-03T11:38:39 | 2020-08-03T11:38:39 | 284,863,415 | 1 | 0 | null | 2020-08-04T03:05:05 | 2020-08-04T03:05:04 | null | UTF-8 | Python | false | false | 920 | py | class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets in *nums* that sum to zero.

        Sorts the input, then for each candidate first element ``a`` runs a
        two-pointer scan over the remaining suffix for pairs summing to
        ``-a``.  Duplicates are skipped at all three positions, so each
        triplet is emitted exactly once.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if not nums or len(nums) < 3: return []
        n = len(nums)
        nums.sort() # O(NlogN)
        triplets = []
        def find_two_sum(j, k, target):
            # Two-pointer sweep over sorted nums[j..k] collecting pairs
            # (b, c) with b + c == target; appends into `triplets`.
            while j < k:
                b, c = nums[j], nums[k]
                if b + c > target:
                    while j < k and nums[k] == c: k -= 1
                elif b + c < target:
                    while j < k and nums[j] == b: j += 1
                else:
                    triplets.append([-target, b, c])
                    # Advance both pointers past every duplicate of b and c.
                    while j < k and nums[k] == c: k -= 1
                    while j < k and nums[j] == b: j += 1
        i = 0
        # In a sorted array only nums[i] <= 0 can start a zero-sum triplet.
        while i < n - 2 and nums[i] <= 0:
            a, target = nums[i], -nums[i]
            find_two_sum(i+1, n-1, target)
            # Skip duplicates of the first element.
            while i < n - 2 and nums[i] == a: i += 1
        return triplets | [
"ryanzjlib@gmail.com"
] | ryanzjlib@gmail.com |
6c9f386ce93fb1dc4fc4c65979aa9eb7fa14a8e5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03251/s200090667.py | 58f7f34828a49a6e5e378cbfc2fff95c05950530 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | n,m,X,Y=map(int,input().split())
# x: the larger of X and every value in the first list (war-side bound).
x=max(X,max(map(int,input().split())))
# y: the smaller of Y and every value in the second list (peace-side bound).
y=min(Y,min(map(int,input().split())))
# An agreeable integer Z with x < Z <= y exists iff x < y.
print(["War","No War"][x<y]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4f589e682dfc192a5f76f0736435484407adf7b0 | 6081557aa021c9e3080e2010b4396154c6a15fd9 | /app/cli.py | cd3c06b2d883d3c1352ded88f49b6b7e0bf3e304 | [] | no_license | boberstarosta/flask-tutorial | 7dfe700db143bb003a3a0c934751c78cf623815b | 10dcacc4c0dcbf6a1607dc0ae96ecb5cbb45423f | refs/heads/master | 2022-12-11T09:24:43.123961 | 2018-11-25T11:23:12 | 2018-11-25T11:23:12 | 158,431,045 | 0 | 0 | null | 2022-12-08T01:18:40 | 2018-11-20T18:01:42 | Python | UTF-8 | Python | false | false | 1,177 | py | import os
import click
def register(app):
    """Attach the ``translate`` command group to the Flask app's CLI.

    Wraps the pybabel extract/update/compile/init workflow as
    ``flask translate ...`` sub-commands.

    :param app: the Flask application whose ``cli`` the group is added to.
    """
    def run(command, step):
        # Shell out to pybabel; os.system returns non-zero on failure.
        if os.system(command):
            raise RuntimeError(step + ' command failed')

    # The extract step is shared by `update` and `init` (was duplicated).
    extract_cmd = 'pybabel extract -F babel.cfg -k _l -o messages.pot .'

    @app.cli.group()
    def translate():
        """Translation and localization commands."""

    @translate.command()
    def update():
        """Update all languages."""
        run(extract_cmd, 'extract')
        run('pybabel update -i messages.pot -d app/translations', 'update')
        os.remove('messages.pot')

    @translate.command()
    def compile():
        """Compile all languages."""
        run('pybabel compile -d app/translations', 'compile')

    @translate.command()
    @click.argument('lang')
    def init(lang):
        """Initialize a new language."""
        run(extract_cmd, 'extract')
        run('pybabel init -i messages.pot -d app/translations -l ' + lang,
            'init')
        os.remove('messages.pot')
| [
"boberstarosta@gmail.com"
] | boberstarosta@gmail.com |
89ac6f3943f40a5c1a885e3e939a3a50779f3310 | 566ac3f150d5cec30e0926f4c369296f54b93503 | /src/contaminate.py | 640f9403909b876426a4ee6f7a22690699b56646 | [
"MIT"
] | permissive | mehdirezaie/SYSNet | 1e325bbedbe3e69b315028afd22a351bd2ee6d01 | 8da75f54177e460e6e446bfc2207dd82a76ac4cc | refs/heads/master | 2021-11-30T04:48:51.118259 | 2021-11-18T16:44:56 | 2021-11-18T16:44:56 | 171,679,858 | 6 | 1 | MIT | 2019-08-16T20:56:31 | 2019-02-20T13:38:55 | Jupyter Notebook | UTF-8 | Python | false | false | 3,388 | py |
import fitsio as ft
import numpy as np
import healpy as hp
import os
import sys
class mock(object):
    """Generate contaminated mock galaxy maps from a best-fit systematics model.

    Loads imaging features and regression parameters, builds the design
    matrix once, then draws model parameters (``simulate``) and projects the
    resulting counts onto a HEALPix map (``project``).
    """
    def __init__(self, featsfile, paramsfile, func='lin', sf=1207432.7901):
        """Load features/parameters and precompute the design matrix.

        :param featsfile: FITS file with 'hpind' and 'features' columns.
        :param paramsfile: .npy file holding a dict with 'axfit', 'xstats'
            and per-model 'params'.
        :param func: regression model, 'lin' or 'quad'.
        :param sf: scale factor applied to the parameter covariance.
        """
        # read inputs
        feats = ft.read(featsfile)
        params = np.load(paramsfile).item()
        # attrs
        self.hpix = feats['hpind']
        self.feats = feats['features']
        self.axfit = params['axfit']
        self.xstats = params['xstats']
        #print('Will scale the covariance by %.4f'%sf)
        # Best-fit parameters: (mean vector, scaled covariance matrix).
        bfp_raw = params['params'][func]
        self.bfp = (bfp_raw[0], sf*bfp_raw[1])
        #
        # prepare
        self.n = self.feats.shape[0]
        # Standardize features with precomputed (mean, std) statistics.
        x = (self.feats - self.xstats[0])/self.xstats[1] # select axis
        x_scaled = x[:, self.axfit]
        if func == 'lin':
            x_vector = np.column_stack([np.ones(self.n), x_scaled])
        elif func == 'quad':
            x_vector = np.column_stack([np.ones(self.n), x_scaled, x_scaled*x_scaled])
        else:
            exit(f"func:{func} is not defined")
        #
        #
        self.x_vector = x_vector
    def simulate(self, kind='truth', seed=12345):
        """Draw model parameters and compute per-pixel predictions ``self.txs``.

        :param kind: 'truth' uses the best-fit mean; 'fixed' draws one
            parameter vector; 'random' draws one vector per pixel.
        :param seed: seed for numpy's global RNG.
        """
        if kind not in ['fixed', 'random', 'truth']:
            exit(f"kind : {kind} is not defined")
        np.random.seed(seed) # set the seed
        if kind == 'truth':
            thetas = self.bfp[0]
        elif kind == 'fixed':
            thetas = np.random.multivariate_normal(*self.bfp)
        elif kind == 'random':
            thetas = np.random.multivariate_normal(*self.bfp, size=self.n)
        else:
            exit(f"kind : {kind} is not defined")
        # Row-wise dot product of parameters with the design matrix.
        tx = (thetas * self.x_vector)
        self.txs = np.sum(tx, axis=1)
    def project(self, hpin, tag):
        """Contaminate the HEALPix map *hpin* and write the result to disk.

        Writes two maps next to the input under a *tag* subdirectory: the
        Poisson-sampled contaminated counts and a mask of pixels whose
        contaminated expectation went negative.

        :param hpin: path to the input HEALPix map.
        :param tag: subdirectory/filename prefix for the outputs.
        """
        hpmin = hp.read_map(hpin, verbose=False)
        fpath = '/'.join((hpin.split('/')[:-1] + [tag]))
        mname = '_'.join((tag, 'mask',hpin.split('/')[-1]))
        fname = '_'.join((tag, hpin.split('/')[-1]))
        if not os.path.exists(fpath):
            os.makedirs(fpath)
        # Multiply predictions into the input map on the feature pixels.
        ngalcont = self.txs * hpmin[self.hpix]
        fou = '/'.join((fpath, fname))
        mou = '/'.join((fpath, mname))
        # Negative expectations cannot be Poisson-sampled; mask them out.
        ngal_neg = ngalcont < 0.0
        hpix_neg = self.hpix[ngal_neg]
        hpix_noneg = self.hpix[~ngal_neg]
        ngal_noneg = ngalcont[~ngal_neg]
        #
        #
        ngalm = np.zeros_like(hpmin)
        ngalm[hpix_noneg] = np.random.poisson(ngal_noneg)
        #
        #
        negm = np.zeros_like(hpmin)
        negm[hpix_neg] = 1.0
        hp.write_map(mou, negm, fits_IDL=False, overwrite=True, dtype=np.float64)
        hp.write_map(fou, ngalm, fits_IDL=False, overwrite=True, dtype=np.float64)
        print('%s is written'%fou)
if __name__ == '__main__':
    # One global seed yields a reproducible per-mock seed table.
    np.random.seed(123456) # set the global seed
    seeds = np.random.randint(0, 4294967295, size=1000)
    # CLI: contaminate.py <features.fits> <regression_params.npy> <mock maps...>
    feats = sys.argv[1]
    regp = sys.argv[2]
    files = sys.argv[3:]
    print('feats', feats)
    print('regp', regp)
    print('files[:2]', files[:2])
    for i,mock_i in enumerate(files):
        mymock = mock(feats,
                      regp,
                      func='lin', sf=23765.2929*0.05) # 0.1XtotalfracXvarngal = 2376.52929
        mymock.simulate(kind='random', seed=seeds[i])
        mymock.project(mock_i, 'cp2p')
| [
"medirz90@icloud.com"
] | medirz90@icloud.com |
16e67ef722cc276cdfc9de755d0783c8fa00e985 | 881a30f13880944d903fb304af0e4bdebb9bd9fb | /RL/algorithms/safe_sac.py | 66667ae9133c6e465c4146271e7379122abef6d2 | [] | no_license | bhatiaabhinav/RL | 5d32b4502e1f15a9d6b8b1ba5627b7c0dd1b8202 | 2bbfa05d5b56b1ea65b65f27b80c243d0888e6d8 | refs/heads/master | 2022-12-11T04:45:18.832027 | 2021-10-13T00:22:15 | 2021-10-13T00:22:15 | 152,107,235 | 1 | 0 | null | 2022-12-08T06:59:28 | 2018-10-08T15:55:28 | Python | UTF-8 | Python | false | false | 6,000 | py | import gym
import safety_gym # noqa
import RL
import RL.envs
from RL.agents import BasicStatsRecordingAgent
from RL.agents import (EnvRenderingAgent, ExperienceBufferAgent, # noqa
ForceExploitControlAgent, MatplotlibPlotAgent,
ModelLoaderSaverAgent, ParamsCopyAgent, PygletLoopAgent,
RandomPlayAgent, RewardScalingAgent, SafeSACActAgent,
SafeSACTrainAgent, SeedingAgent, StatsLoggingAgent,
TensorboardAgent, TensorFlowAgent)
from RL.common.wrappers import wrap_standard
from RL.contexts import SACContext
# Build the SAC context and wrap the requested gym environment.
c = SACContext()
c.set_env(wrap_standard(gym.make(c.env_id), c))
# The runner drives all registered agents; registration order matters.
r = RL.Runner(c, "runner")
# basics:
r.register_agent(TensorFlowAgent(c, "TensorFlowAgent"))
r.register_agent(SeedingAgent(c, "SeedingAgent"))
r.register_agent(RewardScalingAgent(c, "RewardScalingAgent"))
# core algo
r.register_agent(ForceExploitControlAgent(c, "ExploitControlAgent"))
r.register_agent(RandomPlayAgent(c, "MinimumExperienceAgent", play_for_steps=c.minimum_experience))
safe_sac_act_agent = r.register_agent(SafeSACActAgent(c, "SafeSACActAgent"))
r.register_agent(ModelLoaderSaverAgent(c, "LoaderSaverAgent", safe_sac_act_agent.model.get_vars()))
# Training-only agents: replay buffer, trainer, and target-net sync.
if not c.eval_mode:
    exp_buff_agent = r.register_agent(ExperienceBufferAgent(c, "ExperienceBufferAgent"))
    safe_sac_train_agent = r.register_agent(SafeSACTrainAgent(c, "SafeSACTrainAgent", safe_sac_act_agent, exp_buff_agent))
    r.register_agent(ParamsCopyAgent(c, "TargetNetUpdateAgent", safe_sac_act_agent.model.get_vars('valuefn0', 'valuefn1', 'running_stats'), safe_sac_train_agent.target_model.get_vars('valuefn0', 'valuefn1', 'running_stats'), c.target_network_update_every, c.target_network_update_tau))
# rendering and visualizations:
if c.render:
    r.register_agent(EnvRenderingAgent(c, "RenderingAgent"))
    r.register_agent(PygletLoopAgent(c, "PygletLoopAgent"))
# stats record:
r.register_agent(BasicStatsRecordingAgent(c, "StatsRecordingAgent"))
# stats log:
keys = list(filter(lambda k: k.startswith('Env-0'), RL.stats.stats_dict.keys()))
misc_keys = ['ValueFn Loss', "Safety ValueFn Loss", 'Critic Loss', "Safety Critic Loss", 'Actor Loss', 'Total Updates', "Average Actor Critic Q", "Average Actor Critic Safety Q", "Average Action LogStd", "Average Action LogPi"]
r.register_agent(StatsLoggingAgent(c, "Env-0-StatsLoggingAgent", keys + misc_keys, poll_every_episode=1))
# stats plot:
r.register_agent(TensorboardAgent(c, "Env-0-TensorboardAgent", keys, 'Env-0 Total Frames'))
r.register_agent(TensorboardAgent(c, 'Misc-TensorboardAgent', misc_keys, 'Env-0 Total Frames', log_every_episode=-1, log_every_step=100))
# r.register_agent(MatplotlibPlotAgent(c, 'RPE', [(RL.stats.get('Env-0 Episode ID'), RL.stats.get('Env-0 Episode Reward'))], ['b-'], xlabel='Episode ID', ylabel='Reward', legend='RPE', auto_save=True, smoothing=c.matplotlib_smoothing))
# r.register_agent(MatplotlibPlotAgent(c, 'CPE', [(RL.stats.get('Env-0 Episode ID'), RL.stats.get('Env-0 Episode Cost'))], ['b-'], xlabel='Episode ID', ylabel='Cost', legend='CPE', auto_save=True, smoothing=c.matplotlib_smoothing))
# Blocks until the configured number of steps has run.
r.run()
"""
python -m RL.algorithms.safe_sac --env_id=MyPointCircleFinite-v0 --experiment_name=safesac_ln_vs_cpo --num_steps_to_run=150000 --normalize_observations=False --alpha=0.2 --actor_learning_rate=0.0001 --learning_rate=0.001 --target_network_update_tau=0.005 --exploit_every=8 --minimum_experience=10000 --logstd_min=-20 --logstd_max=2 --num_critics=2 --init_scale=None --l2_reg=0 --train_every=1 --experience_buffer_length=1000000 --minibatch_size=256 --hidden_layers=[64,32] --gamma=0.995 --cost_gamma=0.995 --layer_norm=True --cost_threshold=5 --beta=0.2 --safe_sac_penalty_max_grad=1000 --clip_gradients=1 --ignore_done_on_timelimit=False --reward_scaling=2 --cost_scaling=2 --record_returns=True
"""
"""
python -m RL.algorithms.safe_sac --env_id=MyPointCircleFinite-v0 --experiment_name=safesac --num_steps_to_run=150000 --normalize_observations=False --alpha=0.2 --actor_learning_rate=0.0001 --learning_rate=0.001 --target_network_update_tau=0.005 --exploit_every=8 --minimum_experience=10000 --logstd_min=-20 --logstd_max=2 --num_critics=2 --init_scale=None --l2_reg=0 --train_every=1 --experience_buffer_length=1000000 --minibatch_size=100 --hidden_layers=[256,256] --gamma=0.99 --cost_gamma=1 --layer_norm=False --cost_threshold=5 --beta=0.2 --safe_sac_penalty_max_grad=1000 --clip_gradients=1 --ignore_done_on_timelimit=False --reward_scaling=2 --cost_scaling=2 --record_returns=False
"""
"""
python -m RL.algorithms.safe_sac --env_id=MyPointCircleFinite-v0 --experiment_name=safesac --num_steps_to_run=150000 --normalize_observations=False --alpha=0.2 --actor_learning_rate=0.0001 --learning_rate=0.001 --target_network_update_tau=0.005 --exploit_every=8 --minimum_experience=10000 --logstd_min=-20 --logstd_max=2 --num_critics=2 --init_scale=None --l2_reg=0 --train_every=1 --experience_buffer_length=1000000 --minibatch_size=100 --hidden_layers=[256,256] --gamma=0.99 --cost_gamma=1 --layer_norm=False --cost_threshold=5 --beta=0.2 --safe_sac_penalty_max_grad=1000 --clip_gradients=1 --ignore_done_on_timelimit=False --reward_scaling=2 --cost_scaling=2 --record_returns=False
"""
"""
python -m RL.algorithms.safe_sac --env_id=Safexp-PointGoal1-v0 --experiment_name=safesac_R20C0.4 --num_steps_to_run=10000000 --normalize_observations=False --alpha=0.2 --actor_learning_rate=0.0001 --learning_rate=0.001 --target_network_update_tau=0.005 --exploit_every=8 --minimum_experience=10000 --logstd_min=-20 --logstd_max=2 --num_critics=2 --init_scale=None --l2_reg=0 --train_every=1 --experience_buffer_length=1000000 --minibatch_size=100 --hidden_layers=[256,256] --gamma=0.99 --cost_gamma=1 --layer_norm=False --cost_threshold=25 --beta=0.2 --safe_sac_penalty_max_grad=1000 --clip_gradients=1 --ignore_done_on_timelimit=False --reward_scaling=20 --cost_scaling=0.4 --record_returns=False --adam_epsilon=0.0001
"""
| [
"bhatiaabhinav93@gmail.com"
] | bhatiaabhinav93@gmail.com |
1e977918ab91ec15b37a6d3250f6f39b55d3b970 | d3cb2f94c30a21d766feb408d626f20d7b574762 | /proxy/proxy.py | da4111a674af7891a1ab2b56d0b1e85e6d562fd8 | [] | no_license | sonlia/rqspider | 3942404f7336ad24b0858f58a9b52c1e2d3648ab | e5ae5f1ff5bfa0cf51d7d3c90bcf81fda399945d | refs/heads/master | 2022-12-30T19:41:24.225104 | 2020-10-22T04:03:19 | 2020-10-22T04:03:19 | 302,540,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,431 | py | #-*- coding:utf-8 -*-
#!/usr/bin/python
from __future__ import absolute_import
import sys
from getproxy import GetProxy
import datetime ,requests
import time
import uuid
import hashlib
import random ,numpy
import random
from tinydb import TinyDB, Query
from utils.log import log as _log
log = _log(__name__)
class grab_proxy(GetProxy):
    """Proxy crawler that validates candidates and stores them in TinyDB.

    NOTE(review): this module is Python 2 style (``hashlib.sha1`` is fed a
    str, the sibling ``db`` class uses print statements) -- confirm the
    target interpreter.
    """
    def _validate_proxy(self, proxy, scheme='http'):
        """Probe one candidate proxy and return its quality record.

        Returns None (implicitly) for duplicates or proxies that never
        answered; otherwise a dict describing the proxy.
        """
        country = proxy.get('country')
        host = proxy.get('host')
        port = proxy.get('port')
        # Deduplicate on a hash of "scheme://host:port".
        _proxy_hash = '%s://%s:%s' % (scheme, host, port)
        proxy_hash = hashlib.sha1(_proxy_hash).hexdigest()
        if proxy_hash in self.proxies_hash:
            return
        self.proxies_hash[proxy_hash] = True
        request_proxies = {
            scheme: "%s:%s" % (host, port)
        }
        time_list = []
        num =1
        # Probe httpbin through the proxy `num` times, recording latencies.
        for i in range(num):
            request_begin = time.time()
            try:
                response_json = requests.get(
                    "%s://httpbin.org/get?show_env=1&cur=%s" % (scheme, request_begin),
                    proxies=request_proxies,
                    timeout=5
                ).json()
            except:
                break
            # Echo check: the proxy must return our own timestamp argument.
            if str(request_begin) != response_json.get('args', {}).get('cur', ''):
                break
            request_end = time.time()
            _time = round(request_end - request_begin, 4)
            time_list.append(_time)
            time.sleep(random.uniform(1, 5))
        if len(time_list):
            na = numpy.array(time_list)
            var = na.var()
            mean = na.mean()
            # NOTE(review): with num==1 this is attempts/successes; it is 1.0
            # unless the loop broke early -- confirm the intended ratio.
            succes_ratio = float(num)/len(time_list)
            anonymity = self._check_proxy_anonymity(response_json)
            country = country or self.geoip_reader.country(host).country.iso_code
            export_address = self._check_export_address(response_json)
            return {
                "id": str(uuid.uuid1()),
                "host": host,
                "port": port,
                "anonymity": anonymity,
                "country": country,
                "response_time": round(mean,4),
                "var" : var,
                "succes_ratio":succes_ratio,
                "hash":proxy_hash,
                "update_time": str(datetime.datetime.now()),
                "flag":0,
                "type":scheme,
            }
    def save_proxies(self):
        """Persist all validated proxies into the proxy_ip table."""
        d = db()
        d.insert_many(data =self.valid_proxies,table="proxy_ip")
        # for i in self.valid_proxies:
    def data(self):
        """Return the list of validated proxy records."""
        return self.valid_proxies
        # d.insert(i)
    def start(self):
        """Run the full crawl pipeline inherited from GetProxy."""
        self.init()
        self.load_input_proxies()
        self.validate_input_proxies()
        self.load_plugins()
        self.grab_web_proxies()
        self.validate_web_proxies()
        # self.save_proxies()
class db:
    # If fewer than 50 IPs are stored, start the crawler to refill the pool.
    # While more than 20 usable IPs remain, each use increments flag; an IP
    # whose flag passes 2 is deleted.
    # When 20 or fewer usable IPs remain, reset all non-zero flags.
    def __init__(self,path="/home/ubuntu/workspace/spider/proxy/db.json"):
        """Open the TinyDB json store backing the proxy pool."""
        self.db = TinyDB(path)
    def table(self,table,cache_size=None):
        # NOTE(review): the caller-supplied cache_size is ignored; the call
        # always passes cache_size=None -- confirm whether that is intended.
        return self.db.table(table,cache_size=None)
    def insert(self,table,data):
        """Insert one record into the named table."""
        self.table(table).insert(data)
    def get_all(self,table="proxy_ip"):
        """Return every record in the table."""
        return self.table(table).all()
    def insert_many(self,data,table="proxy_ip"):
        """Insert a list of records into the table."""
        self.table(table).insert_multiple(data)
    def get_ip(self,table="proxy_ip"):
        """Pick a usable proxy record, refilling or recycling as needed.

        NOTE(review): the recursive refill result is discarded and `da` is
        not re-queried after the crawl, so a refill run can still return
        None -- callers must tolerate a None result.
        """
        Qu = Query()
        # Usable proxies are those whose flag is still 0.
        da = self.table(table).search(Qu.flag==0)
        all_count = self.get_all()
        log.debug("total ip count : %s " % len(all_count))
        if len(all_count)<50:
            log.debug("ip count is not engough")
            g =grab_proxy() # the IP pool should be refreshed here
            g.start()
            time.sleep(10)
            print "sleep 10s restart crawl"
            self.get_ip(table)
        if len(da)>20:
            log.debug("left useful ip : %s " % len(da))
            i = random.choice(da)
            proxy_hash = i["hash"]
            self.add_flag(proxy_hash)
            log.debug("get ip: %s " % i["host"])
            return i
        else :
            log.debug("left %s " % len(da))
            self.update_flag()
    def get_random_list(self,table="proxy_ip",num=10):
        # Randomly pick `num` entries from the not-yet-used (flag != 0) list.
        Qu = Query()
        da = self.table(table).search(Qu.flag!=0)
        a = random.sample(da,num)
        return a
    def grab_list(self,num):
        # TODO(review): unimplemented stub.
        pass
    def remove_ip(self,ip,table="proxy_ip"):
        # TODO(review): unimplemented stub; removal happens via add_flag.
        pass
    def add_flag(self,proxy_hash,table = "proxy_ip"):
        """Increment a proxy's use counter, deleting it after the 3rd use."""
        Qu = Query()
        da = self.table(table).search(Qu.hash==proxy_hash)[0]
        num = da.get("flag")
        if num==2:
            a = self.table(table).get(Qu.hash==proxy_hash)
            # print dir(a)
            e = a.eid
            log.debug("removing ip")
            self.table(table).remove(eids=[e])
        else :
            self.table(table).update({"flag":num+1},Qu.hash==proxy_hash)
    def update_flag(self,table="proxy_ip"):
        """Reset every non-zero flag back to 0, recycling used proxies."""
        Qu = Query()
        log.debug("update flag")
        da = self.table(table).update({"flag":0},Qu.flag!=0)
def start():
    """Crawl, validate and store a fresh batch of proxies."""
    grab_proxy().start()
# Allow running this module directly to refresh the proxy pool.
if __name__ == "__main__":
    start() | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
532d9ca34f1c0b575e1c9c921622da06dc7220ff | 3c40c7ca9ab7e59b8bf34a91acffa5850b9b797c | /dotfiles/bash/setup.py | c6a9474c4f1de1fcae51fd3b7befbe054d0b7ca9 | [] | no_license | dkudrow/dotfiles | df0737c0ad31c1db5fd380e3241548b07ff324db | 89285b9a65e0ea52a9cc20254a7a635500eb643a | refs/heads/master | 2021-01-10T20:26:30.225595 | 2018-04-09T22:40:56 | 2018-04-09T22:40:56 | 20,342,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # dotfiles/bash/setup.py
from dotfiles.common import *
import os
def setup(params, cfgdir):
    """Link the bash dotfiles from *cfgdir* into $HOME and make sure a
    local override rc file exists."""
    for name in ('.bashrc', '.bash_aliases'):
        ln(os.path.join(cfgdir, name), os.path.join(HOME, name), params)
    touch(os.path.join(HOME, '.bashrc.local'), params)
| [
"you@example.com"
] | you@example.com |
6c95a7bd09b068813b9c0a5829973bb8d0395ab0 | 623f977c05e559451b5131c225388d99737fca5b | /clustering.py | 4cb4f7826d1068fc6301b83fdbf6be644b973325 | [] | no_license | kmgr10/clustering | e874066ef9fc6204b1080bf9c1d6d31018d9567c | 8bf2a588dc2c210f280f5deecdee0920bc41c70a | refs/heads/master | 2022-11-29T14:32:04.342212 | 2020-08-16T16:48:30 | 2020-08-16T16:48:30 | 284,081,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.cluster import AgglomerativeClustering
import numpy as np
def generate_cluster_labels(data, clus_method, clus_n):
    """Min-max scale *data* and fit the requested clustering model.

    :param data: feature matrix accepted by sklearn estimators.
    :param clus_method: 'K-means' or 'Agglo'.
    :param clus_n: number of clusters.
    :return: the fitted clustering estimator (labels in ``.labels_``).

    NOTE(review): any other clus_method leaves ``clus`` unbound and raises
    UnboundLocalError at the return -- confirm callers only pass these two.
    """
    Scaling = preprocessing.MinMaxScaler().fit(data)
    X_scaled = Scaling.transform(data)
    if clus_method == 'K-means':
        clus = KMeans(n_clusters=clus_n).fit(X_scaled)
    elif clus_method == 'Agglo':
        clus = AgglomerativeClustering(n_clusters=clus_n,affinity='l2',linkage='complete').fit(X_scaled)
    return clus | [
"me@me.com"
] | me@me.com |
8771ba438ade98f28b96a9fb88b3121bc0f9cbac | c09e5bb87702a0828b0ec1c2eb221566529f2432 | /chap14_exe.py | deff1d79024b0f13c756c294c70cedf8042f2d7b | [] | no_license | anubhav-shukla/Learnpyhton | 396a1c8748c6cf640726e3609f7399b161c00858 | 7b53b30a0a6829d480f1d3c7d5a914de1c57fa73 | refs/heads/master | 2023-07-05T03:04:22.117259 | 2021-09-01T12:46:01 | 2021-09-01T12:46:01 | 346,263,181 | 0 | 0 | null | 2021-09-01T12:46:02 | 2021-03-10T07:02:27 | Python | UTF-8 | Python | false | false | 575 | py | # exercise
from functools import wraps
import time
def calculate_time(func):
    """Decorator that prints how long the wrapped function takes to run.

    Uses functools.wraps so the wrapped function keeps its metadata, and
    returns whatever the wrapped function returns.
    """
    @wraps(func)
    def wrap(*args, **kwargs):
        print(f'Executing ......{func.__name__}')
        t1 = time.time()
        # Call the wrapped function exactly once.  The original invoked it
        # twice (one discarded call plus one for the return value), which
        # duplicated side effects and made the timing span both calls.
        returned = func(*args, **kwargs)
        t2 = time.time()
        total = t2 - t1
        print(f'This function takes{total} sec to run')
        return returned
    return wrap
# Demo: decorating funcl makes each call print its name and elapsed time.
@calculate_time
# t=time.time()
def funcl():
    print("this is function")
funcl()
# t1=time.time()
# print(t1-t)
# this func takes 3 sec to run | [
"noreply@github.com"
] | anubhav-shukla.noreply@github.com |
b4e8562b690e03fd42d7b8893605994cdf54a829 | 95d9f80d130e9ce030f24f4a2d5a5dc8f3b9140d | /sp/migrations/0004_auto_20200401_1328.py | 4cdb3298ee86441b8d28ba51f9717900e8b0a548 | [
"BSD-3-Clause"
] | permissive | mnasiri/django-saml-sp | b75cb225cdc0db11571d312d9ab54e10560118d8 | 8e112faecc7a5866a44a95b16c6f694fce5cecd1 | refs/heads/master | 2022-04-12T15:37:13.631042 | 2020-04-01T13:33:40 | 2020-04-01T13:33:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # Generated by Django 3.0.4 on 2020-04-01 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (0004) for the `sp` app.

    Alters three `idp` fields; do not hand-edit the operations -- generate a
    follow-up migration instead.
    """
    # Must be applied after migration 0003 of the `sp` app.
    dependencies = [
        ("sp", "0003_auto_20200331_1934"),
    ]
    operations = [
        migrations.AlterField(
            model_name="idp", name="authenticate_method", field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="idp",
            name="base_url",
            field=models.CharField(
                help_text="Root URL for the site, including http/https, no trailing slash.",
                max_length=200,
                verbose_name="Base URL",
            ),
        ),
        migrations.AlterField(
            model_name="idp", name="login_method", field=models.CharField(blank=True, max_length=200),
        ),
    ]
| [
"dcwatson@gmail.com"
] | dcwatson@gmail.com |
5e04be56a5ddda1b05ae34fe3d07ad9b5e3e211f | a2006f6aae3c36b1b9fe40b7b47941b515b49d5c | /llin/urls.py | d8829c7c0660cb42b81f327e88f7734d8e210d42 | [] | no_license | hakimkal/llin | f32832e599752edc1e7ff4f23d10f94bc11e47f3 | 56b5f449375153b6ec786994d564a721339a2b26 | refs/heads/master | 2016-09-09T21:06:58.224071 | 2014-12-15T10:19:39 | 2014-12-15T10:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from states import views as state_view
from django.contrib.auth.views import login,logout
# Django 1.x-era URL configuration (uses the removed patterns()/string-view
# syntax -- keep the Django version pinned accordingly).
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'llin.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # NOTE(review): both '' and 'states/' include states.urls -- confirm the
    # duplicate mount is intentional.
    url(r'^$', include('states.urls')),
    url(r'^maps/$',state_view.MapsStatesListing.as_view()),
    url(r'^stateslist/$', state_view.StateListFrame.as_view()),
    url(r'^states/',include('states.urls')),
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^login/$',login,{'template_name':'accounts/login.html'},name='login_link'),
    url(r'^logout/$',logout,{'next_page':'/'}, name = 'logout'),
    url(r'^accounts/',include('accounts.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
#for serving media files
from llin import settings
urlpatterns += patterns(
    'django.views.static',
    (r'media/(?P<path>.*)',
    'serve',
    {'document_root': settings.MEDIA_ROOT}), ) | [
"hakimkal@gmail.com"
] | hakimkal@gmail.com |
760e1b121ec13bc07ed91ee24f10171fa98d51f6 | 946a9dcf4e644f0d3f806f016a23ae8d96095082 | /LeetCode/Two Pointers/763_PartitionLabels.py | e9a910bd636450f8af37e9418ab39efb51dc76ad | [] | no_license | HzCeee/Algorithms | 3dea898f071f4103ca3eb038f63b01ba4ba95383 | e05f29071d0badd081535e773f43ebc303aa12c4 | refs/heads/master | 2018-10-20T21:54:37.154631 | 2018-10-11T20:46:34 | 2018-10-11T20:46:34 | 116,142,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | def partitionLabels(self, S):
"""
:type S: str
:rtype: List[int]
"""
# leftPtr and righrPtr denotes the start and end of the current partition
# curPtr denotes the current checked index
lastOccurence = {char: curPtr for curPtr, char in enumerate(S)}
rightPtr = leftPtr = 0
ans = []
for curPtr, char in enumerate(S):
rightPtr = max(rightPtr, lastOccurence[char])
if curPtr == rightPtr:
ans.append(curPtr - leftPtr + 1)
leftPtr = curPtr + 1
return ans | [
"huangzixihzx@gmail.com"
] | huangzixihzx@gmail.com |
8d32d9b895aea36e66f915ee29f009e5d94b6c93 | 8f90e2899978bb81d283fe76a5b287c0e42bd7ea | /oscrypto/_win/_advapi32_ctypes.py | 36690d1f8657a198f929cf66a1d26bb08d88d91a | [
"MIT"
] | permissive | haowanxing/oscrypto | 24c98f9858f11feb55540e66224239c099abb756 | 14b5bd07dfc0e3fe7eee1048f56d5f1af53aee51 | refs/heads/master | 2021-01-17T19:45:53.095897 | 2016-08-29T17:03:17 | 2016-08-29T17:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,466 | py | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import ctypes
from ctypes import windll, wintypes, POINTER, Structure, c_void_p, c_char_p, c_uint
from ctypes.wintypes import BOOL, DWORD
from .._ffi import FFIEngineError, LibraryNotFoundError
from .._types import str_cls
# Public API of this ctypes binding module.
__all__ = [
    'advapi32',
    'get_error',
]
# Load advapi32.dll, converting a missing-module OSError into the library's
# own LibraryNotFoundError; any other load failure is re-raised as-is.
try:
    advapi32 = windll.advapi32
except (OSError) as e:
    if str_cls(e).find('The specified module could not be found') != -1:
        raise LibraryNotFoundError('advapi32.dll could not be found')
    raise
# ctypes aliases mirroring the Win32 CryptoAPI typedefs: crypto provider,
# key and hash handles, byte pointer, and algorithm identifier.
HCRYPTPROV = wintypes.HANDLE
HCRYPTKEY = wintypes.HANDLE
HCRYPTHASH = wintypes.HANDLE
PBYTE = c_char_p
ALG_ID = c_uint
try:
class CRYPTOAPI_BLOB(Structure): # noqa
_fields_ = [
("cbData", DWORD),
("pbData", POINTER(ctypes.c_byte)),
]
CRYPT_INTEGER_BLOB = CRYPTOAPI_BLOB
CRYPT_OBJID_BLOB = CRYPTOAPI_BLOB
CRYPT_DER_BLOB = CRYPTOAPI_BLOB
CRYPT_ATTR_BLOB = CRYPTOAPI_BLOB
class CRYPT_ALGORITHM_IDENTIFIER(Structure):
_fields = [
('pszObjId', wintypes.LPSTR),
('Parameters', CRYPT_OBJID_BLOB),
]
class CRYPT_BIT_BLOB(Structure):
_fields_ = [
('cbData', DWORD),
('pbData', PBYTE),
('cUnusedBits', DWORD),
]
class CERT_PUBLIC_KEY_INFO(Structure):
_fields_ = [
('Algorithm', CRYPT_ALGORITHM_IDENTIFIER),
('PublicKey', CRYPT_BIT_BLOB),
]
class CRYPT_ATTRIBUTE(Structure):
_fields_ = [
('pszObjId', wintypes.LPSTR),
('cValue', DWORD),
('rgValue', POINTER(CRYPT_ATTR_BLOB)),
]
class CRYPT_ATTRIBUTES(Structure):
_fields_ = [
('cAttr', DWORD),
('rgAttr', POINTER(CRYPT_ATTRIBUTE)),
]
class CRYPT_PRIVATE_KEY_INFO(Structure):
_fields_ = [
('Version', DWORD),
('Algorithm', CRYPT_ALGORITHM_IDENTIFIER),
('PrivateKey', CRYPT_DER_BLOB),
('pAttributes', POINTER(CRYPT_ATTRIBUTES)),
]
class PUBLICKEYSTRUC(Structure):
_fields_ = [
('bType', wintypes.BYTE),
('bVersion', wintypes.BYTE),
('reserved', wintypes.WORD),
('aiKeyAlg', ALG_ID),
]
BLOBHEADER = PUBLICKEYSTRUC
class DSSPUBKEY(Structure):
_fields_ = [
('magic', DWORD),
('bitlen', DWORD),
]
class DSSBLOBHEADER(Structure):
_fields_ = [
('publickeystruc', PUBLICKEYSTRUC),
('dsspubkey', DSSPUBKEY),
]
class RSAPUBKEY(Structure):
_fields_ = [
('magic', DWORD),
('bitlen', DWORD),
('pubexp', DWORD),
]
class RSABLOBHEADER(Structure):
_fields_ = [
('publickeystruc', PUBLICKEYSTRUC),
('rsapubkey', RSAPUBKEY),
]
class PLAINTEXTKEYBLOB(Structure):
_fields_ = [
('hdr', BLOBHEADER),
('dwKeySize', DWORD),
# rgbKeyData omitted since it is a flexible array member
]
class DSSSEED(Structure):
_fields_ = [
('counter', DWORD),
('seed', wintypes.BYTE * 20),
]
advapi32.CryptAcquireContextW.argtypes = [
POINTER(HCRYPTPROV),
wintypes.LPCWSTR,
wintypes.LPCWSTR,
DWORD,
DWORD
]
advapi32.CryptAcquireContextW.restype = wintypes.BOOL
advapi32.CryptReleaseContext.argtypes = [
HCRYPTPROV,
DWORD
]
advapi32.CryptReleaseContext.restype = wintypes.BOOL
advapi32.CryptImportKey.argtypes = [
HCRYPTPROV,
PBYTE,
DWORD,
HCRYPTKEY,
DWORD,
POINTER(HCRYPTKEY)
]
advapi32.CryptImportKey.restype = BOOL
advapi32.CryptGenKey.argtypes = [
HCRYPTPROV,
ALG_ID,
DWORD,
POINTER(HCRYPTKEY)
]
advapi32.CryptGenKey.restype = wintypes.BOOL
advapi32.CryptGetKeyParam.argtypes = [
HCRYPTKEY,
DWORD,
PBYTE,
POINTER(DWORD),
DWORD
]
advapi32.CryptGetKeyParam.restype = wintypes.BOOL
advapi32.CryptSetKeyParam.argtypes = [
HCRYPTKEY,
DWORD,
c_void_p,
DWORD
]
advapi32.CryptSetKeyParam.restype = wintypes.BOOL
advapi32.CryptExportKey.argtypes = [
HCRYPTKEY,
HCRYPTKEY,
DWORD,
DWORD,
PBYTE,
POINTER(DWORD)
]
advapi32.CryptExportKey.restype = BOOL
advapi32.CryptDestroyKey.argtypes = [
HCRYPTKEY
]
advapi32.CryptDestroyKey.restype = wintypes.BOOL
advapi32.CryptCreateHash.argtypes = [
HCRYPTPROV,
ALG_ID,
HCRYPTKEY,
DWORD,
POINTER(HCRYPTHASH)
]
advapi32.CryptCreateHash.restype = BOOL
advapi32.CryptHashData.argtypes = [
HCRYPTHASH,
PBYTE,
DWORD,
DWORD
]
advapi32.CryptHashData.restype = BOOL
advapi32.CryptSetHashParam.argtypes = [
HCRYPTHASH,
DWORD,
PBYTE,
DWORD
]
advapi32.CryptSetHashParam.restype = BOOL
advapi32.CryptSignHashW.argtypes = [
HCRYPTHASH,
DWORD,
wintypes.LPCWSTR,
DWORD,
PBYTE,
POINTER(DWORD)
]
advapi32.CryptSignHashW.restype = BOOL
advapi32.CryptVerifySignatureW.argtypes = [
HCRYPTHASH,
PBYTE,
DWORD,
HCRYPTKEY,
wintypes.LPCWSTR,
DWORD
]
advapi32.CryptVerifySignatureW.restype = BOOL
advapi32.CryptDestroyHash.argtypes = [
HCRYPTHASH
]
advapi32.CryptDestroyHash.restype = wintypes.BOOL
advapi32.CryptEncrypt.argtypes = [
HCRYPTKEY,
HCRYPTHASH,
BOOL,
DWORD,
PBYTE,
POINTER(DWORD),
DWORD
]
advapi32.CryptEncrypt.restype = BOOL
advapi32.CryptDecrypt.argtypes = [
HCRYPTKEY,
HCRYPTHASH,
BOOL,
DWORD,
PBYTE,
POINTER(DWORD)
]
advapi32.CryptDecrypt.restype = BOOL
except (AttributeError):
raise FFIEngineError('Error initializing ctypes')
setattr(advapi32, 'HCRYPTPROV', HCRYPTPROV)
setattr(advapi32, 'HCRYPTKEY', HCRYPTKEY)
setattr(advapi32, 'HCRYPTHASH', HCRYPTHASH)
setattr(advapi32, 'CRYPT_INTEGER_BLOB', CRYPT_INTEGER_BLOB)
setattr(advapi32, 'CRYPT_OBJID_BLOB', CRYPT_OBJID_BLOB)
setattr(advapi32, 'CRYPT_DER_BLOB', CRYPT_DER_BLOB)
setattr(advapi32, 'CRYPT_ATTR_BLOB', CRYPT_ATTR_BLOB)
setattr(advapi32, 'CRYPT_ALGORITHM_IDENTIFIER', CRYPT_ALGORITHM_IDENTIFIER)
setattr(advapi32, 'CRYPT_BIT_BLOB', CRYPT_BIT_BLOB)
setattr(advapi32, 'CERT_PUBLIC_KEY_INFO', CERT_PUBLIC_KEY_INFO)
setattr(advapi32, 'CRYPT_PRIVATE_KEY_INFO', CRYPT_PRIVATE_KEY_INFO)
setattr(advapi32, 'CRYPT_ATTRIBUTE', CRYPT_ATTRIBUTE)
setattr(advapi32, 'CRYPT_ATTRIBUTES', CRYPT_ATTRIBUTES)
setattr(advapi32, 'PUBLICKEYSTRUC', PUBLICKEYSTRUC)
setattr(advapi32, 'DSSPUBKEY', DSSPUBKEY)
setattr(advapi32, 'DSSBLOBHEADER', DSSBLOBHEADER)
setattr(advapi32, 'RSAPUBKEY', RSAPUBKEY)
setattr(advapi32, 'RSABLOBHEADER', RSABLOBHEADER)
setattr(advapi32, 'BLOBHEADER', BLOBHEADER)
setattr(advapi32, 'PLAINTEXTKEYBLOB', PLAINTEXTKEYBLOB)
setattr(advapi32, 'DSSSEED', DSSSEED)
def get_error():
error = ctypes.GetLastError()
return (error, ctypes.FormatError(error))
| [
"will@wbond.net"
] | will@wbond.net |
b80f05965f60e88b838d72c5eef3000d3ee55c51 | 1925c535d439d2d47e27ace779f08be0b2a75750 | /leetcode/minimum_time_to_collect_all_apples_in_a_tree.py | 9a06ab10892810967e0fae8964db764325709cde | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | # Given an undirected tree consisting of n vertices numbered from 0 to n-1, which has some apples in their vertices. You spend 1 second to walk over one edge of the tree. Return the minimum time in seconds you have to spend in order to collect all apples in the tree starting at vertex 0 and coming back to this vertex.
# The edges of the undirected tree are given in the array edges, where edges[i] = [fromi, toi] means that exists an edge connecting the vertices fromi and toi. Additionally, there is a boolean array hasApple, where hasApple[i] = true means that vertex i has an apple, otherwise, it does not have any apple.
# Example 1:
# Input: n = 7, edges = [[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]], hasApple = [false,false,true,false,true,true,false]
# Output: 8
# Explanation: The figure above represents the given tree where red vertices have an apple. One optimal path to collect all apples is shown by the green arrows.
# Example 2:
# Input: n = 7, edges = [[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]], hasApple = [false,false,true,false,false,true,false]
# Output: 6
# Explanation: The figure above represents the given tree where red vertices have an apple. One optimal path to collect all apples is shown by the green arrows.
# Example 3:
# Input: n = 7, edges = [[0,1],[0,2],[1,4],[1,5],[2,3],[2,6]], hasApple = [false,false,false,false,false,false,false]
# Output: 0
# Constraints:
# 1 <= n <= 10^5
# edges.length == n-1
# edges[i].length == 2
# 0 <= fromi, toi <= n-1
# fromi < toi
# hasApple.length == n
def minTime(self, n: int, edges: List[List[int]], hasApple: List[bool]) -> int:
graph = collections.defaultdict(list)
for i, v in edges:
graph[i].append(v)
def traverse(node):
total = 0
for sub in graph[node]:
total += traverse(sub)
if total > 0 or hasApple[node]: total += 2
return total
return max(0, traverse(0) - 2) | [
"yunfan.yang@minerva.kgi.edu"
] | yunfan.yang@minerva.kgi.edu |
9a6f34d658bd34bb557f9e550a143540abcf2add | f77d97840915ff2318c8f3841096019337c58689 | /_admin/migrations/env.py | b7f89639043932bb7f95a869797ac2f2fc201484 | [] | no_license | rrader/events-service | f35d7e237e0ef5e3598b90878713539960153895 | 5933a6ba83aacb63832dd6efa806409bb37812aa | refs/heads/master | 2021-01-10T04:25:45.875103 | 2015-11-20T16:21:32 | 2015-11-20T16:21:32 | 44,528,882 | 4 | 1 | null | 2015-11-01T19:28:47 | 2015-10-19T11:02:48 | Python | UTF-8 | Python | false | false | 2,541 | py | from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def exclude_tables_from_config(config_):
tables_ = config_.get("tables", None)
if tables_ is not None:
tables = tables_.split(",")
return tables
exclude_tables = exclude_tables_from_config(config.get_section('alembic:exclude'))
def include_object(object, name, type_, reflected, compare_to):
if type_ == "table" and name in exclude_tables:
return False
else:
return True
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| [
"roman.rader@gmail.com"
] | roman.rader@gmail.com |
39bcecf7096e30317938833bbcbad285c393e708 | 134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2 | /desktop/libs/hadoop/setup.py | 45c29110199a12209f2f58f95c8a774324b992a0 | [
"Apache-2.0"
] | permissive | civascu/hue | 22637f13a4cfc557716557661523131b6ac16da4 | 82f2de44789ff5a981ed725175bae7944832d1e9 | refs/heads/master | 2020-03-31T01:50:39.449966 | 2010-07-21T01:05:50 | 2010-07-21T01:07:15 | 788,284 | 0 | 0 | Apache-2.0 | 2019-02-04T07:03:12 | 2010-07-21T07:34:27 | Python | UTF-8 | Python | false | false | 1,344 | py | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name = "hadoop",
version = "0.9.1",
url = 'http://github.com/cloudera/hue',
description = "Hadoop Libraries",
# Note that we're cheating by installing gen-py
# in hadoop's __init__.py.
packages = find_packages('src'),
package_dir = {'': 'src' },
install_requires = ['setuptools', 'desktop'],
# Even libraries need to be registered as desktop_apps,
# if they have configuration, like this one.
entry_points = { 'desktop.sdk.lib': 'hadoop=hadoop' },
)
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
f0196b2a9c3e4a9ca2069fdb9c0d194f6fcb2024 | 80d8b35c2199817ca69f36d61f228110b5eacae2 | /bravo/tests/test_packets.py | 7d123fabcf7e67485c290ed2ec8721fb81b144eb | [] | no_license | Estevo-Aleixo/bravo | b1d309d1215250f63bba6fade3e81e2f248dd8f4 | 2c0b07dd4385114c870123643ddc57d3770b2e91 | refs/heads/master | 2021-01-16T21:35:06.195078 | 2011-02-02T09:08:59 | 2011-02-02T09:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py | # vim: set fileencoding=utf8 :
import unittest
from construct import Container
from construct import MappingError
import bravo.packets
class TestPacketDataStructures(unittest.TestCase):
def test_named_packets_exist(self):
for name, slot in bravo.packets.packets_by_name.iteritems():
self.assertTrue(slot in bravo.packets.packets,
"%d is missing" % slot)
def test_packet_names_exist(self):
for slot in bravo.packets.packets.iterkeys():
self.assertTrue(slot in bravo.packets.packets_by_name.values(),
"%d is missing" % slot)
def test_packet_names_match(self):
for name, slot in bravo.packets.packets_by_name.iteritems():
self.assertEqual(name, bravo.packets.packets[slot].name)
class TestPacketParsing(unittest.TestCase):
def test_ping(self):
packet = ""
parsed = bravo.packets.packets[0].parse(packet)
self.assertTrue(parsed)
def test_handshake(self):
packet = "\x00\x01a"
parsed = bravo.packets.packets[2].parse(packet)
self.assertEqual(parsed.username, "a")
def test_handshake_unicode(self):
packet = "\x00\x02\xc2\xa7"
parsed = bravo.packets.packets[2].parse(packet)
self.assertEqual(parsed.username, u"§")
def test_chat_color(self):
packet = "\x00\x15<\xc2\xa7fMrZunz\xc2\xa7f> Alrite"
parsed = bravo.packets.packets[3].parse(packet)
self.assertEqual(parsed.message, u"<§fMrZunz§f> Alrite")
def test_time(self):
packet = "\x00\x00\x00\x00\x00\x00\x00\x2a"
parsed = bravo.packets.packets[4].parse(packet)
self.assertEqual(parsed.timestamp, 42)
def test_orientation(self):
packet = "\x45\xc5\x66\x76\x42\x2d\xff\xfc\x01"
parsed = bravo.packets.packets[12].parse(packet)
self.assertEqual(parsed.look.pitch, 43.49998474121094)
self.assertEqual(parsed.look.rotation, 6316.8076171875)
def test_build(self):
packet = "\x00\x00\x00\x19@\x00\x00\x00@\x05\x00\x04@\x12"
parsed = bravo.packets.packets[15].parse(packet)
self.assertEqual(parsed.x, 25)
self.assertEqual(parsed.y, 64)
self.assertEqual(parsed.z, 64)
self.assertEqual(parsed.face, "+x")
self.assertEqual(parsed.id, 4)
self.assertEqual(parsed.count, 64)
self.assertEqual(parsed.damage, 18)
def test_build_bad_face(self):
packet = "\x00\x00\x00\x19@\x00\x00\x00@\x06\x00\x04@\x12"
self.assertRaises(MappingError, bravo.packets.packets[15].parse,
packet)
def test_animate(self):
packet = "\x00\x00\x00\x03\x01"
parsed = bravo.packets.packets[18].parse(packet)
self.assertEqual(parsed.eid, 3)
self.assertEqual(parsed.animation, "arm")
def test_animate_bad_animation(self):
packet = "\x00\x00\x00\x03\x05"
self.assertRaises(MappingError, bravo.packets.packets[18].parse,
packet)
class TestPacketAssembly(unittest.TestCase):
def test_ping(self):
container = Container()
assembled = bravo.packets.packets[0].build(container)
self.assertEqual(assembled, "")
def test_time(self):
container = Container(timestamp=42)
assembled = bravo.packets.packets[4].build(container)
self.assertEqual(assembled, "\x00\x00\x00\x00\x00\x00\x00\x2a")
def test_build(self):
container = Container(x=25, y=64, z=64, face="+x", id=4, count=64,
damage=18)
assembled = bravo.packets.packets[15].build(container)
self.assertEqual(assembled,
"\x00\x00\x00\x19@\x00\x00\x00@\x05\x00\x04@\x12")
def test_build_bad_face(self):
container = Container(x=25, y=64, z=64, face="+q", id=4, count=64,
damage=18)
self.assertRaises(MappingError, bravo.packets.packets[15].build,
container)
class TestPacketHelpers(unittest.TestCase):
def test_make_packet(self):
packet = bravo.packets.make_packet("ping")
self.assertEqual(packet, "\x00")
class TestPacketIntegration(unittest.TestCase):
def test_location_round_trip(self):
packet = "\x0d@\x1a\x00\x00\x00\x00\x00\x00@P\xcf\\)\x00\x00\x00@Pg\xae\x14\x80\x00\x00@\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
header, payload = bravo.packets.parse_packets(packet)[0][0]
self.assertEqual(header, 13)
self.assertEqual(payload.position.x, 6.5)
self.assertEqual(payload.position.y, 67.24000000953674)
self.assertEqual(payload.position.stance, 65.62000000476837)
self.assertEqual(payload.position.z, 7.5)
self.assertEqual(payload.look.rotation, 0.0)
self.assertEqual(payload.look.pitch, 0.0)
self.assertEqual(payload.flying.flying, 0)
reconstructed = bravo.packets.make_packet("location", payload)
self.assertEqual(packet, reconstructed)
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |
106fce316f2b8daf10ebed6e56ebc77eae52f5fb | 348a921b2ec58cf8d9e018af5d7e93f15def6263 | /mimiron/models/scale_app.py | f3630ecac9490cac0a5aae5ae97bb70350b7d5cb | [] | no_license | tonicbupt/mimiron | 9c6ed99f8838f9180d7593bfaf8e5766e01c2911 | e51c8a59b7babbac44c0579797d5e255ede8a646 | refs/heads/master | 2016-09-06T01:55:37.508366 | 2015-07-28T08:52:59 | 2015-07-28T08:52:59 | 33,178,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,583 | py | # coding: utf-8
import sqlalchemy.exc
from mimiron.ext import db
from mimiron.models.base import Base
from mimiron.models.record import Record
class ScaleApp(Base):
__tablename__ = 'scale_app'
__table_args__ = db.UniqueConstraint('appname', 'version', 'entrypoint', 'env'),
appname = db.Column(db.String(30), nullable=False)
version = db.Column(db.String(50), nullable=False)
entrypoint = db.Column(db.CHAR(20), nullable=False)
env = db.Column(db.CHAR(20), nullable=False)
condition_groups = db.relationship('ConditionGroup', backref='scale_app', lazy='dynamic')
records = db.relationship('Record', backref='scale_app', lazy='dynamic')
def __init__(self, appname, version, entrypoint, env):
self.appname = appname
self.version = version
self.entrypoint = entrypoint
self.env = env
@classmethod
def list_all(cls, start=0, limit=None):
q = cls.query.offset(start)
if limit is not None:
q = q.limit(limit)
return q.all()
@classmethod
def get_or_create(cls, appname, version, entrypoint, env):
app = cls.query.filter_by(appname=appname, version=version,
entrypoint=entrypoint, env=env).first()
if app:
return app
try:
app = cls(appname, version, entrypoint, env)
db.session.add(app)
db.session.commit()
return app
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
return None
@classmethod
def get_by_appname(cls, appname, start=0, limit=None):
q = cls.query.filter_by(appname=appname).offset(start)
if limit is not None:
q = q.limit(limit)
return q.all()
def add_record(self, container_id):
try:
r = Record(container_id)
db.session.add(r)
self.records.append(r)
db.session.commit()
return r
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
return None
def list_records(self, start=0, limit=None):
q = self.records.order_by(Record.id.desc()).offset(start)
if limit is not None:
q = q.limit(limit)
return q.all()
def delete(self):
for cg in self.condition_groups.all():
cg.delete()
db.session.delete(self)
db.session.commit()
def to_dict(self):
d = super(ScaleApp, self).to_dict()
d.update(condition_groups=self.condition_groups.all())
return d
| [
"tonicbupt@gmail.com"
] | tonicbupt@gmail.com |
69fed5544d7ca3480815312b408af42df3de3ffc | bb150497a05203a718fb3630941231be9e3b6a32 | /tools/test/test-tools/tool-test-op-correctness/ttorch/test_linear.py | 90783638ec2e59efa8de2f83f0c73913751df932 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 454 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# @author DDDivano
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
import torch
import numpy as np
def test_linear():
x = torch.from_numpy(np.ones(shape=[3, 5]))
weight = torch.from_numpy(np.ones(shape=[3, 5])*3)
bias = torch.from_numpy(np.ones(shape=[3]))
res = torch.nn.functional.linear(x, weight, bias)
expect = np.ones(shape=[3, 3]) * 16
np.testing.assert_allclose(res, expect)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
fcce29999db00b57d91a222f54cb5539f9325827 | 0e15ccb1b0ac2b1b246f7f0dbc3874f2f5ed5d72 | /190904/2048.py | cbc5f15246144019ff997ba5f0b5cb1090cd161f | [] | no_license | toohong5/algorithm | fa0eda1c3a28c7bb8c13ae10711a955eccc169ee | a54ae271738927592bd023e93d223a00dc368895 | refs/heads/master | 2020-07-22T16:35:02.021460 | 2019-11-15T08:56:31 | 2019-11-15T08:56:31 | 206,717,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import sys
sys.stdin = open('input5.txt', 'r')
T = int(input())
for tc in range(1, T + 1):
N, way = input().split()
N = int(N)
arr = [list(map(int, input().split())) for _ in range(N)]
if way == "up":
| [
"toohong5@gmail.com"
] | toohong5@gmail.com |
83f9c8bad1d536eb9ed04197aca5de98c52cbfa7 | 4a0e3ffff54be178b377a4c18fe0ced2d44b7be6 | /tests/test_simplification.py | ef4d8ef459a90652ce1d7f1345a8374f72c490e6 | [] | no_license | WinVector/data_algebra | 608371904c0fcc99ffab7e0fe57c49dc75fd6b21 | 1e96817919ae891ba108d8d7471b2200b2528271 | refs/heads/main | 2023-04-13T20:11:18.682084 | 2023-04-10T14:09:41 | 2023-04-10T14:09:41 | 203,080,133 | 113 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import *
import pytest
def test_simplification_1():
ops = (
TableDescription(table_name="d", column_names=["col1", "col2", "col3"])
.extend({"sum23": "col2 + col3"})
.extend({"x": 1})
.extend({"x": 2})
.extend({"x": 3})
.extend({"x": 4})
.extend({"x": 5})
.select_columns(["x", "sum23", "col3"])
)
d = data_algebra.data_model.default_data_model().pd.DataFrame(
{"col1": [1, 2], "col2": [3, 4], "col3": [4, 5]}
)
res = ops.transform(d)
expect = data_algebra.data_model.default_data_model().pd.DataFrame(
{"x": [5, 5], "sum23": [7, 9], "col3": [4, 5],}
)
assert data_algebra.test_util.equivalent_frames(res, expect)
assert isinstance(ops, SelectColumnsNode)
assert isinstance(ops.sources[0], ExtendNode)
assert isinstance(ops.sources[0].sources[0], TableDescription)
def test_simplification_2():
d2 = data_algebra.data_model.default_data_model().pd.DataFrame({"col1": [0, 1], "col2": [1, 0],})
ops2 = (
describe_table(d2, table_name="d2")
.select_rows("col2 > 0")
.select_rows("col1 / col2 > 0")
)
res = ops2.transform(d2)
assert set(res.columns) == set(["col1", "col2"])
assert res.shape[0] == 0
assert isinstance(ops2, SelectRowsNode)
assert isinstance(ops2.sources[0], SelectRowsNode)
assert isinstance(ops2.sources[0].sources[0], TableDescription)
| [
"jmount@win-vector.com"
] | jmount@win-vector.com |
e06f3268b5fbbda6ff380b19dfcec992dfa4102a | 32bbe94e77deced5e58de97eb19e7c6126b001df | /backend/src/conftest.py | 6f9abb7eb37c760dac2ba89491f5762a265968c3 | [] | no_license | 3asyPe/astudy | 16d8adacc3bee9f2667c0a5f1be8228868440c6a | 0643a33a294c410523738f59f95c8d205dd63dc5 | refs/heads/master | 2023-06-25T11:23:39.500361 | 2021-07-28T13:33:48 | 2021-07-28T13:33:48 | 336,819,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | import random
import string
import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from mixer.backend.django import mixer as _mixer
from app.test.api_client import DRFClient
from billing.models import BillingProfile
from carts.models import Wishlist
User = settings.AUTH_USER_MODEL
pytestmark = [pytest.mark.django_db]
@pytest.fixture
def api():
return DRFClient()
@pytest.fixture
def anon():
return DRFClient(anon=True)
@pytest.fixture
def mixer():
return _mixer
@pytest.fixture
def user(mixer):
return mixer.blend(User, email="testemail@gmail.com")
@pytest.fixture
def another_user(mixer):
return mixer.blend(User, email="testemail2@gmail.com")
@pytest.fixture
def anonymous_user(mixer):
return AnonymousUser()
@pytest.fixture
def course_factory(mixer):
def course_mixer():
return mixer.blend(
"courses.Course",
title=''.join([random.choice(string.hexdigits) for _ in range(0, 8)]),
subtitle=''.join([random.choice(string.hexdigits) for _ in range(0, 8)]),
price=3.33,
description=''.join([random.choice(string.hexdigits) for _ in range(0, 8)]),
)
return course_mixer
@pytest.fixture
def cart(mixer, user):
return mixer.blend("carts.Cart", user=user)
@pytest.fixture
def wishlist(user):
return Wishlist.objects.get_or_create(user=user)[0]
@pytest.fixture
def saved_for_later(mixer, user):
return mixer.blend("carts.SavedForLater", user=user)
@pytest.fixture
def billing_profile(mixer, user):
qs = BillingProfile.objects.filter(user=user, active=True)
if qs.exists():
return qs.first()
return mixer.blend("billing.BillingProfile", user=user)
@pytest.fixture
def card(mixer, billing_profile):
card = mixer.blend(
"billing.Card",
billing_profile=billing_profile,
stripe_id="card_1JD9PPAGKJR9v1iNUvmLh76d",
brand="VISA",
country="Belarus",
postal_code="424242",
last4="4242",
)
return card
| [
"alex.kvasha228@gmail.com"
] | alex.kvasha228@gmail.com |
91482f3078c6a8ffb8b5a646e4292e26b7ee4b5d | c1366a8772664a1076be1f709ec8364ded2cc119 | /Jump_to_python/자료형/문자열 자료형.py | 83a4d4ba86ad49e1d9b20118b1b020238912bfee | [] | no_license | Daboni/Python | 04eecda0ff57a385374f8cfd479a9db98cbf3d60 | e6bd7887a46535b3d5737836c2f8bbb668ef89ec | refs/heads/main | 2023-02-26T16:45:55.553629 | 2021-02-04T06:44:11 | 2021-02-04T06:44:11 | 322,223,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,944 | py | #문자열
"Life is too short, You need Python"
"a"
"123"
# ""
"Hello World"
# ''
'Python is fun'
# """ """
"""Life is too short, You need Python"""
# ''' '''
'''Life is too short, You need Python'''
# ' 포함시키기
food = "Python's favorite food is perl"
food #"Python's favorite food is perl"
# " 포함시키기
say = '"Python is very easy." he says.'
say #'"Python is very easy." he says.'
# \이용해서 '," 포함시키기
food = 'Python\'s favorite food is perl'
say = "\"Python is very easy.\" he says."
# \n 삽입으로 줄 바꾸기
multiline = "Life is too short\nYou need python"
# 연속된 ''' 또는 """ 사용하기
multiline = '''
Life is too short
You need python
'''
multiline = """
Life is too short
You need python
"""
# concatenation
head = "Python"
tail = "is fun!"
head + tail #'Python is fun!'
# 문자열 곱하기
a = "python"
a * 2 #'pythonpython'
# 문자열 길이
a = "Life is too short"
len(a) #17
# Indexing and Slicing
# Indexing
a = "Life is too short, You need Python"
a[3] #'e'
a[-1] #'n'
a[-2] #'o'
a[-0] #'L'
# Slicing
a = "Life is too short, You need Python"
b = a[0] + a[1] + a[2] + a[3]
b #'Life'
a[0:3] #'Lif'
a[0:5] #'Life '
a[0:4] #'Life'
a[19:] #'You need Python'
a[:] #'Life is too short, You need Python'
a[19:-7] #'You need'
# Slicing 으로 문자열 나누기
a = "20201217Rainy"
date = a[:8]
weather = a[8:]
date #'20201217'
weather #'Rainy'
year = a[:4]
day = a[4:8]
year #'2020'
day #'1217'
# 문자열 바꾸기
# python에서 문자열의 요솟값은 바꿀 수 없음(immutable한 자료형)
a = "Pithon"
a[:1] #'P'
a[2:] #'thon'
a[:1] + 'y' + a[2:] #'Pyhon'
# 문자열 formatting
# 숫자 바로 대입
"I eat %d apples." %3
'I eat 3 apples.'
# 문자열 바로 대입
"I eat %s apples." %"five"
'I eat fieve apples.'
# 숫자 값을 나타내는 변수로 대입
number = 3
"I eat %d apples." %number
'I eat 3 apples.'
# 2개 이상의 값 넣기
number = 10
day = "three"
"I ate %d apples. so I was sick for %s days." %(number, day)
'I ate 10 apples. so I was sick for three days.'
# %s 는 모든 형태의 값이든 문자열로 자동으로 변환시켜준다.
# %d와 %를 같이 쓸 때 %%를 쓴다.
"Error is %d%%" %98
'Error is 98%'
# 정렬과 공백
"%10s" %"hi"
' hi'
"-10sjane." %'hi'
'hi jane.'
# 소수점 표현
"%0.4f" %3.42134234
'3.4213'
"%10.4f" %3.42134234
' 3.4213'
# format 함수를 사용한 formatting
# 숫자 바로 대입
"I eat {} apples".format(3)
'I eat 3 apples'
# 문자열 바로 대입
"I eat {} apples".format("five")
'I eat five apples'
# 숫자 값을 가진 변수로 대입
number = 3
"I eat {} apples".format(number)
'I eat 3 apples'
# 2개 이상의 값 넣기
number = 10
day = "three"
"I ate {} apples. so I was sick for {} days.".format(number, day)
'I ate 10 apples. so I was sick for three days.'
# 이름으로 넣기
"I ate {number} apples. so I was sick for {day} days.".format(number=10,day=3)
'I ate 10 apples. so I was sick for 3 days.'
# 왼쪽 정렬
"{0:<10}".format("hi")
'hi '
# 오른쪽 정렬
"{0:>10}".format("hi")
' hi'
# 가운데 정렬
"{0:^10}".format("hi")
' hi '
# 공백 채우기
"{0:=^10}".format("hi")
'====hi===='
"{0:!<10}".format("hi")
'hi!!!!!!!!'
# f문자열 formatting
name = '홍길동'
age = 30
f'나의 이름은 {name}입니다. 나이는 {age}입니다.'
'나의 이름은 홍길동 입니다. 나이는 30입니다.'
f'나는 내년이면 {age+1}살이 된다.'
'나는 내년이면 31살이 된다.'
d = {'name':'홍길동', 'age':30}
f'나의 이름은 {d["name"]}입니다. 나이는 {d["age"]}입니다.'
'나의 이름은 홍길동입니다. 나이는 30입니다.'
# count()
a = "hobby"
a.count('b') #2
# find()
a = "Python is the best choice"
a.find('b') #14
a.find('k') #-1
# index()
a = "Life is too short"
a.index('t') #8
## index()함수는 없는 문자를 찾으면 오류 발생
#join()
",".join('abcd')
'a,b,c,d'
#upper()
a = "hi"
a.upper() #'HI'
#lower()
a = "HI"
a.lower() #'hi'
#lstrip()
a = " hi "
a.lstrip() #'hi '
#rstrip()
a = " hi "
a.rstrip() #' hi'
#strip()
a = " hi "
a.strip() #'hi'
#replace()
a = "Life is too short"
a.replace("Life", "Your leg") #'Your leg too short'
#split()
a = "Life is too short"
a.split() #['Life','is','too','short']
b = "a:b:c:d"
b.split(':') #['a','b','c','d']
| [
"noreply@github.com"
] | Daboni.noreply@github.com |
7d48311b4c76492c3f5616ad0a990dbeef8271b4 | 29b1757434a8069fd65bf11303a4422a4a7b8d47 | /grit/command/Remote.py | 316c67fcc4abda133a942fcfcfbb78ab0f5cde86 | [
"Artistic-2.0"
] | permissive | rec/grit | 720310e33aee7b31d26b976e1936e6e390dba2f4 | b5be6d50cb802db9c9510e68688908f3d4d6d162 | refs/heads/master | 2020-05-17T20:03:39.979504 | 2019-02-19T13:38:57 | 2019-02-19T13:38:57 | 21,712,161 | 2 | 1 | null | 2015-03-17T00:56:48 | 2014-07-10T22:14:25 | Python | UTF-8 | Python | false | false | 1,583 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import urllib2
from grit import Call
from grit.Cache import cached
from grit import Project
from grit import Settings
_REMOTE = """
git remote add {nickname} git@github.com:{user}/{project}.git
"""
SAFE = True
HELP = """
grit r[emote] <user> [<nickname>]
Adds a remote branch for <user> named <nickname> (which defaults to <user>
if it's empty.
"""
def existing_remotes(cwd):
return set(Call.call_raw('git remote', cwd=cwd).split())
def add_remote(user, nickname, cwd=None, existing=None):
existing = existing or existing_remotes(cwd)
if nickname in existing:
return
remote = _REMOTE.format(
user=user, nickname=nickname, project=Settings.PROJECT)
Call.call(remote, cwd=cwd)
def remote(user='all', nickname='', cwd=None):
if user == 'all':
assert not nickname
remotes = Project.settings('remotes').items()
else:
remotes = [(nickname, user or user)]
existing = existing_remotes(cwd)
for nickname, user in remotes:
if nickname not in existing:
add_remote(user, nickname, cwd=cwd, existing=existing)
return remotes
@cached
def remotes():
return remote()
@cached
def inverse():
return dict((v, k) for (k, v) in remotes())
def add_nickname(user):
if user == Settings.USER:
nickname = 'origin'
else:
try:
nickname = inverse()[user]
except KeyError:
add_remote(user, user)
nickname = user
| [
"tom@swirly.com"
] | tom@swirly.com |
203327de968cec0206b4fe3aedf0a89d8e26ff80 | 0dc9bbce77d65a6991f7659c70bf4b81bb319a28 | /artascope/src/model/user_config.py | 65658857748e05322f7ca187a74384a026c9e800 | [
"MIT"
] | permissive | magus0219/icloud-photo-downloader | cca1f3aa0ee93fd3fb195d68d5e02edacea19bc5 | 6334530d971cf61089d031de99a38f204c201837 | refs/heads/master | 2023-05-06T14:51:21.145193 | 2020-08-10T14:10:49 | 2020-08-10T14:10:49 | 259,565,469 | 11 | 0 | MIT | 2021-06-02T01:53:05 | 2020-04-28T07:37:03 | Python | UTF-8 | Python | false | false | 2,212 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[magus0219@gmail.com] on 2020/3/30
from artascope.src.model.mixin import JsonDataMixin
class NotifyType:
NONE = 0
SLACK = 1
EMAIL = 2
class TargetType:
SFTP = 1
class SchedulerEnable:
Disable = 0
Enable = 1
class ReindexEnable:
Disable = 0
Enable = 1
class UserConfig(JsonDataMixin):
def __init__(
self,
icloud_username: str,
icloud_password: str,
target_type: int = TargetType.SFTP,
sftp_host: str = None,
sftp_port: int = None,
sftp_username: str = None,
sftp_password: str = None,
sftp_dir: str = None,
reindex_enable: int = ReindexEnable.Disable,
sftp_home: str = None,
admin_url_prefix: str = None,
notify_type: int = NotifyType.NONE,
slack_token: str = None,
slack_channel: str = None,
smtp_host: str = None,
smtp_port: int = None,
smtp_user: str = None,
smtp_password: str = None,
msg_from: str = None,
msg_to: str = None,
scheduler_enable: int = SchedulerEnable.Disable,
scheduler_crontab: str = None,
scheduler_last_day_cnt: int = None,
):
self.icloud_username = icloud_username
self.icloud_password = icloud_password
self.target_type = target_type
self.sftp_host = sftp_host
self.sftp_port = sftp_port
self.sftp_username = sftp_username
self.sftp_password = sftp_password
self.sftp_dir = sftp_dir
self.reindex_enable = reindex_enable
self.sftp_home = sftp_home
self.admin_url_prefix = admin_url_prefix
self.notify_type = notify_type
self.slack_token = slack_token
self.slack_channel = slack_channel
self.smtp_host = smtp_host
self.smtp_port = smtp_port
self.smtp_user = smtp_user
self.smtp_password = smtp_password
self.msg_from = msg_from
self.msg_to = msg_to
self.scheduler_enable = scheduler_enable
self.scheduler_crontab = scheduler_crontab
self.scheduler_last_day_cnt = scheduler_last_day_cnt
| [
"magus0219@gmail.com"
] | magus0219@gmail.com |
13f1d9708281dbfba9691ad955cf3dc31d7cedf6 | e1cacf76f531494414c846f61d055b06801052d8 | /sdk/python/kubeflow/katib/configuration.py | 6afecca91b0a56b71205147a7189f5e67a9965e8 | [
"Apache-2.0"
] | permissive | prem0912/katib | 388d6637f2fed8c67aa7bd0ad4419dedccb75ad7 | d19149ddcd5b59054d4c26fb23a141b8adbe9634 | refs/heads/master | 2020-06-17T16:10:55.954875 | 2020-06-03T09:19:53 | 2020-06-03T10:06:01 | 243,764,109 | 0 | 2 | Apache-2.0 | 2020-02-28T13:04:57 | 2020-02-28T13:04:57 | null | UTF-8 | Python | false | false | 7,643 | py | # coding: utf-8
"""
katib
swagger description for katib # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
_default = None
def __init__(self):
"""Constructor"""
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
# Default Base url
self.host = "https://localhost"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("katib")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if (self.api_key.get(identifier) and
self.api_key_prefix.get(identifier)):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501
elif self.api_key.get(identifier):
return self.api_key[identifier]
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v0.1\n"\
"SDK Package Version: 0.0.1".\
format(env=sys.platform, pyversion=sys.version)
| [
"premnath.vel@gmail.com"
] | premnath.vel@gmail.com |
8f437d715a4f2b8228b57dd1fbf993432c1036ab | 2af94f8a7609d47fdcea28a2132c4f8bacb103e3 | /lib/_idigi_data.py | 793da64bda691d043bc092f2f50eb3cd4c3c53b3 | [] | no_license | bernhara/DigiGateway4Raph | 685527723f0b306f387233c78d27fe9d78717c38 | f36ba29ef883d70f94b8609ff734b5dcde786c66 | refs/heads/master | 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,503 | py | """idigi_data
idigi_data provides an easy way for Digi device based python apps to
push up data files to the Device Cloud server the device belongs to.
See idigi_pc.py to run on a PC.
"""
import sys
import os
sys.path.append("WEB/python/idigi_data.zip")
try:
# Device Cloud is built into Digi devices. You need to be running python on a Digi
# device with firmware support for Device Cloud to use this module.
import cwm as _idigi
except:
# idigi_pc is an optional module that allows using idigi_data (in a limited way)
# on a PC.
print 'WARNING: STUB: using idigi_pc'
import idigi_pc as _idigi
import digi_httplib as httplib
from mimetypes import guess_type
__all__= ["send_idigi_data", "send_idigi_data_with_type", "send_idigi_xml", "get_idigi_values"]
def send_idigi_data (data, filename, collection=None, secure=True):
"""
Send data to the Device Cloud server with the filename specified.
Note the filename must be specified and will be used to store the
document. If the filename already exists in the database the
existing file will be overwritten.
A file extension will be used to guess the content type using mimetypes.
For instance, file1.xml will be stored as XML. file2.jpg will be saved as
a JPEG.
`collection` is an optional paramater specifying any subcollections that
the file should be stored in. None means use the root collection for this
device. collections are specified without leading or trailing slashes ('/')
and must be separated with a slash (directory like).
Example collection: my_collections/sensors/morning_readings
By default, all data is transferred using an encrypted transfer. If
an unencrypted transfer is desired, specify `secure=False`.
Returns (success, error, errmsg):
Success:
True if successful, False if the upload failed.
error:
status of transfer. If HTTP transport, http status is returned.
Errors:
100-510 HTTP errors (see httplib.py).
10000 Data service is not available on this device
errmsg:
text associated with error
"""
this_type, encoding = guess_type(filename)
if this_type == None:
raise ValueError("File extension not recognized")
return _send_to_idigi (data, filename, collection, this_type, secure)
def send_idigi_data_with_type (data, filename, collection, content_type, secure=True):
"""
Send data to the Device Cloud server with the filename specified.
Note the filename must be specified and will be used to store the
document. If the filename already exists in the database the
existing file will be overwritten.
The content type will be used to store the file. The content must
be a valid content type. Example: `text/xml`
`collection` specifies any subcollections that the file should be
stored in. None means use the root collection for this device.
collections are specified without leading or trailing slashes
('/') and must be separated with a slash (directory like).
Example collection: my_collections/sensors/morning_readings
By default, all data is transferred using an encrypted transfer. If
an unencrypted transfer is desired, specify secure=False.
Returns (success, error, errmsg):
Success:
`True` if successful, `False` if the upload failed.
error:
status of transfer. If HTTP transport, http status is returned.
Errors:
100-510 HTTP errors (see httplib.py).
10000 Data service is not available on this device
errmsg:
text associated with error
"""
return _send_to_idigi (data, filename, collection, content_type, secure)
def send_idigi_xml (userXml, filename, collection=None, secure=True):
"""
Send the xml string userXml to the data server with the filename specified.
Note the filename must be specified and will be used to store the
document. If the filename already exists in the database the
existing file will be overwritten.
A file extension of .xml is recommended (for example: my_file.xml)
`collection` is an optional paramater specifying any
subcollections that the file should be stored in. None means use
the root collection for this device. collections are specified
without leading or trailing slashes ('/') and must be separated
with a slash (directory like).
Example collection: my_collections/sensors/morning_readings
By default, all data is transferred using an encrypted transfer.
If an unencrypted transfer is desired, specify secure=False.
Returns (success, error, errmsg):
Success:
`True` if successful, `False` if the upload failed.
error:
status of transfer. If HTTP transport, http status is returned.
Errors:
100-510 HTTP errors (see httplib.py).
10000 Data service is not available on this device
errmsg:
text associated with error
"""
this_type = 'text/xml'
return _send_to_idigi (userXml, filename, collection, this_type, secure)
def _send_to_idigi (data, filename, collection, content_type, secure=True):
if data == None or filename == None:
return False
try:
host, token, path, port, securePort = _idigi._get_ws_parms()
if secure == True:
host = "%s:%d" % (host, securePort)
else:
host = "%s:%d" % (host, port)
except:
host, token, path = _idigi._get_ws_parms()
hostSplit = host.split(":")
port = hostSplit[1]
if host == None or host[0] == ":" or token == None or path == None or \
port == None or port == 0:
err = 10000
msg = "Data Service not available, check Remote Management configuration"
return False, err, msg
if collection == None:
fullPath = path
else:
fullPath = path + "/" + collection
tout = 300 # TODO: Parameterize
if secure == True:
con = httplib.HTTPSConnection(host)
else:
con = httplib.HTTPConnection(host)
con.putrequest('PUT', '%s/%s' % (fullPath, filename))
con.putheader('Content-Type', content_type)
clen = len(data)
con.putheader('Content-Length', `clen`)
con.putheader('Authorization', 'Basic %s' % token)
con.endheaders()
con.send(data)
response = con.getresponse()
errcode = response.status
errmsg = response.reason
headers = response.msg
con.close()
if errcode != 200 and errcode != 201:
return False, errcode, errmsg
else:
return True, errcode, errmsg
def get_idigi_values():
"""\
Used to return the current runtime Device Cloud values and parameters.
"""
return _idigi._get_ws_parms()
| [
"ORBA6563@S-ORBA65630.rd.francetelecom.fr"
] | ORBA6563@S-ORBA65630.rd.francetelecom.fr |
e8637c520683003b8fa83bcc0a7a54244c0cb2aa | ff0a81d12b17a45a214380f4bdbb2360e050ff40 | /src/accounts/migrations/0001_initial.py | ee232af4db0f7eb2fcf0a2240e24198592e30880 | [] | no_license | rcmiskin10/university-marketplace | 45425275f6f7a73ab81441d613c26def2410a8e4 | 726e99153ad36d0aa38141822285f79feb910c06 | refs/heads/master | 2020-04-05T23:42:59.025790 | 2017-01-11T03:15:20 | 2017-01-11T03:15:20 | 68,738,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('email', models.EmailField(unique=True, max_length=255, verbose_name=b'email address')),
('username', models.CharField(max_length=120, null=True, blank=True)),
('first_name', models.CharField(max_length=120, null=True, blank=True)),
('last_name', models.CharField(max_length=120, null=True, blank=True)),
('is_active', models.BooleanField(default=False)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activation_key', models.CharField(max_length=40, blank=True)),
('key_expires', models.DateTimeField(default=datetime.date(2016, 3, 6))),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'User profiles',
},
),
]
| [
"rcmiskin@gmail.com"
] | rcmiskin@gmail.com |
f4ae5179833a9c7f84ab735605567beee5973043 | 861a3f230b19bb5db1e34750e7c469a5989bf162 | /11.HashTable/2.jewels-and-stones/3_counter.py | d56c1f27ba1e577815a47a43ef8032f167a7adbe | [] | no_license | jana-choi/PythonCodingTest | 6ef3786c9a9c71dc749723a4db614a1833332791 | 294588392cf551a77a28d9153098c3d823f271f7 | refs/heads/master | 2022-12-24T10:49:36.712057 | 2020-09-25T01:59:58 | 2020-09-25T01:59:58 | 285,706,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from collections import Counter
def numJewelsInStones(J, S):
freqs = Counter(S)
count = 0
for char in J:
count += freqs[char]
return count
if __name__ == "__main__":
J = "aA"
S = "aAAbbbb"
print(numJewelsInStones(J, S)) | [
"lovej0918@gmail.com"
] | lovej0918@gmail.com |
d224412bcfacaefd9576997b2fd07907fabe1ad6 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.7.2/_downloads/edc9e862d9e0d115b20877b32f705afc/pcolor_demo.py | 2e17cd7db90b2f637939f4660fe465fbf44089c0 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 3,626 | py | """
===========
Pcolor demo
===========
Generating images with `~.axes.Axes.pcolor`.
Pcolor allows you to generate 2D image-style plots. Below we will show how
to do so in Matplotlib.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
# Fixing random state for reproducibility
np.random.seed(19680801)
###############################################################################
# A simple pcolor demo
# --------------------
Z = np.random.rand(6, 10)
fig, (ax0, ax1) = plt.subplots(2, 1)
c = ax0.pcolor(Z)
ax0.set_title('default: no edges')
c = ax1.pcolor(Z, edgecolors='k', linewidths=4)
ax1.set_title('thick edges')
fig.tight_layout()
plt.show()
###############################################################################
# Comparing pcolor with similar functions
# ---------------------------------------
#
# Demonstrates similarities between `~.axes.Axes.pcolor`,
# `~.axes.Axes.pcolormesh`, `~.axes.Axes.imshow` and
# `~.axes.Axes.pcolorfast` for drawing quadrilateral grids.
# Note that we call ``imshow`` with ``aspect="auto"`` so that it doesn't force
# the data pixels to be square (the default is ``aspect="equal"``).
# make these smaller to increase the resolution
dx, dy = 0.15, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[-3:3+dy:dy, -3:3+dx:dx]
z = (1 - x/2 + x**5 + y**3) * np.exp(-x**2 - y**2)
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = -abs(z).max(), abs(z).max()
fig, axs = plt.subplots(2, 2)
ax = axs[0, 0]
c = ax.pcolor(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('pcolor')
fig.colorbar(c, ax=ax)
ax = axs[0, 1]
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('pcolormesh')
fig.colorbar(c, ax=ax)
ax = axs[1, 0]
c = ax.imshow(z, cmap='RdBu', vmin=z_min, vmax=z_max,
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest', origin='lower', aspect='auto')
ax.set_title('image (nearest, aspect="auto")')
fig.colorbar(c, ax=ax)
ax = axs[1, 1]
c = ax.pcolorfast(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('pcolorfast')
fig.colorbar(c, ax=ax)
fig.tight_layout()
plt.show()
###############################################################################
# Pcolor with a log scale
# -----------------------
#
# The following shows pcolor plots with a log scale.
N = 100
X, Y = np.meshgrid(np.linspace(-3, 3, N), np.linspace(-2, 2, N))
# A low hump with a spike coming out.
# Needs to have z/colour axis on a log scale, so we see both hump and spike.
# A linear scale only shows the spike.
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X * 10)**2 - (Y * 10)**2)
Z = Z1 + 50 * Z2
fig, (ax0, ax1) = plt.subplots(2, 1)
c = ax0.pcolor(X, Y, Z, shading='auto',
norm=LogNorm(vmin=Z.min(), vmax=Z.max()), cmap='PuBu_r')
fig.colorbar(c, ax=ax0)
c = ax1.pcolor(X, Y, Z, cmap='PuBu_r', shading='auto')
fig.colorbar(c, ax=ax1)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.pcolor` / `matplotlib.pyplot.pcolor`
# - `matplotlib.axes.Axes.pcolormesh` / `matplotlib.pyplot.pcolormesh`
# - `matplotlib.axes.Axes.pcolorfast`
# - `matplotlib.axes.Axes.imshow` / `matplotlib.pyplot.imshow`
# - `matplotlib.figure.Figure.colorbar` / `matplotlib.pyplot.colorbar`
# - `matplotlib.colors.LogNorm`
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
78f2cb9e8e6566e1b063e3cf67098480f3c48b68 | 54049cdb26004a52689254c7b3fedff9e2c6d163 | /bloom/commands/patch/remove_cmd.py | c5da59a8c1bd50ca790ca9fa0126e0f09437b43b | [
"BSD-3-Clause"
] | permissive | hershwg/bloom | d6692e8bc63a3e95e8165fb80a75b32271b7a526 | 9fbdd6f6de52cf4263b76a93e37a730725242e93 | refs/heads/master | 2021-01-15T20:19:23.232209 | 2012-10-25T07:00:34 | 2012-10-25T07:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,525 | py | from __future__ import print_function
import sys
from argparse import ArgumentParser
from bloom.util import add_global_arguments
from bloom.util import execute_command
from bloom.util import handle_global_arguments
from bloom.logging import log_prefix
from bloom.logging import error
from bloom.logging import debug
from bloom.git import branch_exists
from bloom.git import checkout
from bloom.git import get_current_branch
from bloom.git import track_branches
from bloom.commands.patch.common import get_patch_config
@log_prefix('[git-bloom-patch remove]: ')
def remove_patches(directory=None):
# Get the current branch
current_branch = get_current_branch(directory)
# Ensure the current branch is valid
if current_branch is None:
error("Could not determine current branch, are you in a git repo?")
return 1
# Construct the patches branch
patches_branch = 'patches/' + current_branch
try:
# See if the patches branch exists
if branch_exists(patches_branch, False, directory=directory):
if not branch_exists(patches_branch, True, directory=directory):
track_branches(patches_branch, directory)
else:
error("No patches branch (" + patches_branch + ") found, cannot "
"remove patches.")
return 1
# Get the parent branch from the patches branch
config = get_patch_config(patches_branch, directory=directory)
parent, spec = config['parent'], config['base']
if None in [parent, spec]:
error("Could not retrieve patches info.")
return 1
debug("Removing patches from " + current_branch + " back to base "
"commit " + spec)
# Reset this branch using git reset --hard spec
execute_command('git reset --hard ' + spec, cwd=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory)
return 0
def get_parser():
"""Returns a parser.ArgumentParser with all arguments defined"""
parser = ArgumentParser(description="""
Removes any applied patches from the working branch, including any un-exported
patches, so use with caution.
""")
return parser
def main():
# Assumptions: in a git repo, this command verb was passed, argv has enough
sysargs = sys.argv[2:]
parser = get_parser()
parser = add_global_arguments(parser)
args = parser.parse_args(sysargs)
handle_global_arguments(args)
return remove_patches()
| [
"wjwwood@gmail.com"
] | wjwwood@gmail.com |
323456b2a796bba44c887fe9bdc4cce4f6cb8c09 | 365c85a280596d88082c1f150436453f96e18c15 | /Python/Daily/2029. stone_game_ix.py | f647f8a361d8cfb7d86d66e00b93c306cc7099bf | [] | no_license | Crisescode/leetcode | 0177c1ebd47b0a63476706562bcf898f35f1c4f2 | c3a60010e016995f06ad4145e174ae19668e15af | refs/heads/master | 2023-06-01T06:29:41.992368 | 2023-05-16T12:32:10 | 2023-05-16T12:32:10 | 243,040,322 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,553 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# https://leetcode-cn.com/problems/stone-game-ix/
# Alice and Bob continue their games with stones. There is a row of n stones, and each stone
# has an associated value. You are given an integer array stones, where stones[i] is the value of the ith stone.
#
# Alice and Bob take turns, with Alice starting first. On each turn, the player may remove any
# stone from stones. The player who removes a stone loses if the sum of the values of all removed
# stones is divisible by 3. Bob will win automatically if there are no remaining stones (even if it is Alice's turn).
#
# Assuming both players play optimally, return true if Alice wins and false if Bob wins.
#
"""
Example 1:
Input: stones = [2,1]
Output: true
Explanation: The game will be played as follows:
- Turn 1: Alice can remove either stone.
- Turn 2: Bob removes the remaining stone.
The sum of the removed stones is 1 + 2 = 3 and is divisible by 3. Therefore, Bob loses and Alice wins the game.
Example 2:
Input: stones = [2]
Output: false
Explanation: Alice will remove the only stone, and the sum of the values on the removed stones is 2.
Since all the stones are removed and the sum of values is not divisible by 3, Bob wins the game.
Example 3:
Input: stones = [5,1,2,4,3]
Output: false
Explanation: Bob will always win. One possible way for Bob to win is shown below:
- Turn 1: Alice can remove the second stone with value 1. Sum of removed stones = 1.
- Turn 2: Bob removes the fifth stone with value 3. Sum of removed stones = 1 + 3 = 4.
- Turn 3: Alices removes the fourth stone with value 4. Sum of removed stones = 1 + 3 + 4 = 8.
- Turn 4: Bob removes the third stone with value 2. Sum of removed stones = 1 + 3 + 4 + 2 = 10.
- Turn 5: Alice removes the first stone with value 5. Sum of removed stones = 1 + 3 + 4 + 2 + 5 = 15.
Alice loses the game because the sum of the removed stones (15) is divisible by 3. Bob wins the game.
"""
from typing import List
class Solution:
def stoneGameIX(self, stones: List[int]) -> bool:
cnt0 = cnt1 = cnt2 = 0
for stone in stones:
if stone % 3 == 0:
cnt0 += 1
elif stone % 3 == 1:
cnt1 += 1
else:
cnt2 += 1
if cnt0 % 2 == 0:
return cnt1 > 0 and cnt2 > 0
else:
return abs(cnt1 - cnt2) > 2
if __name__ == "__main__":
print(Solution().stoneGameIX([5, 1, 2, 4, 3]))
| [
"zhaopanp2018@outlook.com"
] | zhaopanp2018@outlook.com |
7c37858040a76a9843570a391d8938e7658c7749 | d936bc1d23f3ccee2e87f7a911ad7eada9147d0f | /canvas_mouse_draw_shape.py | 8ca2ae61d3758d5649e80e2bf4c5a5207376efeb | [] | no_license | mesebilisim/python-kod-bankasi | b83982fa962194b43bd3e1fbbbd5bf4ad56a6c11 | 8740cba66fd764ec8cf4174aa5e6cade49ae0af6 | refs/heads/master | 2020-07-27T21:41:35.882996 | 2015-10-13T22:55:23 | 2015-10-13T22:55:23 | 73,424,342 | 1 | 0 | null | 2016-11-10T22:00:23 | 2016-11-10T22:00:23 | null | UTF-8 | Python | false | false | 1,427 | py |
from Tkinter import *
trace = 0
class CanvasEventsDemo:
def __init__(self, parent=None):
canvas = Canvas(width=300, height=300, bg='beige')
canvas.pack()
canvas.bind('<ButtonPress-1>', self.onStart)
canvas.bind('<B1-Motion>', self.onGrow)
canvas.bind('<Double-1>', self.onClear)
canvas.bind('<ButtonPress-3>', self.onMove)
self.canvas = canvas
self.drawn = None
self.kinds = [canvas.create_oval, canvas.create_rectangle]
def onStart(self, event):
self.shape = self.kinds[0]
self.kinds = self.kinds[1:] + self.kinds[:1]
self.start = event
self.drawn = None
def onGrow(self, event):
canvas = event.widget
if self.drawn: canvas.delete(self.drawn)
objectId = self.shape(self.start.x, self.start.y, event.x, event.y)
if trace: print objectId
self.drawn = objectId
def onClear(self, event):
event.widget.delete('all')
def onMove(self, event):
if self.drawn:
if trace: print self.drawn
canvas = event.widget
diffX, diffY = (event.x - self.start.x), (event.y - self.start.y)
canvas.move(self.drawn, diffX, diffY)
self.start = event
if __name__ == '__main__':
CanvasEventsDemo()
mainloop()
| [
"electrocoder@gmail.com"
] | electrocoder@gmail.com |
680f3dc411c72dd3386788bef61685df9c7fcb4f | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /appium/没整理/lesson2/toutiao1.py | 1ef5799e27844b16d15878ddd160fd41e56ce680 | [] | no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # coding=utf8
from appium import webdriver
import time,traceback
desired_capabilities = {}
desired_capabilities['platformName'] = 'Android'
desired_capabilities['automationName'] = 'Appium'
desired_capabilities['platformVersion'] = '5.1'
desired_capabilities['deviceName'] = '192.168.56.104:5555'
desired_capabilities['app'] = '/Users/zhoujunjun/Downloads/toutiao.apk'
desired_capabilities['appPackage'] = 'io.manong.developerdaily'
desired_capabilities['appActivity'] = 'io.toutiao.android.ui.activity.LaunchActivity'
desired_capabilities['unicodeKeyboard'] = True
desired_capabilities['noReset'] = True
desired_capabilities['newCommandTimeout'] = 6000
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_capabilities)
#driver.implicitly_wait(10)
try:
# -----------------
driver.find_element_by_class_name("android.widget.ImageButton")
time.sleep(2)
tvs = driver.find_elements_by_class_name("android.widget.TextView")
for tv in tvs:
print(tv.text)
# -----------------
except:
print(traceback.format_exc())
input('**** Press to quit..')
driver.quit() | [
"1174497735@qq.com"
] | 1174497735@qq.com |
331c3fec668136c0d08a36f50d5718da3362a69b | d170dbc754cca63e712284294b6286065fec6df5 | /fa/jquery/templates/admin/models.pt.py | a3278b8727dcae923aec797de8987774cd997501 | [] | no_license | FormAlchemy/fa.jquery | 3d2ba746a1631db32d2f83fba7c6ae9b75bb312f | 8aa4686c66cf8612fd4a5c2e7fc34e29cb8b606f | refs/heads/master | 2016-09-06T06:03:12.277972 | 2013-10-10T08:46:36 | 2013-10-10T08:46:36 | 1,256,529 | 9 | 12 | null | 2017-12-21T08:05:22 | 2011-01-15T00:55:19 | Python | UTF-8 | Python | false | false | 5,037 | py | registry = dict(version=0)
def bind():
from cPickle import loads as _loads
_lookup_attr = _loads('cchameleon.core.codegen\nlookup_attr\np1\n.')
_attrs_4358996304 = _loads('(dp1\nVclass\np2\nVfa_model ui-widget-header ui-corner-all\np3\ns.')
_init_scope = _loads('cchameleon.core.utils\necontext\np1\n.')
_re_amp = _loads("cre\n_compile\np1\n(S'&(?!([A-Za-z]+|#[0-9]+);)'\np2\nI0\ntRp3\n.")
_init_stream = _loads('cchameleon.core.generation\ninitialize_stream\np1\n.')
_attrs_4358996112 = _loads('(dp1\n.')
_attrs_4358950800 = _loads('(dp1\n.')
_init_default = _loads('cchameleon.core.generation\ninitialize_default\np1\n.')
_init_tal = _loads('cchameleon.core.generation\ninitialize_tal\np1\n.')
def render(econtext, rcontext=None):
macros = econtext.get('macros')
_translate = econtext.get('_translate')
_slots = econtext.get('_slots')
target_language = econtext.get('target_language')
u'_init_stream()'
(_out, _write, ) = _init_stream()
u'_init_tal()'
(_attributes, repeat, ) = _init_tal()
u'_init_default()'
_default = _init_default()
u'None'
default = None
u'None'
_domain = None
u"main.macros['master']"
_metal = _lookup_attr(econtext['main'], 'macros')['master']
def _callback_main(econtext, _repeat, _out=_out, _write=_write, _domain=_domain, **_ignored):
if _repeat:
repeat.update(_repeat)
attrs = _attrs_4358950800
u'models.items()'
_write(u'<div>\n ')
_tmp1 = _lookup_attr(econtext['models'], 'items')()
item = None
(_tmp1, _tmp2, ) = repeat.insert('item', _tmp1)
for item in _tmp1:
_tmp2 = (_tmp2 - 1)
attrs = _attrs_4358996112
u"''"
_write(u'<div>\n ')
_default.value = default = ''
u'item[0]'
_content = item[0]
attrs = _attrs_4358996304
u'item[1]'
_write(u'<a class="fa_model ui-widget-header ui-corner-all"')
_tmp3 = item[1]
if (_tmp3 is _default):
_tmp3 = None
if ((_tmp3 is not None) and (_tmp3 is not False)):
if (_tmp3.__class__ not in (str, unicode, int, float, )):
_tmp3 = unicode(_translate(_tmp3, domain=_domain, mapping=None, target_language=target_language, default=None))
else:
if not isinstance(_tmp3, unicode):
_tmp3 = str(_tmp3)
if ('&' in _tmp3):
if (';' in _tmp3):
_tmp3 = _re_amp.sub('&', _tmp3)
else:
_tmp3 = _tmp3.replace('&', '&')
if ('<' in _tmp3):
_tmp3 = _tmp3.replace('<', '<')
if ('>' in _tmp3):
_tmp3 = _tmp3.replace('>', '>')
if ('"' in _tmp3):
_tmp3 = _tmp3.replace('"', '"')
_write(((' href="' + _tmp3) + '"'))
u'_content'
_write('>')
_tmp3 = _content
_tmp = _tmp3
if (_tmp.__class__ not in (str, unicode, int, float, )):
try:
_tmp = _tmp.__html__
except:
_tmp = _translate(_tmp, domain=_domain, mapping=None, target_language=target_language, default=None)
else:
_tmp = _tmp()
_write(_tmp)
_tmp = None
if (_tmp is not None):
if not isinstance(_tmp, unicode):
_tmp = str(_tmp)
if ('&' in _tmp):
if (';' in _tmp):
_tmp = _re_amp.sub('&', _tmp)
else:
_tmp = _tmp.replace('&', '&')
if ('<' in _tmp):
_tmp = _tmp.replace('<', '<')
if ('>' in _tmp):
_tmp = _tmp.replace('>', '>')
_write(_tmp)
_write(u'</a>\n </div>')
if (_tmp2 == 0):
break
_write(' ')
_write(u'\n </div>\n')
u"{'main': _callback_main}"
_tmp = {'main': _callback_main, }
u"main.macros['master']"
_metal.render(_tmp, _out=_out, _write=_write, _domain=_domain, econtext=econtext)
return _out.getvalue()
return render
__filename__ = '/Users/gawel/py/formalchemy_project/fa.jquery/fa/jquery/templates/admin/models.pt'
registry[(None, True, '1488bdb950901f8f258549439ef6661a49aae984')] = bind()
| [
"gael@gawel.org"
] | gael@gawel.org |
50953bdf36c326c7d5d2cae9869a92215db15261 | a54d5a5ae5ba352963f1166a29e1bb6c867157ab | /python/test/test_good_name.py | 30030f836c9780fecc2b6f39ca94cdcb806270bf | [] | no_license | alephist/edabit-coding-challenges | 06f573e90ffbd13bc54ecbdaa8e6a225aa44f5d8 | 35f1fc84848fc44e184aae1ae231a36319c1c81e | refs/heads/main | 2023-07-30T22:39:37.468756 | 2021-09-18T07:47:02 | 2021-09-18T07:47:02 | 341,467,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import unittest
from typing import Tuple
from good_name import name_score
# (input name, expected rating) fixtures for name_score.
# Annotation fixed: a variable-length homogeneous tuple is Tuple[X, ...],
# not Tuple[X] (which means a 1-tuple).
test_values: Tuple[Tuple[str, str], ...] = (
    ('MUBASHIR', "THE BEST"),
    ('MATT', "THE BEST"),
    ('PAKISTAN', "THE BEST"),
    ('AIRFORCE', "THE BEST"),
    ('GUV', 'NOT TOO GOOD'),
    ('PUBG', "NOT TOO GOOD"),
    ('ME', "PRETTY GOOD"),
    ('BOB', "PRETTY GOOD"),
    ('JLJ', 'PRETTY GOOD'),
    ('YOU', 'VERY GOOD'),
    ('FABIO', "VERY GOOD"),
    ('ROBBY', 'THE BEST'),
    ('BILL GATES', "THE BEST")
)
class GoodNameTestCase(unittest.TestCase):
    """Table-driven checks for ``name_score`` against the fixtures above."""
    def test_add_all_letters_from_name_to_get_score_rating(self):
        # One sub-test per fixture so a single failing name does not
        # hide the results of the remaining ones.
        for candidate_name, expected_rating in test_values:
            with self.subTest():
                self.assertEqual(name_score(candidate_name), expected_rating)
# Allow running the suite directly: ``python test_good_name.py``.
if __name__ == '__main__':
    unittest.main()
| [
"justin.necesito@gmail.com"
] | justin.necesito@gmail.com |
1905fd29a2ad0d74d4ce302177cb049f49249cd9 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_19988.py | f78479142fcdf29382cc31b81425d60aff326ea2 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # Trouble with aligning two y-axis ticks with matplotlib
# Copy ax1's y-limits onto ax2 scaled by 2 so the tick positions of the two
# y-axes line up visually.
# NOTE(review): assumes ``ax1`` and ``ax2`` are pre-existing matplotlib Axes
# (e.g. ``ax2 = ax1.twinx()``) -- they are not defined in this snippet.
lim1 = ax1.get_ylim()
lim2 = (lim1[0]*2, lim1[1] *2)
ax2.set_ylim(lim2)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
2e57d9700d3d664bc7705e1dbba720f6395b5865 | c15db2bb1756ee63bab13e583ff70c18e765d575 | /drf_generators/templates/apiview.py | 3972099189a1aef1850173941c99580a581d08f9 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | tkliuxing/drf-generators | 89857c6c0d3b301cd32611f90a9d4ef8fab92d77 | 899c3f4efb3c3fe10ee582f3950bb6e48fc03350 | refs/heads/master | 2020-12-29T22:14:13.732920 | 2020-08-10T07:00:51 | 2020-08-10T07:00:51 | 238,751,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py |
__all__ = ['API_VIEW', 'API_URL']
API_URL = """from django.conf.urls import include, url
from . import api
urlpatterns = [
{% for model in models %}
url(r'^{{ model|lower }}/(?P<id>[0-9]+)/$', api.{{ model }}APIView.as_view()),
url(r'^{{ model|lower }}/$', api.{{ model }}APIListView.as_view()),
{% endfor %}
]
"""
API_VIEW = """from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from . import serializers
from . import models
{% for model in models %}
class {{ model }}APIView(APIView):
def get(self, request, id, format=None):
try:
item = models.{{ model }}.objects.get(pk=id)
serializer = serializers.{{ model }}Serializer(item)
return Response(serializer.data)
except models.{{ model }}.DoesNotExist:
return Response(status=404)
def put(self, request, id, format=None):
try:
item = models.{{ model }}.objects.get(pk=id)
except models.{{ model }}.DoesNotExist:
return Response(status=404)
serializer = serializers.{{ model }}Serializer(item, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=400)
def delete(self, request, id, format=None):
try:
item = models.{{ model }}.objects.get(pk=id)
except models.{{ model }}.DoesNotExist:
return Response(status=404)
item.delete()
return Response(status=204)
class {{ model }}APIListView(APIView):
def get(self, request, format=None):
items = models.{{ model }}.objects.order_by('pk')
paginator = PageNumberPagination()
result_page = paginator.paginate_queryset(items, request)
serializer = serializers.{{ model }}Serializer(result_page, many=True)
return paginator.get_paginated_response(serializer.data)
def post(self, request, format=None):
serializer = serializers.{{ model }}Serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=201)
return Response(serializer.errors, status=400)
{% endfor %}"""
| [
"ouyanghongyu@gmail.com"
] | ouyanghongyu@gmail.com |
c9adeac271c2119cfa73776e7e87d0a4969b0509 | dd3f5a712dbab0d3c4f4526c64c08ba710f78b81 | /Basic/dataStructure/structure/t06dequeue.py | e9d2f37337664d1f56c4b9a4fbfcb8b579fc4bc1 | [] | no_license | nameusea/pyGreat | 3988ebcce3f80a7e458a20f9b2e3ccba368efcf8 | dde8b6a1348620ffd3b2d65db3d5b4331e5c78be | refs/heads/master | 2023-04-25T09:02:32.831423 | 2021-05-17T11:31:22 | 2021-05-17T11:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | class Dequeue(object):
    def __init__(self):
        '''Initialize an empty double-ended queue.'''
        self.__list = []
    def add_front(self, item):
        '''Insert item at the front of the queue.'''
        self.__list.insert(0, item)
    def add_rear(self, item):
        '''Append item at the rear of the queue.'''
        self.__list.append(item)
    def pop_front(self):
        '''Remove and return the front item.'''
        return self.__list.pop(0) # time complexity O(n): remaining items shift left
    def pop_rear(self):
        '''Remove and return the rear item (O(1)).'''
        return self.__list.pop()
    def is_empty(self):
        '''Return True when the queue holds no items.'''
        return not self.__list
    def size(self):
        '''Return the number of items currently in the queue.'''
        return len(self.__list)
# Smoke test; expected output: 3, 5, 2, 6, 1
# (queue evolves [4] -> [5, 4] -> [5, 4, 6]).
if __name__ == '__main__':
    dq = Dequeue()
    dq.add_front(4)
    dq.add_front(5)
    dq.add_rear(6)
    print(dq.size())
    print(dq.pop_front())
    print(dq.size())
    print(dq.pop_rear())
    print(dq.size())
| [
"darcyzhang@DarcydeMacBook-Air.local"
] | darcyzhang@DarcydeMacBook-Air.local |
216a32e87fa6cda93c871bfcf130c1a61a7e4723 | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/lasers/tasks/panes/co2.py | 1745ef46b0037c5117b26def82274ef321ff93ef | [] | no_license | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | #===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import HasTraits
from traitsui.api import View, Item
from pychron.lasers.tasks.laser_panes import BaseLaserPane, ClientPane, \
StageControlPane, ControlPane, AxesPane
#============= standard library imports ========================
#============= local library imports ==========================
# Concrete task panes for the Fusions CO2 laser.  All behavior comes from the
# shared base classes in ``pychron.lasers.tasks.laser_panes``; the ``id``
# attributes are the Tasks-framework pane ids used to place/restore each
# pane in the window layout.
class FusionsCO2Pane(BaseLaserPane):
    pass
class FusionsCO2ClientPane(ClientPane):
    pass
class FusionsCO2StagePane(StageControlPane):
    id = 'pychron.fusions.co2.stage'
class FusionsCO2ControlPane(ControlPane):
    id = 'pychron.fusions.co2.control'
class FusionsCO2AxesPane(AxesPane):
    id = 'pychron.fusions.co2.axes'
#============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
c618ca60468f267f06da2cbd256b606154cc1254 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/runtime/models/luis_result.py | 15b9ac3de342daddf2340f74c9e4f7d1970e7d5b | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 2,695 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LuisResult(Model):
    """Prediction, based on the input query, containing intent(s) and entities.

    :param query: The input utterance that was analyzed.
    :type query: str
    :param altered_query: The corrected utterance (when spell checking was
     enabled).
    :type altered_query: str
    :param top_scoring_intent: The intent with the highest score.
    :type top_scoring_intent:
     ~azure.cognitiveservices.language.luis.runtime.models.IntentModel
    :param intents: All the intents (and their score) that were detected from
     utterance.
    :type intents:
     list[~azure.cognitiveservices.language.luis.runtime.models.IntentModel]
    :param entities: The entities extracted from the utterance.
    :type entities:
     list[~azure.cognitiveservices.language.luis.runtime.models.EntityModel]
    :param composite_entities: The composite entities extracted from the
     utterance.
    :type composite_entities:
     list[~azure.cognitiveservices.language.luis.runtime.models.CompositeEntityModel]
    :param sentiment_analysis: Sentiment of the input utterance.
    :type sentiment_analysis:
     ~azure.cognitiveservices.language.luis.runtime.models.Sentiment
    """

    # msrest (de)serialization metadata: python attribute -> wire key / type.
    _attribute_map = {
        'query': {'key': 'query', 'type': 'str'},
        'altered_query': {'key': 'alteredQuery', 'type': 'str'},
        'top_scoring_intent': {'key': 'topScoringIntent', 'type': 'IntentModel'},
        'intents': {'key': 'intents', 'type': '[IntentModel]'},
        'entities': {'key': 'entities', 'type': '[EntityModel]'},
        'composite_entities': {'key': 'compositeEntities', 'type': '[CompositeEntityModel]'},
        'sentiment_analysis': {'key': 'sentimentAnalysis', 'type': 'Sentiment'},
    }

    def __init__(self, **kwargs):
        super(LuisResult, self).__init__(**kwargs)
        # Every field is optional: default each attribute to None when absent.
        for attr_name in ('query', 'altered_query', 'top_scoring_intent',
                          'intents', 'entities', 'composite_entities',
                          'sentiment_analysis'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
8978268857118174c16dc40b067aae092d316737 | f59d9f7edacd17af7af2c8a4c900bde4fe18ba1a | /measure_extinction/utils/mock_spectra_data.py | 4ddd463f2a6db111b9657372d0b077a560140350 | [
"BSD-3-Clause"
] | permissive | karllark/measure_extinction | 8a1fead4db8be8dda44b3c4e8687fd9fd3a4d537 | 464fc09a60e0d0ee226b590542404e972b62d2e9 | refs/heads/master | 2023-05-27T06:48:32.434042 | 2023-03-06T13:40:50 | 2023-03-06T13:40:50 | 116,707,319 | 1 | 4 | null | 2023-04-28T12:40:04 | 2018-01-08T17:31:44 | Python | UTF-8 | Python | false | false | 6,858 | py | import pkg_resources
import argparse
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import QTable
import astropy.units as u
__all__ = ["mock_stis_data"]
def mock_stis_single_grating(moddata, gname="G140L", applylsfs=True):
    """
    Mock up a single grating STIS low resolution observation using tabulated
    line spread functions (LSFs)

    Parameters
    ----------
    moddata : astropy.table
        Model spectrum at high enough resolution to support "convolution" with
        the LSFs
    gname : str
        name of the grating to be mocked (G140L, G230L, G430L, or G750L)
    applylsfs : boolean
        allows for mocking with and without the LSFs

    Returns
    -------
    cmoddata : astropy.table
        Convolved and cropped model spectrum for the grating requested
    """
    # per-grating setup: LSF table tags, the wavelengths those LSFs were
    # tabulated at, the grating wavelength coverage, and its dispersion
    if gname == "G140L":
        gtags = ["G140L_1200", "G140L_1500"]
        gwaves = [1200.0, 1500.0] * u.Angstrom
        grange = [1118.7028617667227, 1715.2138336094122] * u.Angstrom
        gdelta = 0.5831004076162168 * u.Angstrom  # Angstrom/pixel
    elif gname == "G230L":
        gtags = ["G230L_1700", "G230L_2400"]
        gwaves = [1700.0, 2400.0] * u.Angstrom
        grange = [1572.0793168982548, 3155.9334544319254] * u.Angstrom
        gdelta = 1.548239955089159 * u.Angstrom  # Angstrom/pixel
    elif gname == "G430L":
        gtags = ["G430L_3200", "G430L_5500"]
        gwaves = [3200.0, 5500.0] * u.Angstrom
        grange = [2894.535384018087, 5704.064392633997] * u.Angstrom
        gdelta = 2.7463714795193273 * u.Angstrom  # Angstrom/pixel
    elif gname == "G750L":
        gtags = ["G750L_7000"]
        gwaves = [7000.0] * u.Angstrom
        grange = [5257.602037433256, 10249.424213346618] * u.Angstrom
        gdelta = 4.879600079462369 * u.Angstrom  # Angstrom/pixel
    else:
        raise ValueError(f"Grating {gname} not supported")
    nlsfs = len(gtags)

    # read the tabulated LSF(s); when the tables differ in length, resample
    # later tables onto the first table's offset grid
    data_path = pkg_resources.resource_filename("measure_extinction", "utils/STIS_LSF/")
    lsfs = []
    for i, ctag in enumerate(gtags):
        a = QTable.read(
            f"{data_path}/data/LSF_{ctag}.txt",
            format="ascii.commented_header",
            header_start=-1,
        )
        a["DELTWAVE"] = a["Rel_pixel"] * gdelta
        if i > 0:
            if len(lsfs[0]["DELTWAVE"]) != len(a["DELTWAVE"]):
                b = QTable()
                b["DELTWAVE"] = lsfs[0]["DELTWAVE"]
                b["52x2.0"] = np.interp(
                    b["DELTWAVE"], a["DELTWAVE"], a["52x2.0"], left=0.0, right=0.0
                )
                a = b
        lsfs.append(a)
    # bug fix: the upper offset was previously also computed with min(),
    # so the "maximum" LSF extent was the negative minimum offset
    minlsfdwave = min(lsfs[0]["DELTWAVE"])
    maxlsfdwave = max(lsfs[0]["DELTWAVE"])

    # crop wide to include full possible lsf range
    # bug fix: the lower bound used "grange[0] - minlsfdwave"; with a negative
    # minimum offset that *narrowed* the crop instead of widening it
    # NOTE(review): assumes DELTWAVE spans negative to positive offsets
    # (Rel_pixel centered on zero) -- confirm against the LSF tables
    gvals = (moddata["WAVELENGTH"] >= (grange[0] + minlsfdwave)) & (
        moddata["WAVELENGTH"] <= (grange[1] + maxlsfdwave)
    )
    incwmoddata = moddata[:][gvals]
    # convolve
    outcwmoddata = moddata[:][gvals]
    if applylsfs:
        # for each wavelength, use average weighting with the appropriate LSF
        clsfwave = lsfs[0]["DELTWAVE"]
        for i, cwave in enumerate(outcwmoddata["WAVELENGTH"]):
            # generate LSFs at each wavelength by interpolating/extrapolating
            # from the 2 provided LSFs or just replicating a single LSFs
            if nlsfs == 1:
                clsf = lsfs[0]["52x2.0"]
            elif nlsfs == 2:
                # bug fix: linear interpolation needs a minus sign so the blend
                # equals lsfs[0] at gwaves[0] and lsfs[1] at gwaves[1]; the
                # previous "+" gave 2*lsfs[1] - lsfs[0] at gwaves[0]
                clsf = lsfs[1]["52x2.0"] - (
                    (gwaves[1] - cwave) / (gwaves[1] - gwaves[0])
                ) * (lsfs[1]["52x2.0"] - lsfs[0]["52x2.0"])
            clsfwave = lsfs[0]["DELTWAVE"] + cwave
            # interpolate onto model wavelength grid
            clsf_int = np.interp(
                outcwmoddata["WAVELENGTH"], clsfwave, clsf, right=0.0, left=0.0
            )
            outcwmoddata["FLUX"][i] = np.average(incwmoddata["FLUX"], weights=clsf_int)
    # crop tight to only include the expected wavelengths
    gvals = (outcwmoddata["WAVELENGTH"] >= grange[0]) & (
        outcwmoddata["WAVELENGTH"] <= grange[1]
    )
    cmoddata = QTable()
    cmoddata["WAVELENGTH"] = outcwmoddata["WAVELENGTH"][gvals]
    cmoddata["FLUX"] = outcwmoddata["FLUX"][gvals]
    cmoddata["STAT-ERROR"] = outcwmoddata["SIGMA"][gvals]
    cmoddata["SYS-ERROR"] = outcwmoddata["SIGMA"][gvals]
    cmoddata["NPTS"] = outcwmoddata["NPTS"][gvals]
    return cmoddata
def mock_stis_data(moddata, applylsfs=True):
    """
    Mock STIS low-resolution grating observations given a model spectrum

    Parameters
    ----------
    moddata : astropy.table
        Model spectrum at high enough resolution to support "convolution" with
        the LSFs
    applylsfs : boolean
        allows for mocking with and without the LSFs

    Returns
    -------
    tablist : list of astropy.tables
        Each entry appropriate for one of the four low resolution gratings
    """
    # one mocked spectrum per low-resolution grating, in wavelength order
    return [
        mock_stis_single_grating(moddata, gname=cgrating, applylsfs=applylsfs)
        for cgrating in ("G140L", "G230L", "G430L", "G750L")
    ]
# Demo/diagnostic driver: overplot the model with and without LSF smearing
# for each of the four gratings.
if __name__ == "__main__":
    # commandline parser
    parser = argparse.ArgumentParser()
    parser.add_argument("--png", help="save figure as a png file", action="store_true")
    parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true")
    args = parser.parse_args()
    # NOTE(review): hard-coded absolute path; only runs on the author's machine
    moddata = QTable.read(
        "/home/kgordon/Python_git/extstar_data/Models/tlusty_BT30000g300v10_full.fits"
    )
    fig, ax = plt.subplots(nrows=4, figsize=(18, 10))
    # setup the plots
    fontsize = 12
    font = {"size": fontsize}
    plt.rc("font", **font)
    plt.rc("lines", linewidth=2)
    plt.rc("axes", linewidth=2)
    plt.rc("xtick.major", width=2)
    plt.rc("ytick.major", width=2)
    # mock with and without the LSFs so both can be compared per grating
    mockobs_wolsfs = mock_stis_data(moddata, applylsfs=False)
    mockobs = mock_stis_data(moddata)
    for i, cmockobs in enumerate(mockobs):
        ax[i].plot(mockobs_wolsfs[i]["WAVELENGTH"], mockobs_wolsfs[i]["FLUX"], "k-")
        # old way of doing things
        # stis_fwhm_pix = 5000.0 / 1000.0
        # g = Gaussian1DKernel(stddev=stis_fwhm_pix / 2.355)
        # nflux = convolve(mockobs_wolsfs[i]["FLUX"].data, g)
        # ax[i].plot(mockobs_wolsfs[i]["WAVELENGTH"], nflux, "r:")
        ax[i].plot(cmockobs["WAVELENGTH"], cmockobs["FLUX"], "b-")
        ax[i].set_ylabel("Flux")
    ax[3].set_xlabel(r"$\lambda$ [$\AA$]")
    fig.tight_layout()
    if args.png:
        fig.savefig("mock_stis_obs.png")
    elif args.pdf:
        fig.savefig("mock_stis_obs.pdf")
    else:
        plt.show()
| [
"kgordon@stsci.edu"
] | kgordon@stsci.edu |
5b9d2d32cc0987d491c966fd7fafda8d931a36aa | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startQiskit_QC76.py | 7e339b6337cf42dff01c14d045fcf73a39befa91 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | # qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the n-qubit benchmark circuit.

    NOTE(review): reads the module-level globals ``E``, ``V``, ``gamma`` and
    ``beta`` that are only assigned in the ``__main__`` block below, and it
    addresses qubit index 3 directly, so it must be called with n >= 4 after
    those globals exist.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    for edge in E:
        k = edge[0]
        l = edge[1]
        # NOTE(review): cp uses k-1/l-1 while p uses k/l -- mixed 0/1-based
        # indexing (k=0 wraps to the last qubit); preserved as part of the
        # generated benchmark's gate sequence
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    prog.rx(2 * beta, range(len(V)))
    # each swap pair cancels logically; kept to preserve the benchmark
    prog.swap(input_qubit[1],input_qubit[0]) # number=5
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[2],input_qubit[0]) # number=7
    prog.swap(input_qubit[2],input_qubit[0]) # number=8
    # circuit end
    return prog
if __name__ == '__main__':
    # Problem definition: 4 vertices and a weighted edge list.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search F1 over (gamma, beta) on [0, pi) and keep the maximizing pair.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
                1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    prog = make_circuit(4)
    sample_shot = 5200
    # bug fix: use a context manager so the CSV handle is always closed, even
    # when one of the IBMQ/backend calls below raises
    with open("../data/startQiskit_QC76.csv", "w") as writefile:
        # prog.draw('mpl', filename=(kernel + '.png'))
        IBMQ.load_account()
        provider = IBMQ.get_provider(hub='ibm-q')
        provider.backends()
        backend = provider.get_backend("ibmq_5_yorktown")
        # Transpile against the fake Yorktown topology, then run on hardware.
        circuit1 = transpile(prog, FakeYorktown())
        circuit1.measure_all()
        prog = circuit1
        info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
9035e78db4008b3c0bde93ae8d77a2fcd26f4761 | 935b9efca392b124d571319568c08ba45446d2a0 | /lino_book/projects/lydia/tests/dumps/18.8.0/courses_line.py | f6c815319873d98bc3450f6d0444c1bee99e56b1 | [
"BSD-2-Clause"
] | permissive | wallento/book | 6efba2baa1e42bb99514a937342000271dfe798b | 8c5a68f30f9ab65479a988608bda66ea6209afd8 | refs/heads/master | 2020-04-06T10:58:01.629671 | 2018-11-07T09:41:54 | 2018-11-07T09:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # -*- coding: UTF-8 -*-
# Generated Lino fixture dump: replays three ``courses_line`` rows through the
# deferred-object loader.  ``logger``, ``loader`` and ``create_courses_line``
# are injected by the surrounding dump-loading machinery, not defined here.
logger.info("Loading 3 objects to table courses_line...")
# fields: id, ref, name, excerpt_title, company, contact_person, contact_role, course_area, topic, description, every_unit, every, event_type, fee, guest_role, options_cat, fees_cat, body_template
loader.save(create_courses_line(1,None,['Individual therapies', '', ''],['', '', ''],None,None,None,'IT',None,['', '', ''],u'W',1,4,2,1,None,1,u''))
loader.save(create_courses_line(2,None,['Life groups', '', ''],['', '', ''],None,None,None,'LG',None,['', '', ''],u'W',1,4,2,1,None,1,u''))
loader.save(create_courses_line(3,None,['Other groups', '', ''],['', '', ''],None,None,None,'OG',None,['', '', ''],u'W',1,5,1,1,None,1,u''))
loader.flush_deferred_objects()
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
b0894045935a4bd7927b3d27d1bcbe9580c0d34e | 683e5c676c02a746ba35ef6045c99983ad4b4b0c | /matomo_sdk_py/matomo_sdk_py.py | be960c84fb4b3446264311e9ee51c37069567f5a | [
"Apache-2.0"
] | permissive | autofitcloud/matomo-sdk-py | f14ec95b6f8c9b94f74dcfa3f81f90612188a0cb | 802434b125da8c9da168a18d82e8f539716df501 | refs/heads/master | 2023-05-24T21:01:30.376179 | 2019-12-04T13:26:46 | 2019-12-04T13:26:46 | 217,569,529 | 0 | 0 | Apache-2.0 | 2023-05-22T22:31:29 | 2019-10-25T16:03:40 | Python | UTF-8 | Python | false | false | 1,773 | py | import requests
# Module-level kill switch: after the first network failure, stop trying for
# the rest of the process so telemetry never slows the CLI down.
SKIP_PING = False


def ping_matomo(action_name, action_base, idsite, uuid_val, matomo_url):
    """
    Gather anonymous usage statistics (best effort -- never raises on
    network failure).

    action_name - same field in matomo
    action_base - website URL in matomo, e.g. https://example.com
    idsite - integer representing ID of website in matomo
    uuid_val - matomo field "uid"
    matomo_url - URL of matomo host, eg https://example.matomo.cloud/piwik.php
    """
    # if any previous attempt failed, just skip it completely
    global SKIP_PING  # http://stackoverflow.com/questions/423379/ddg#423596
    if SKIP_PING:
        return

    from urllib.parse import urljoin, urlencode

    # build action url
    # https://stackoverflow.com/questions/9718541/reconstructing-absolute-urls-from-relative-urls-on-a-page#comment51058834_9718651
    action_url = urljoin(action_base, action_name)

    # https://stackoverflow.com/a/39144239/4126114
    req_i = {
        "idsite": idsite,
        "rec": 1,
        "action_name": action_name,
        "uid": uuid_val,
        # use the UID for matomo's visitor ID,
        # truncated to 16 characters as documented
        # More info at:
        # https://matomo.org/docs/user-id/
        # https://developer.matomo.org/api-reference/tracking-api
        "cid": uuid_val[:16],
        "url": action_url
    }
    payload = {"requests": ["?" + urlencode(req_i)]}

    # use POST instead of GET to avoid arguments showing up in the clear
    # https://developer.matomo.org/api-reference/tracking-api
    try:
        requests.post(matomo_url, json=payload, timeout=1)  # 1 second
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ReadTimeout):
        # ignore network failures (and disable further pings) so telemetry
        # never obstructs the CLI; previously this was two duplicated except
        # blocks with unused ``response``/``error`` bindings and dead ``pass``
        SKIP_PING = True
| [
"shadiakiki1986@gmail.com"
] | shadiakiki1986@gmail.com |
8f6323614670e2e31fd6089d6d719aba5fe21293 | 82b3bcc6467c93c8b84948e7df1ec32fe4c4b004 | /WEEK 4/Day 4/python-lesson-day4/stuff.py | 7a5f26446b1d590d01b86288ca383be2de42f360 | [] | no_license | MrAch26/Developers_Institute | b7868fc94556bfced4cb53537278c3ec17991426 | 840c9710278af033ccdb5f5c3edd7a2a97476aba | refs/heads/master | 2023-03-28T19:31:11.666544 | 2021-04-06T06:54:20 | 2021-04-06T06:54:20 | 298,250,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py |
def test(*args):
print(type(args))
print(args[4])
print("printing Args")
for item in args:
print(item)
# * packs extra positional arguments into a tuple (in a def);
#   in a call it unpacks an iterable into positional arguments
# ** packs extra keyword arguments into a dict (in a def);
#    in a call it unpacks a mapping into keyword arguments
def kwtest(**kwargs):
    """Show that ``**kwargs`` packs keyword arguments into a dict."""
    print(type(kwargs))
    print("printing KWargs")
    for key in kwargs:
        print(key, "-", kwargs[key])
def packitup(*args, **kwargs):
    """Return the packed ``(args, kwargs)`` pair untouched."""
    packed = (args, kwargs)
    return packed
# bug fix: dictionary keys must be string literals here; bare ``name`` and
# ``surname`` raised NameError as soon as this module was executed
result = ((1, 2, 3), {"name": "jon", "surname": "spiller"})
result[0][2] # 3
result[1]['surname'] # "spiller" | [
"MrAch26@users.noreply.github.com"
] | MrAch26@users.noreply.github.com |
40605fa6ce7b50c0bece515198274ad164e27a67 | 9189089752d970ced51a1c50503fce399b93f589 | /create-sw-file-tools/play_with_geonames-v2.py | c8ba486cc37a6e1a1d6455a22448a6bb495b3c04 | [] | no_license | SwannedLakee/Feynman-knowledge-engine | 464b0d187213b28d9e3995c5e27b41e216359046 | 9a09ff8041dda20dadc3368700b718266757cf24 | refs/heads/master | 2023-03-24T05:25:30.320650 | 2020-03-13T12:31:59 | 2020-03-13T12:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | #!/usr/bin/env python3
import sys
from the_semantic_db_code import *
from the_semantic_db_functions import *
from the_semantic_db_processor import *
#C = context_list("geonames AU")
# is new_context() faster than context_list() ??
# Build an in-RAM semantic-db context and load GeoNames records into it.
C = new_context("geonames US")
# NB: may need to filter down to ASCII chars.
#file = "data/ascii-just-adelaide.txt"
#file = "data/ascii-cities15000.txt"
#file = "data/short-play.txt"  # yup. code seems to work!
#file = "data/AU.txt"  # nope! Bugs out on non-ascii chars.
#file = "data/ascii-AU.txt"  # tidied using clean-ascii.sh
#file = "data/ascii-cities15000.txt"
file = "data/ascii-US.txt"       # too big for RAM for now.
#file = "data/ascii-cities1000.txt"
# Each GeoNames dump line is 19 tab-separated fields (unpacked below);
# a malformed line raises ValueError and aborts the run.
with open(file,'r') as f:
  for line in f:
#    print("line:",line)
#    fields = len(line.split("\t"))
#    print("fields:",fields)
    id,name,asciiname,altname,lat,long,feat_class,feat_code,country,cc2,admin1,admin2,admin3,admin4,population,elevation,dem,tz,mod_date = line.split("\t")
#    print("id:",id)
#    print("name:",asciiname)
#    print("lat:",lat)
#    print("long:",long)
#    print("country:",country)
#    print("population:",population)
#    print("dem:",dem)
#    print("tz:",tz)
#    print()
    # one ket per GeoNames id; learn the record's attributes against it
    x = ket("id: " + id)
#    C.learn("id",x,"geonameid: " + id)
    C.add_learn("id",asciiname,x)
    C.learn("name",x,asciiname)
    C.learn("latitude",x,"latitude: " + lat)
    C.learn("latitude-self",x,x.multiply(float(lat)))
    C.learn("longitude",x,"longitude: " + long)
    C.learn("longitude-self",x,x.multiply(float(long)))
    C.learn("country-code",x,"country-code: " + country)
    # population/elevation/tz are optional in the dump; learn only when present
    if int(population) > 0:
      C.learn("population",x,"population: " + population)
      C.learn("population-self",x,x.multiply(int(population)))
    if elevation != '':
      C.learn("elevation",x,"m: " + elevation)
    if tz != '':
      C.learn("tz",x,"time-zone: " + tz)
name = "sw-examples/improved-geonames-us.sw"
save_sw(C,name)
# first play with profiler:
#import cProfile
#cProfile.run('save_sw(C,name)')
#print(C.dump_universe())
| [
"garry@semantic-db.org"
] | garry@semantic-db.org |
be45c52e65245d926bc6bd8a07045f441de50ede | 92137962a84e724df31b63367854349a875f1c43 | /tapis_cli/clients/basic/main.py | 24b0135901c881fa43e4e7cdef09ea4e8c6aea53 | [
"BSD-3-Clause"
] | permissive | TACC-Cloud/tapis-cli | e3a26e79a20d1ada4cb2dc9ef204cae3e385bfe7 | d34e8635d3dbacc8276cf52b6bae04caacd655de | refs/heads/main | 2023-04-08T14:47:27.707885 | 2022-02-13T17:43:26 | 2022-02-13T17:43:26 | 203,083,094 | 11 | 3 | BSD-3-Clause | 2022-04-01T20:23:23 | 2019-08-19T02:21:28 | Python | UTF-8 | Python | false | false | 1,122 | py | from ..http import HTTPFormatOne, HTTPFormatMany
__all__ = ['BasicHTTPFormatOne', 'BasicHTTPFormatMany']
def add_common_arguments(parser):
    """Attach the shared ``--username``/``--password`` options to *parser*.

    Returns the same parser object so calls can be chained.
    """
    for flag, dest, helptext in (('--username', 'username', 'Username'),
                                 ('--password', 'password', 'Password')):
        parser.add_argument(flag, dest=dest, type=str, help=helptext)
    return parser
class BasicHTTPFormatOne(HTTPFormatOne):
    """HTTP+Basic Record Display
    """
    def get_parser(self, prog_name):
        # extend the base command's parser with the shared Basic-auth options
        parser = super(BasicHTTPFormatOne, self).get_parser(prog_name)
        parser = add_common_arguments(parser)
        return parser
    def take_action(self, parsed_args):
        # placeholder implementation: no columns and no row
        return ((), ())
class BasicHTTPFormatMany(HTTPFormatMany):
    """HTTP+Basic Records Listing
    """
    def get_parser(self, prog_name):
        # extend the base command's parser with the shared Basic-auth options
        parser = super(BasicHTTPFormatMany, self).get_parser(prog_name)
        parser = add_common_arguments(parser)
        return parser
    def take_action(self, parsed_args):
        # placeholder implementation: no columns and no rows
        return ((), ())
| [
"vaughn@tacc.utexas.edu"
] | vaughn@tacc.utexas.edu |
f807bce27f1dd1c18871c787b6582c62f5b1e254 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/web/v20210101/get_web_app_domain_ownership_identifier_slot.py | 3b8508cf4c91fe3a7690d3c8355195664eae3ed8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,347 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppDomainOwnershipIdentifierSlotResult',
'AwaitableGetWebAppDomainOwnershipIdentifierSlotResult',
'get_web_app_domain_ownership_identifier_slot',
]
@pulumi.output_type
class GetWebAppDomainOwnershipIdentifierSlotResult:
    """
    A domain specific resource identifier.
    """
    # Generated pulumi output type: __init__ validates each field's runtime
    # type and stores it via pulumi.set; reads go through the @property
    # getters below.  Do not edit by hand -- regenerate instead.
    def __init__(__self__, id=None, kind=None, name=None, type=None, value=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        String representation of the identity.
        """
        return pulumi.get(self, "value")
class AwaitableGetWebAppDomainOwnershipIdentifierSlotResult(GetWebAppDomainOwnershipIdentifierSlotResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Awaitable shim: the unreachable ``yield`` turns this method into a
        # generator so ``await`` works, but it completes immediately and
        # returns a plain (non-awaitable) copy of the result.
        if False:
            yield self
        return GetWebAppDomainOwnershipIdentifierSlotResult(
            id=self.id,
            kind=self.kind,
            name=self.name,
            type=self.type,
            value=self.value)
def get_web_app_domain_ownership_identifier_slot(domain_ownership_identifier_name: Optional[str] = None,
                                                 name: Optional[str] = None,
                                                 resource_group_name: Optional[str] = None,
                                                 slot: Optional[str] = None,
                                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppDomainOwnershipIdentifierSlotResult:
    """
    A domain specific resource identifier.

    :param str domain_ownership_identifier_name: Name of domain ownership identifier.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will delete the binding for the production slot.
    """
    # Pack the invoke arguments with the wire-format (camelCase) key names.
    __args__ = dict()
    __args__['domainOwnershipIdentifierName'] = domain_ownership_identifier_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['slot'] = slot
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the azure-native provider; typ= tells pulumi
    # how to deserialize the reply.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20210101:getWebAppDomainOwnershipIdentifierSlot', __args__, opts=opts, typ=GetWebAppDomainOwnershipIdentifierSlotResult).value
    return AwaitableGetWebAppDomainOwnershipIdentifierSlotResult(
        id=__ret__.id,
        kind=__ret__.kind,
        name=__ret__.name,
        type=__ret__.type,
        value=__ret__.value)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
8f2f52a14e7110870ebe759c5b112bb4055bfa0f | 27327e2e0a6844a58a2c7019effabd10f35a652c | /pythonchallenge/crossin_mryk2.py | 4f088c487c0071f4354e39d3957b533f4fca0b84 | [] | no_license | jtr109/AdvancePython | 996b975483502ebfb2c3f03ceb1d1d2b6bbf0b30 | a72926c533d41495d9d1d60a8d020d3fe047d0e2 | refs/heads/master | 2021-01-20T10:59:49.282810 | 2016-08-25T10:09:20 | 2016-08-25T10:09:20 | 66,245,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def sort_and_unique(l):
new_l = list(set(l))
new_l.sort()
return new_l
# Demo: prints [1, 3, 4, 7, 8, 9]
if __name__ == '__main__':
    l = [4, 7, 3, 4, 1, 9, 8, 3, 7]
    print(sort_and_unique(l))
| [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
23017da9226d9dc9b4476b492e8cdfa7b4fb0f17 | e3098a32e5825c88db0f20938ec4ca89054ec52c | /shaney.py | 78ec8bd87bc43fce563e32825057b5ae9c7f92f4 | [] | no_license | yuvipanda/frailgrey | 9e807c25f3e5e08ee537f3c7ac46a534407f74da | 472973aa7477bca7936fd44bc14b8fad62f4e647 | refs/heads/master | 2020-06-05T01:35:47.935109 | 2012-10-05T06:45:41 | 2012-10-05T06:45:41 | 259,656 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | # shaney.py by Greg McFarlane
# some editing by Joe Strout
#
# search for "Mark V. Shaney" on the WWW for more info!
import sys
import random
import string
def generate(text, count):
    """Yield up to ``count`` sentences of Markov-chain babble built from ``text``.

    Builds a second-order chain: every (word, word) bigram maps to the list
    of words that followed it in ``text``.  Bigrams whose second word ends
    with '.' are remembered as sentence-enders, so output is emitted at
    sentence boundaries.

    :param text: source text to learn from (split on whitespace)
    :param count: number of sentences to yield
    :yields: generated sentences, each ending with a '.'-terminated word
    """
    words = text.split()
    end_sentence = []   # bigram keys whose second word ends a sentence
    transitions = {}    # (w1, w2) -> list of words observed to follow
    prev1 = ''
    prev2 = ''
    for word in words:
        if prev1 and prev2:
            key = (prev2, prev1)
            # Original used py2-only dict.has_key(); setdefault is the
            # py3-compatible equivalent of the append-or-create branch.
            transitions.setdefault(key, []).append(word)
            if prev1.endswith('.'):
                end_sentence.append(key)
        prev2 = prev1
        prev1 = word
    if not end_sentence:
        # Too little text to find a sentence boundary: the original would
        # crash calling random.choice([]); yield nothing instead.
        return
    key = ()
    sentence = ""
    while True:
        if key in transitions:
            word = random.choice(transitions[key])
            sentence = sentence + word + ' '
            key = (key[1], word)
            if key in end_sentence:
                yield sentence
                sentence = ""
                count = count - 1
                if count <= 0:
                    break
        else:
            # Dead end (or initial empty key): restart from a random
            # sentence-ending bigram, keeping any accumulated words.
            key = random.choice(end_sentence)
| [
"yuvipanda@gmail.com"
] | yuvipanda@gmail.com |
740b55aeecc180691958ce5d79ae04df440cc154 | ee00ebe5e71c36b05fbff993b19e9723b963313f | /45_jump_game_2.py | 4a4f2be0a2b148c4e181e07c60f39a8a23966d34 | [] | no_license | 26XINXIN/leetcode | f365560d93604a28abf399707b333f3c11f924ec | 78ed11f34fd03e9a188c9c6cb352e883016d05d9 | refs/heads/master | 2021-06-28T16:31:45.103879 | 2020-09-19T20:33:55 | 2020-09-19T20:33:55 | 144,975,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | class Solution:
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
first, last = 0, 0
jump_times = 0
while last < n - 1:
# print(first, last)
farest = 0
for i in range(first, last + 1):
farest = farest if farest > i + nums[i] else i + nums[i]
first = last
last = farest
jump_times += 1
return jump_times
# Smoke test: one jump from index 0 reaches the last index, so this prints 1.
print(Solution().jump([1,2]))
"yangxin.nlp@bytedance.com"
] | yangxin.nlp@bytedance.com |
039fd47273a7f03c29529774896c1ad054dd0856 | 5bd8909ecedbc68b23e2bf6d4560a02b4eea3fa1 | /blog/migrations/0008_auto_20201228_0115.py | 8aa17a3ceebb7508b529e2be38c5e11866ee05ee | [] | no_license | HadiGhazali/zoomit | 5c0ca881ed43a1f0baccb90d88309c35ac549e74 | 39a14394e84d86e434f506f270e80084710a4507 | refs/heads/main | 2023-02-19T11:31:18.949557 | 2021-01-20T18:14:55 | 2021-01-20T18:14:55 | 318,006,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # Generated by Django 3.1.4 on 2020-12-28 01:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: rename HitCount.date to create_date, add
    HitCount.update_date, and redefine UrlHit.post / UrlHit.url.
    """

    # Must be applied after 0007 so the models being altered exist.
    dependencies = [
        ('blog', '0007_auto_20201225_1529'),
    ]

    operations = [
        migrations.RenameField(
            model_name='hitcount',
            old_name='date',
            new_name='create_date',
        ),
        migrations.AddField(
            model_name='hitcount',
            name='update_date',
            # Nullable so existing rows need no backfill value;
            # auto_now_add sets the timestamp only at row creation.
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='urlhit',
            name='post',
            # One-to-one link to Post; deleting the post cascades to its hit row.
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='url_hit', related_query_name='url_hit', to='blog.post', verbose_name='post'),
        ),
        migrations.AlterField(
            model_name='urlhit',
            name='url',
            field=models.CharField(max_length=150),
        ),
    ]
| [
"hadivardanjani1378@gmail.com"
] | hadivardanjani1378@gmail.com |
a4a7daf902c1db9cb57fbf3b4c0b6878a28e5589 | e21599d08d2df9dac2dee21643001c0f7c73b24f | /Others/Modules/xml/create_xml.py | 943aeadcfe0f48fa947e92b0b54c945162ad1537 | [] | no_license | herolibra/PyCodeComplete | c7bf2fb4ce395737f8c67749148de98a36a71035 | 4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b | refs/heads/master | 2022-07-17T05:39:03.554760 | 2020-05-03T07:00:14 | 2020-05-03T07:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | # coding=utf-8
import xml.dom.minidom
# 在内存中创建一个空的文档
doc = xml.dom.minidom.Document()
# 创建一个根节点Managers对象
root = doc.createElement('Managers')
# 设置根节点的属性
root.setAttribute('company', 'xx科技')
root.setAttribute('address', '科技软件园')
# 将根节点添加到文档对象中
doc.appendChild(root)
managerList = [{'name' : 'joy', 'age' : 27, 'sex' : '女'},
{'name' : 'tom', 'age' : 30, 'sex' : '男'},
{'name' : 'ruby', 'age' : 29, 'sex' : '女'}
]
for i in managerList :
nodeManager = doc.createElement('Manager')
nodeName = doc.createElement('name')
# 给叶子节点name设置一个文本节点,用于显示文本内容
nodeName.appendChild(doc.createTextNode(str(i['name'])))
nodeAge = doc.createElement("age")
nodeAge.appendChild(doc.createTextNode(str(i["age"])))
nodeSex = doc.createElement("sex")
nodeSex.appendChild(doc.createTextNode(str(i["sex"])))
# 将各叶子节点添加到父节点Manager中,
# 最后将Manager添加到根节点Managers中
nodeManager.appendChild(nodeName)
nodeManager.appendChild(nodeAge)
nodeManager.appendChild(nodeSex)
root.appendChild(nodeManager)
# 开始写xml文档
fp = open('Manager.xml', 'w')
doc.writexml(fp, indent='\t', addindent='\t', newl='\n', encoding="utf-8")
| [
"zengyuetian@cloutropy.com"
] | zengyuetian@cloutropy.com |
dd31f3e5ecd70f29f6e610e0bb210939483e3274 | bd4812ba7af196d2e866cbf2935b2e7308d95066 | /python/leetcode/389_find_difference.py | 7133e92ba94094b445c901ba3128895d677dbe32 | [
"Apache-2.0"
] | permissive | yxun/notebook | f507201e15c4376f0655121724254c0d5275c3b1 | 00eb1953d872a9a93a13d7cf23d8e4ed641d1ce7 | refs/heads/master | 2023-09-01T03:50:48.142295 | 2023-08-17T12:11:25 | 2023-08-17T12:11:25 | 207,569,654 | 2 | 2 | Apache-2.0 | 2023-08-17T12:11:26 | 2019-09-10T13:38:49 | Java | UTF-8 | Python | false | false | 1,128 | py | #%%
"""
- Find the Difference
- https://leetcode.com/problems/find-the-difference/
- Easy
Given two strings s and t which consist of only lowercase letters.
String t is generated by random shuffling string s and then add one more letter at a random position.
Find the letter that was added in t.
Example:
Input:
s = "abcd"
t = "abcde"
Output:
e
Explanation:
'e' is the letter that was added.
"""
#%%
##
class S1:
    def findTheDifference(self, s, t):
        """Return the one character present in t but not in s.

        Sort both strings; the first position where they disagree is the
        inserted character.  If s is exhausted without a mismatch, the
        extra character must be the last one of sorted t.
        """
        a, b = sorted(s), sorted(t)
        for x, y in zip(a, b):
            if x != y:
                return y
        return b[-1]
#%%
class S2:
    def findTheDifference(self, s, t):
        """Return the character added to t.

        Tally +1 for each character of s and -1 for each character of t;
        every character cancels out except the added one, which ends with
        a count of magnitude 1.
        """
        tally = {}
        for ch in s:
            tally[ch] = tally.get(ch, 0) + 1
        for ch in t:
            tally[ch] = tally.get(ch, 0) - 1
        for ch in tally:
            if abs(tally[ch]) == 1:
                return ch
#%%
class S3:
    def findTheDifference(self, s, t):
        """Return the extra character of t via multiset subtraction."""
        from collections import Counter
        extra = Counter(t) - Counter(s)  # exactly one key survives
        return next(iter(extra))
| [
"yuanlin.yxu@gmail.com"
] | yuanlin.yxu@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.