blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c31515b7bee2f75d50298d6e5f8034a79cfcdcc | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/dba09ec8b643897d28ddeb551b50ebe871f56568test_npcdmod.py | dba09ec8b643897d28ddeb551b50ebe871f56568 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 7,988 | py | #!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test the npcd broker module
#
import os, sys, string, time
from multiprocessing import Queue
from shinken_test import unittest, ShinkenTest
from shinken.objects.module import Module
from shinken.modulesctx import modulesctx
# Resolve the npcdmod module under test through the Shinken module context.
npcdmod_broker = modulesctx.get_module('npcdmod')
Npcd_broker = npcdmod_broker.Npcd_broker
# Check for thread switches less often (Python 2 interpreter tuning for tests).
sys.setcheckinterval(10000)
# Minimal module configuration object handed to the Npcd_broker instances below.
modconf = Module()
modconf.module_name = "ncpd"  # NOTE(review): looks like a typo for "npcd" -- confirm it is not load-bearing
modconf.module_type = npcdmod_broker.properties['type']
modconf.modules = []
modconf.properties = npcdmod_broker.properties.copy()
class TestNpcd(ShinkenTest):
    """Functional tests for the npcdmod broker module.

    Drives a fake scheduler loop, feeds the resulting broks into the
    npcdmod module, and inspects the perfdata file it writes.
    """
    def add(self, b):
        # Collect broks by id so the tests can inspect them later.
        self.broks[b.id] = b
    def fake_check(self, ref, exit_status, output="OK"):
        """Simulate the execution of one check on *ref* with a fixed result."""
        print "fake", ref
        now = time.time()
        ref.schedule()
        check = ref.actions.pop()
        self.sched.add(check) # check is now in sched.checks[]
        # fake execution
        check.check_time = now
        check.output = output
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)
    def scheduler_loop(self, count, reflist):
        """Run *count* fake scheduling passes over the (obj, exit_status,
        output) triples in *reflist*, consuming results after each pass."""
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.consume_results()
            self.worker_loop()
            # Reset in-progress state between iterations.
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            #time.sleep(60 + 1)
    def worker_loop(self):
        """Pretend to be a poller/reactionner: mark every runnable action done."""
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False)
        actions = self.sched.get_to_run_checks(False, True)
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        #self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            #print "---> fake return of action", a.id
            a.status = 'inpoller'
            a.exit_status = 0
            self.sched.put_results(a)
        #self.show_actions()
        #print "------------ worker loop end ----------------"
    def update_broker(self):
        """Flush all pending broks from the scheduler into the npcdmod module."""
        self.sched.get_new_broks()
        ids = self.sched.brokers['Default-Broker']['broks'].keys()
        ids.sort()
        # Replay broks in id order so the module sees them chronologically.
        for i in ids:
            brok = self.sched.brokers['Default-Broker']['broks'][i]
            brok.prepare()
            self.npcdmod_broker.manage_brok(brok)
        self.sched.broks = {}
    def print_header(self):
        # Pretty banner with the current test id for readable console output.
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"
    def write_correct_config(self):
        """Write a complete npcd.cfg in the current directory."""
        # NOTE(review): the writes have no trailing "\n", so all three
        # settings end up on one line -- confirm this is intended.
        file = open("npcd.cfg", "w")
        file.write("perfdata_file = /tmp/pfnerf")
        file.write("perfdata_spool_dir = /tmp/pnp4shinken/var/perfdata")
        file.write("perfdata_spool_filename=pferf")
        file.close()
    def write_incomplete_config(self):
        """Write an npcd.cfg that is missing the perfdata_spool_dir setting."""
        file = open("npcd.cfg", "w")
        file.write("perfdata_file = /tmp/pfnerf")
        file.write("perfdata_spool_filename=pferf")
        file.close()
    def test_write_perfdata_file(self):
        """The module should create ./perfdata from the processed broks."""
        self.print_header()
        if os.path.exists("./perfdata"):
            os.unlink("./perfdata")
        self.npcdmod_broker = Npcd_broker(modconf, None, './perfdata', '.', 'perfdata-target', 15)
        self.npcdmod_broker.properties['to_queue'] = 0
        self.npcdmod_broker.init()
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        print "got initial broks"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = [] # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = [] # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = [] # no hostchecks on critical checkresults
        # Two passes with perfdata in the plugin output of each object.
        self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
        self.update_broker()
        self.assert_(os.path.exists("./perfdata"))
        # Clean up the file the module created.
        if os.path.exists("./perfdata"):
            self.npcdmod_broker.logfile.close()
            os.unlink("./perfdata")
    def test_npcd_got_missing_conf(self):
        """A brok about an unknown service should make the module queue a
        "need data" request on its from_q queue."""
        self.print_header()
        if os.path.exists("./perfdata"):
            os.unlink("./perfdata")
        self.npcdmod_broker = Npcd_broker(modconf, None, './perfdata', '.', 'perfdata-target', 15)
        self.npcdmod_broker.properties['to_queue'] = 0
        self.npcdmod_broker.from_q = Queue()
        self.npcdmod_broker.init()
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        print "got initial broks"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = [] # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = [] # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = [] # no hostchecks on critical checkresults
        # We are a bad guy, and we change the service name
        svc.service_description = "Unkown"
        # and we force it to raise an asking now
        self.npcdmod_broker.last_need_data_send = 0
        self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
        self.update_broker()
        self.assert_(os.path.exists("./perfdata"))
        if os.path.exists("./perfdata"):
            self.npcdmod_broker.logfile.close()
            os.unlink("./perfdata")
        print "Len" * 20, self.npcdmod_broker.from_q.qsize()
        # Exactly one "need data" request should have been queued.
        self.assert_(self.npcdmod_broker.from_q.qsize() == 1)
        self.npcdmod_broker.from_q.get()
        self.npcdmod_broker.from_q.close()
if __name__ == '__main__':
    import cProfile
    # Kept for optional profiling runs; see the commented runctx call below.
    command = """unittest.main()"""
    unittest.main()
    #cProfile.runctx( command, globals(), locals(), filename="Thruk.profile" )
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
c5424efea98053414c6bf63cc096de8998a411cf | 6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212 | /Everyday_alg/2021/09/2021_09_29/super-washing-machines.py | 4dd95588ead44ee7f767566be49366cffbb885ba | [] | no_license | Alexanderklau/Algorithm | 7c38af7debbe850dfc7b99cdadbf0f8f89141fc6 | eac05f637a55bfcc342fa9fc4af4e2dd4156ea43 | refs/heads/master | 2022-06-12T21:07:23.635224 | 2022-06-12T08:12:07 | 2022-06-12T08:12:07 | 83,501,915 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # coding: utf-8
__author__ = 'Yemilice_lau'
"""
假设有 n 台超级洗衣机放在同一排上。开始的时候,每台洗衣机内可能有一定量的衣服,也可能是空的。
在每一步操作中,你可以选择任意 m (1 <= m <= n) 台洗衣机,与此同时将每台洗衣机的一件衣服送到相邻的一台洗衣机。
给定一个整数数组 machines 代表从左至右每台洗衣机中的衣物数量,请给出能让所有洗衣机中剩下的衣物的数量相等的 最少的操作步数 。
如果不能使每台洗衣机中衣物的数量相等,则返回 -1 。
示例 1:
输入:machines = [1,0,5]
输出:3
解释:
第一步: 1 0 <-- 5 => 1 1 4
第二步: 1 <-- 1 <-- 4 => 2 1 3
第三步: 2 1 <-- 3 => 2 2 2
示例 2:
输入:machines = [0,3,0]
输出:2
解释:
第一步: 0 <-- 3 0 => 1 2 0
第二步: 1 2 --> 0 => 1 1 1
示例 3:
输入:machines = [0,2,0]
输出:-1
解释:
不可能让所有三个洗衣机同时剩下相同数量的衣物。
"""
class Solution(object):
    """Solver for the "Super Washing Machines" balancing problem."""

    def findMinMoves(self, machines):
        """Return the minimum number of moves needed to equalize all
        machines, or -1 when the total load cannot be split evenly.

        Greedy/prefix argument: the answer is bounded below both by the
        largest single-machine surplus and by the largest absolute net
        flow that must cross any boundary between a prefix and the rest.

        :type machines: List[int]
        :rtype: int
        """
        total = sum(machines)
        count = len(machines)
        if total % count != 0:
            return -1
        target = total // count
        best = 0
        net_flow = 0  # dresses that must cross the current boundary
        for load in machines:
            surplus = load - target
            net_flow += surplus
            best = max(best, abs(net_flow), surplus)
        return best
"429095816@qq.com"
] | 429095816@qq.com |
5d228ece9d0927cce69c4e5f117d66a84a74b75c | 2b2dc38c581d4313dee547af7f9714df29b9e000 | /tests/clients/test_model.py | 5f2080c485fbb4f707a727549250b41ab93f756b | [
"MIT",
"Apache-2.0"
] | permissive | jkeelan/faculty | 72145791171b3b32ee98c956e36d0f65ca74ff87 | 3cf50f243fba1bfe7a346de88654d3616ac35b15 | refs/heads/master | 2020-11-25T23:12:55.335878 | 2019-12-15T15:34:00 | 2019-12-15T15:34:00 | 228,884,443 | 0 | 0 | Apache-2.0 | 2019-12-18T16:55:28 | 2019-12-18T16:55:27 | null | UTF-8 | Python | false | false | 5,077 | py | # Copyright 2018-2019 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from datetime import datetime
import pytest
import attr
from dateutil.tz import UTC
from marshmallow import ValidationError
from faculty.clients.model import (
ExperimentModelSource,
ExperimentModelSourceSchema,
Model,
ModelClient,
ModelSchema,
ModelVersion,
ModelVersionSchema,
)
# Fixture identifiers shared by all tests below.
PROJECT_ID = uuid.uuid4()
MODEL_ID = uuid.uuid4()
VERSION_ID = uuid.uuid4()
USER_ID = uuid.uuid4()
# A timestamp and the string form the API serializes it to.
REGISTERED_AT = datetime(2019, 8, 19, 15, 23, 53, 268000, tzinfo=UTC)
REGISTERED_AT_STRING = "2019-08-19T15:23:53.268Z"
# Object / JSON-payload pairs used to exercise the marshmallow schemas.
EXPERIMENT_MODEL_SOURCE = ExperimentModelSource(
    experiment_id=43, experiment_run_id=uuid.uuid4()
)
EXPERIMENT_MODEL_SOURCE_JSON = {
    "type": "experiment",
    "experimentId": EXPERIMENT_MODEL_SOURCE.experiment_id,
    "experimentRunId": str(EXPERIMENT_MODEL_SOURCE.experiment_run_id),
}
MODEL_VERSION = ModelVersion(
    id=VERSION_ID,
    version_number=23,
    registered_at=REGISTERED_AT,
    registered_by=USER_ID,
    artifact_path="scheme:path/to/artifact",
    source=EXPERIMENT_MODEL_SOURCE,
)
MODEL_VERSION_JSON = {
    "modelVersionId": str(VERSION_ID),
    "modelVersionNumber": MODEL_VERSION.version_number,
    "registeredAt": REGISTERED_AT_STRING,
    "registeredBy": str(USER_ID),
    "artifactPath": MODEL_VERSION.artifact_path,
    "source": EXPERIMENT_MODEL_SOURCE_JSON,
}
MODEL = Model(
    id=MODEL_ID,
    name="model name",
    description="model description",
    user_ids=[USER_ID],
    latest_version=MODEL_VERSION,
)
MODEL_JSON = {
    "modelId": str(MODEL_ID),
    "name": MODEL.name,
    "description": MODEL.description,
    "users": [str(USER_ID)],
    "latestVersion": MODEL_VERSION_JSON,
}
def test_experiment_model_source_schema():
    """Loading the JSON payload should yield EXPERIMENT_MODEL_SOURCE."""
    loaded = ExperimentModelSourceSchema().load(EXPERIMENT_MODEL_SOURCE_JSON)
    assert loaded == EXPERIMENT_MODEL_SOURCE
def test_model_version_schema():
    """A full model-version payload should deserialize to MODEL_VERSION."""
    loaded_version = ModelVersionSchema().load(MODEL_VERSION_JSON)
    assert loaded_version == MODEL_VERSION
def test_model_schema():
    """A full model payload should deserialize to MODEL."""
    loaded_model = ModelSchema().load(MODEL_JSON)
    assert loaded_model == MODEL
def test_model_schema_without_latest_version():
    """A payload missing latestVersion should load with latest_version=None."""
    payload = {k: v for k, v in MODEL_JSON.items() if k != "latestVersion"}
    loaded_model = ModelSchema().load(payload)
    assert loaded_model == attr.evolve(MODEL, latest_version=None)
@pytest.mark.parametrize(
    "schema_cls", [ExperimentModelSourceSchema, ModelVersionSchema, ModelSchema]
)
def test_schemas_invalid(schema_cls):
    """Every schema must reject an empty payload with ValidationError."""
    with pytest.raises(ValidationError):
        schema_cls().load({})
def test_model_client_get(mocker):
    """ModelClient.get should GET the single-model endpoint with a plain schema."""
    mocker.patch.object(ModelClient, "_get", return_value=MODEL)
    schema_mock = mocker.patch("faculty.clients.model.ModelSchema")
    client = ModelClient(mocker.Mock())
    result = client.get(PROJECT_ID, MODEL_ID)
    assert result == MODEL
    schema_mock.assert_called_once_with()
    expected_path = "/project/{}/model/{}".format(PROJECT_ID, MODEL_ID)
    ModelClient._get.assert_called_once_with(
        expected_path, schema_mock.return_value
    )
def test_model_client_list(mocker):
    """ModelClient.list should GET /project/{id}/model with a many=True schema.

    Fix: the expected path was built with a stray second format argument
    (MODEL_ID) although the template has only one placeholder; the extra
    argument was silently ignored by str.format and has been removed.
    """
    mocker.patch.object(ModelClient, "_get", return_value=[MODEL])
    schema_mock = mocker.patch("faculty.clients.model.ModelSchema")
    client = ModelClient(mocker.Mock())
    assert client.list(PROJECT_ID) == [MODEL]
    schema_mock.assert_called_once_with(many=True)
    ModelClient._get.assert_called_once_with(
        "/project/{}/model".format(PROJECT_ID),
        schema_mock.return_value,
    )
def test_model_client_get_version(mocker):
    """get_version should GET the single-version endpoint with a plain schema."""
    mocker.patch.object(ModelClient, "_get", return_value=MODEL_VERSION)
    schema_mock = mocker.patch("faculty.clients.model.ModelVersionSchema")
    client = ModelClient(mocker.Mock())
    result = client.get_version(PROJECT_ID, MODEL_ID, VERSION_ID)
    assert result == MODEL_VERSION
    schema_mock.assert_called_once_with()
    expected_path = "/project/{}/model/{}/version/{}".format(
        PROJECT_ID, MODEL_ID, VERSION_ID
    )
    ModelClient._get.assert_called_once_with(
        expected_path, schema_mock.return_value
    )
def test_model_client_list_versions(mocker):
    """list_versions should GET the version collection with a many=True schema."""
    mocker.patch.object(ModelClient, "_get", return_value=[MODEL_VERSION])
    schema_mock = mocker.patch("faculty.clients.model.ModelVersionSchema")
    client = ModelClient(mocker.Mock())
    result = client.list_versions(PROJECT_ID, MODEL_ID)
    assert result == [MODEL_VERSION]
    schema_mock.assert_called_once_with(many=True)
    expected_path = "/project/{}/model/{}/version".format(PROJECT_ID, MODEL_ID)
    ModelClient._get.assert_called_once_with(
        expected_path, schema_mock.return_value
    )
| [
"wacrozier@gmail.com"
] | wacrozier@gmail.com |
0944e48d424011351fdc9b9140279c65238f531c | 2125593138c50b1fba5e46cd4d88d6c04d0b417a | /06_DJANGO_ADVANCE/03_IMAGE_UPLOAD/sns/migrations/0002_posting_likes.py | 0524e0f4d31ff8c728982f782cc3dca428482c9c | [] | no_license | minkishome/TIL-master | 5f0e6ef61b34a2983961ccf44f7523603ccb5907 | d8edc0ff8abff3b2239a2d751eee263b722013a6 | refs/heads/master | 2023-01-21T00:43:30.165535 | 2020-08-25T14:56:18 | 2020-08-25T14:56:18 | 203,070,283 | 0 | 1 | null | 2023-01-05T01:08:10 | 2019-08-19T00:18:31 | Python | UTF-8 | Python | false | false | 531 | py | # Generated by Django 2.2.6 on 2019-10-21 07:18
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment above); adds the
    # many-to-many "likes" relation between Posting and the user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sns', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='posting',
            name='likes',
            # blank=True: a posting may have no likes; reverse accessor on
            # the user side is `like_postings`.
            field=models.ManyToManyField(blank=True, related_name='like_postings', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"minkishome@gmail.com"
] | minkishome@gmail.com |
77443ff141c26d56888e06b61fc6591b0dfe0500 | 13b84a8620f928159b5205d89db61df0e5bfb60a | /20200613/prob_2.py | 11416e76908279b28ca8cfa999550e6de97e1875 | [] | no_license | steinstadt/CodeForces | 548c5a5fe23fba512a4b675eaf264bfce9b44c1e | db130008d3bd1a957bcad9ab40f3a9461c534174 | refs/heads/master | 2021-04-16T15:14:19.736276 | 2020-11-21T07:36:58 | 2020-11-21T07:36:58 | 249,365,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # Problem 1154 A - Restoring Three Numbers
# Read the three integers and order them ascending.
numbers = sorted(int(token) for token in input().split())
# Each answer is the gap between the largest number and each input number.
largest = numbers[-1]
differences = [largest - numbers[i] for i in range(3)]
# Emit the three differences separated by single spaces.
print(" ".join(str(d) for d in differences))
| [
"steinstadt@keio.jp"
] | steinstadt@keio.jp |
666e013da80d8695c8333bae27b1d872f86c8955 | a51b1814a9bf2fdcf880772fefaa2ab79e8c7308 | /runestone/chapter-2/proper-python-class.py | 6eda5173d70e4fa37807360532296ec5fc1d76da | [] | no_license | 369geofreeman/MITx_6.00.1x | d38913805168440969034e1d82611b0dbcd7a29a | ba84f70cc4e7cfbd4b685b742aa87d3f85cbbf59 | refs/heads/master | 2023-04-21T01:55:08.538290 | 2021-05-13T13:03:50 | 2021-05-13T13:03:50 | 282,055,845 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | # Writing a proper Python class
# A class that works well in the Python ecosystem.
# Each class should have a docstring to provide some level of documentation on how to use the class.
#
# Each class should have a __str__ magic method to give it a meaninigful string representation.
#
# Each class should have a proper __repr__ magic method for representation in the interactive shell, the debugger, and other cases where string conversion does not happen.
#
# Each class should be comparable so it can be sorted and meaningfully compared with other instances. At a minimum this means implementing __eq__ and __lt__.
#
# You should think about access control each instance variable. Which attributes do you want to make public, which attributes do you want to make read only, and which attributes do you want to control or do value checking on before you allow them to be changed.
#
# If the class is a container for other classes then there are some further considerations:
#
# You should be able to find out how many things the container holds using len
#
# You should be able to iterate over the items in the container.
#
# You may want to allow users to access the items in the container using the square bracket index notation.
# In this example we build a basic implementation of the MSDie class:
import random
class MSDie:
    """A die with a configurable number of sides.

    Instance variables:
        num_sides -- how many faces the die has
        current_value -- the face shown by the most recent roll
    """

    def __init__(self, num_sides):
        self.num_sides = num_sides
        # Start with a random face showing.
        self.current_value = self.roll()

    def roll(self):
        """Re-roll the die; store and return the new face value (1..num_sides)."""
        face = random.randrange(1, self.num_sides + 1)
        self.current_value = face
        return face

    def __str__(self):
        return str(self.current_value)

    def __repr__(self):
        return "MSDie({}) : {}".format(self.num_sides, self.current_value)
# Demo: roll a six-sided die five times, printing each face via __str__.
my_die = MSDie(6)
for i in range(5):
    print(my_die)
    my_die.roll()
# Printing a list uses __repr__ for the contained dice.
d_list = [MSDie(6), MSDie(20)]
print(d_list)
| [
"geofreeman369@gmail.com"
] | geofreeman369@gmail.com |
426bcc79c656b5cffc242436a10ed9cfa654f2bb | 2c80605554a75d02d57278a9339217d9e7c37f5d | /bank/tpot/pipeline_gen_5_idx_1_2019.09.10_04-14-24.py | a2b5819cd32b6cf812a35dd23a1e7ebac6f852be | [] | no_license | zhangsheng377/Kesci | e28cbe155d8ff4be3307500a76644ec403dc86ae | 967bb3362ad1c6225eef5ca40baf610e9b0aeb6f | refs/heads/master | 2020-07-07T01:59:17.349753 | 2020-02-02T11:30:28 | 2020-02-02T11:30:28 | 203,208,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from xgboost import XGBClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
# Pipeline exported by TPOT; the placeholder path and separator below must
# be filled in before this script can run.
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
# random_state=None: the split (and therefore the results) differs between runs.
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'].values, random_state=None)
# Average CV score on the training set was:0.9090338029086228
exported_pipeline = make_pipeline(
    RobustScaler(),
    XGBClassifier(learning_rate=0.1, max_depth=6, min_child_weight=3, n_estimators=100, nthread=1, subsample=0.9000000000000001)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"435878393@qq.com"
] | 435878393@qq.com |
cc9e00941de4cb85b76f01ec8444c338862ce15c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_mooching.py | bb4b21a3d0e13344ce2578feccbf8078202a50f3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.nouns._mooch import _MOOCH
# class header
class _MOOCHING(_MOOCH, ):
    """Noun entry for "mooching": inherits behaviour from the base _MOOCH
    word class and overrides the identifying attributes."""
    def __init__(self,):
        _MOOCH.__init__(self)
        self.name = "MOOCHING"
        self.specie = 'nouns'
        self.basic = "mooch"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
24c147e69c88039955887900d3b812f300ecb882 | 1e187e5aa5fad78541a7afaec38fedbd3e9c81c7 | /src/incidents/__init__.py | 3628ecd6a052f4aa0beae7429dc1bc1db97ad1ca | [] | no_license | mattrobenolt/incidents | 0e60bf3d2b792ff4da1cdf6fc4252a245d645559 | 2b59fab8b762138c5adc3a0a65377fee10d41c95 | refs/heads/master | 2020-05-17T02:04:33.725661 | 2014-09-20T07:50:00 | 2014-09-20T07:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | def autodiscover():
pass
default_app_config = 'incidents.apps.IncidentsConfig'
from .celery import app as celery_app # noqa
| [
"matt@ydekproductions.com"
] | matt@ydekproductions.com |
5733cd16bf8346d7f4fafeea4b8362e1ec616684 | 5bc5acc7cac75d26312f8b73ed8a4e80f7e144a2 | /admin_interface/templatetags/admin_interface_tags.py | d2e07d2f091f0cf9c9b752083c8d6c3c583425dd | [
"MIT"
] | permissive | gilsonbp/django-admin-interface | 1019b40ef5d09f0e18d4ef55b37d49416e549876 | f6ce51c839be0bfedda5deb440a2588117a28765 | refs/heads/master | 2020-12-28T20:43:13.038317 | 2016-07-12T16:55:01 | 2016-07-12T16:55:01 | 64,975,304 | 0 | 0 | MIT | 2020-04-10T20:00:59 | 2016-08-05T00:09:29 | Python | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
from django import template
from admin_interface.models import Theme
# Tag registry for this template-tag module.
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_admin_interface_theme(context):
    """Return the active Theme, creating the default theme if none is active."""
    # LIMIT 1 query for the active theme (slicing keeps the SQL identical
    # regardless of how many themes are flagged active).
    active_themes = list(Theme.objects.filter(active=True)[:1])
    if active_themes:
        return active_themes[0]
    return Theme.get_or_create_default_theme()
| [
"fabio.caccamo@gmail.com"
] | fabio.caccamo@gmail.com |
7b98333ac3fd36da58a48b8384faba21df2c93de | 07490c73801dd7d055f852732126506088524725 | /tests/test_extension.py | b3bf510dafc6ce1d4d3a083966ce2359bb25727c | [
"Apache-2.0"
] | permissive | consideRatio/jupyterlab_iframe | 202a3740ff718cf93bdcac8dd3171c92fdf0f8f9 | 3c5f51352225ca9235d6e6d378e22bbdf983912f | refs/heads/master | 2021-02-15T20:40:07.750413 | 2020-02-28T03:17:36 | 2020-02-28T03:17:36 | 244,929,842 | 0 | 0 | Apache-2.0 | 2020-03-04T15:03:33 | 2020-03-04T15:03:32 | null | UTF-8 | Python | false | false | 876 | py | # for Coverage
from mock import patch, MagicMock
from jupyterlab_iframe.extension import load_jupyter_server_extension, IFrameHandler, ProxyHandler, ProxyWSHandler
class TestExtension:
    """Smoke tests for the jupyterlab_iframe server extension handlers."""
    def test_load_jupyter_server_extension(self):
        # The extension only needs a web_app with settings and a base_url.
        m = MagicMock()
        m.web_app.settings = {}
        m.web_app.settings['base_url'] = '/test'
        load_jupyter_server_extension(m)
    def test_handler(self):
        """IFrameHandler.get should run without raising."""
        import tornado.web
        app = tornado.web.Application()
        m = MagicMock()
        h = IFrameHandler(app, m)
        h._transforms = []
        h.get()
    def test_proxy_handler(self):
        """ProxyHandler.get should fetch the proxied URL (requests.get mocked)."""
        import tornado.web
        app = tornado.web.Application()
        m = MagicMock()
        h = ProxyHandler(app, m)
        h._transforms = []
        with patch('requests.get') as m2:
            m2.return_value.text = 'test'
            h.get()
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
9c12235910c2089f308d5efc8451bb32b2a84b3e | 5d25b942873144363546c8b0ccbd2df4fbec0aa0 | /utils/tserial.py | 0c5267fd8d46ea264eddd7f9d4d6cfea9556b728 | [
"Apache-2.0"
] | permissive | Sumalyo/mjmech | 228d23b3690b1244ec0c6825c231c2368d805045 | c222725b9ee799a595c53d1f85195e013a600a04 | refs/heads/master | 2021-04-14T04:59:54.043304 | 2020-03-19T12:57:37 | 2020-03-19T12:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,893 | py | #!/usr/bin/env python
# Copyright 2015 Josh Pieper, jjp@pobox.com. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''%prog [options]
Interact with a serial device capable of reporting tlog style
telemetry. Optionally log the data to disk in a tlog file.
'''
import optparse
import serial
import struct
class Serial(object):
    """Serial link to a device speaking the 'tel' telemetry protocol."""
    def __init__(self, options):
        self.options = options
        self.port = serial.Serial(port=options.serial,
                                  baudrate=options.baudrate)
        # Try to stop anything that might be spewing.
        self.stop()
        # Try to dump anything that is still in the receive queue.
        self.port.setTimeout(0.1)
        result = self.port.read(8192)
        print 'ignored %d bytes on start' % len(result)
    def stop(self):
        # Ask the device to stop emitting telemetry.
        self.port.write('\ntel stop\n')
    def readline(self):
        """Read the next meaningful line, skipping blanks and 'unknown' echoes."""
        while True:
            line = self.port.readline()
            if line.startswith('unknown'):
                continue
            if line.strip() == '':
                continue
            return line
    def list(self):
        """Return the list of telemetry channel names the device reports."""
        result = []
        self.port.write('\ntel list\n')
        while True:
            line = self.readline()
            if line.startswith("OK"):
                break
            result.append(line.strip())
        return result
    def schema(self, name):
        """Fetch and return the raw binary schema for channel *name*."""
        self.port.write('\ntel schema %s\n' % name)
        line = self.readline()
        assert line.startswith('schema ' + name), 'got unexpected schema response: ' + line
        # Payload is a little-endian uint32 length followed by that many bytes.
        size_str = self.port.read(4)
        assert len(size_str) == 4
        size = struct.unpack('<I', size_str)[0]
        data = self.port.read(size)
        return data
    def rate(self, name, rate):
        # Request periodic emission of *name* every *rate* ms (1 = every update).
        self.port.write('\ntel rate %s %d\n' % (name, rate))
    def read_next_data(self):
        """Block until the next 'emit' record arrives; return (name, payload)."""
        # Read until we get an "emit" line.
        self.port.setTimeout(None)
        line = ''
        while True:
            line = self.readline()
            if line.startswith('emit '):
                break
        name = line.split(' ')[1].strip()
        # Same length-prefixed payload framing as schema().
        size_str = self.port.read(4)
        assert len(size_str) == 4
        size = struct.unpack('<I', size_str)[0]
        data = self.port.read(size)
        return name, data
class LogWriter(object):
    """Write telemetry records to disk in the TLOG0002 container format."""
    def __init__(self, name):
        self.fd = open(name, 'wb')
        # File magic identifying the format version.
        self.fd.write('TLOG0002')
        self.fd.flush()
        self.next_identifier = 1
        # channel name -> integer identifier used in data blocks
        self.names = {}
    # Block type codes used in the TLOG stream.
    BLOCK_SCHEMA = 1
    BLOCK_DATA = 2
    def make_pstring(self, data):
        # Length-prefixed string: little-endian uint32 length, then the bytes.
        return struct.pack('<I', len(data)) + data
    def _make_schema_block(self, identifier, name, schema):
        # Schema block body: identifier, reserved uint32, name pstring, schema bytes.
        result = ''
        result += struct.pack('<II', identifier, 0)
        result += self.make_pstring(name)
        result += schema
        return result
    def _make_data_block(self, identifier, data):
        # Data block body: identifier, reserved uint16, raw payload.
        result = struct.pack('<IH', identifier, 0) + data
        return result
    def write_schema(self, name, schema):
        """Register *name*, assign it an identifier, and emit its schema block."""
        identifier = self.next_identifier
        self.next_identifier += 1
        self.names[name] = identifier
        self.write_block(self.BLOCK_SCHEMA,
                         self._make_schema_block(identifier, name, schema))
    def write_data(self, name, data):
        """Emit one data block for a channel previously passed to write_schema."""
        identifier = self.names[name]
        self.write_block(self.BLOCK_DATA,
                         self._make_data_block(identifier, data))
    def write_block(self, block_id, data):
        # Block framing: uint16 type code, uint32 payload length, payload.
        self.fd.write(struct.pack('<HI', block_id, len(data)) + data)
        self.fd.flush()
self.fd.flush()
def main():
    """Command-line entry point: negotiate channels, optionally log to disk."""
    # optparse pulls usage/description from the module docstring.
    usage, description = __doc__.split('\n\n', 1)
    parser = optparse.OptionParser(usage=usage, description=description)
    parser.add_option('--serial', '-s', default='/dev/ttyACM0')
    parser.add_option('--baudrate', '-b', type='int', default=115200)
    parser.add_option('--list', '-l', action='store_true')
    parser.add_option('--name', '-n', action='append', default=[])
    parser.add_option('--rate', '-r', type='int', default=1,
                      help='1 is every update, otherwise ms')
    parser.add_option('--output', '-o', help='output tlog file')
    options, args = parser.parse_args()
    ser = Serial(options)
    if options.list:
        # Just print the available channels and exit.
        print '\n'.join(ser.list())
        return
    if len(options.name) == 0:
        # If no names are specified, get everything.
        print 'getting names'
        options.name = ser.list()
    output = None
    if options.output:
        output = LogWriter(options.output)
    print 'getting schemas'
    # Get the schema for all the requested things.
    for name in options.name:
        schema = ser.schema(name)
        if output:
            output.write_schema(name, schema)
        print 'got schema for %s len %d' % (name, len(schema))
    print 'setting rates'
    # Now start everything being sent out.
    for name in options.name:
        ser.rate(name, options.rate)
    print 'starting to record'
    # Now, we just continue reading, looking for more data to come
    # out.
    try:
        records = 0
        while True:
            name, data = ser.read_next_data()
            if output:
                output.write_data(name, data)
            records += 1
            print 'count: %d\r' % records,
    finally:
        pass
        #ser.stop()
if __name__ == '__main__':
main()
| [
"jjp@pobox.com"
] | jjp@pobox.com |
84ad07d973ff96ef23edea1992f9a7f987eb23ca | dcc0bb6b30ab22a2b5aea3b0f2f5bf403c28dc9b | /awx_collection/test/awx/conftest.py | bdaa0db3bf529a94233f8be6be7abf9d089906cb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] | permissive | EStork09/awx | e358b8eaecdb59693fa7d9883555aa5259cc1641 | 0c0e172caf7036737a511cbd532b4dc72d900725 | refs/heads/devel | 2020-08-22T01:18:12.674380 | 2019-10-20T00:45:02 | 2019-10-20T00:45:02 | 178,763,134 | 0 | 0 | Apache-2.0 | 2019-04-09T16:38:38 | 2019-04-01T01:24:13 | Python | UTF-8 | Python | false | false | 4,187 | py | import io
import json
import datetime
import importlib
from contextlib import redirect_stdout
from unittest import mock
from requests.models import Response
import pytest
from awx.main.tests.functional.conftest import _request
from awx.main.models import Organization, Project, Inventory, Credential, CredentialType
def sanitize_dict(din):
    '''Recursively sanitize Django response data in place so it contains only
    builtin JSON-serializable types, allowing it to be used to populate a
    requests Response object.

    - native JSON scalars (bool, int, float, str, None) pass through
    - datetimes are converted to ISO-8601 strings
    - lists and dicts are sanitized recursively (mutated in place)
    - anything else (e.g. translation proxies) is coerced with str()
    '''
    if isinstance(din, (int, float, str, type(None), bool)):
        # Native JSON types, no problem.  float was previously missing here
        # and fell through to the str() fallback, corrupting numeric fields.
        return din
    elif isinstance(din, datetime.datetime):
        return din.isoformat()
    elif isinstance(din, list):
        for i in range(len(din)):
            din[i] = sanitize_dict(din[i])
        return din
    elif isinstance(din, dict):
        # Snapshot the keys; only values are reassigned during iteration.
        for k in list(din):
            din[k] = sanitize_dict(din[k])
        return din
    else:
        return str(din)  # translation proxies often not string but stringlike
@pytest.fixture
def run_module():
    """Pytest fixture returning a helper that runs an Ansible module end to
    end against the in-process AWX test API instead of a live server."""
    def rf(module_name, module_params, request_user):
        def new_request(self, method, url, **kwargs):
            # Translate a tower_cli HTTP call into a Django test-client call.
            kwargs_copy = kwargs.copy()
            if 'data' in kwargs:
                kwargs_copy['data'] = json.loads(kwargs['data'])
            # make request
            rf = _request(method.lower())
            django_response = rf(url, user=request_user, expect=None, **kwargs_copy)
            # requests library response object is different from the Django response, but they are the same concept
            # this converts the Django response object into a requests response object for consumption
            resp = Response()
            py_data = django_response.data
            sanitize_dict(py_data)
            resp._content = bytes(json.dumps(django_response.data), encoding='utf8')
            resp.status_code = django_response.status_code
            return resp
        stdout_buffer = io.StringIO()
        # Requires specific PYTHONPATH, see docs
        # Note that a proper Ansiballz explosion of the modules will have an import path like:
        # ansible_collections.awx.awx.plugins.modules.{}
        # We should consider supporting that in the future
        resource_module = importlib.import_module('plugins.modules.{}'.format(module_name))
        # Ansible params can be passed as an invocation argument or over stdin
        # this short circuits within the AnsibleModule interface
        def mock_load_params(self):
            self.params = module_params
        with mock.patch.object(resource_module.TowerModule, '_load_params', new=mock_load_params):
            # Call the test utility (like a mock server) instead of issuing HTTP requests
            with mock.patch('tower_cli.api.Session.request', new=new_request):
                # Ansible modules return data to the mothership over stdout
                with redirect_stdout(stdout_buffer):
                    try:
                        resource_module.main()
                    except SystemExit:
                        pass # A system exit indicates successful execution
        module_stdout = stdout_buffer.getvalue().strip()
        result = json.loads(module_stdout)
        return result
    return rf
@pytest.fixture
def organization():
return Organization.objects.create(name='Default')
@pytest.fixture
def project(organization):
return Project.objects.create(
name="test-proj",
description="test-proj-desc",
organization=organization,
playbook_files=['helloworld.yml'],
local_path='_92__test_proj',
scm_revision='1234567890123456789012345678901234567890',
scm_url='localhost',
scm_type='git'
)
@pytest.fixture
def inventory(organization):
return Inventory.objects.create(
name='test-inv',
organization=organization
)
@pytest.fixture
def machine_credential(organization):
ssh_type = CredentialType.defaults['ssh']()
ssh_type.save()
return Credential.objects.create(
credential_type=ssh_type, name='machine-cred',
inputs={'username': 'test_user', 'password': 'pas4word'}
)
| [
"arominge@redhat.com"
] | arominge@redhat.com |
c7f42f7749b90a9c3dd47f55c00466c5b63d3493 | 97884252481ff208519194ecd63dc3a79c250220 | /pyobs/robotic/lco/scripts/script.py | 0c57a0bb4cb505d10961df96b185ca12ba1e1049 | [
"MIT"
] | permissive | pyobs/pyobs-core | a1f30137d7f991bad4e115de38f543e59a6e30d2 | 2d7a06e5485b61b6ca7e51d99b08651ea6021086 | refs/heads/master | 2023-09-01T20:49:07.610730 | 2023-08-29T09:20:05 | 2023-08-29T09:20:05 | 174,351,157 | 9 | 3 | NOASSERTION | 2023-09-14T20:39:48 | 2019-03-07T13:41:27 | Python | UTF-8 | Python | false | false | 2,064 | py | import logging
from typing import Dict, Any, Optional
from pyobs.robotic.scripts import Script
from pyobs.robotic import TaskSchedule, TaskArchive, TaskRunner
log = logging.getLogger(__name__)
class LcoScript(Script):
"""Auto SCRIPT script for LCO configs."""
def __init__(self, scripts: Dict[str, Script], **kwargs: Any):
"""Initialize a new LCO auto focus script.
Args:
scripts: External scripts to run
"""
Script.__init__(self, **kwargs)
# store
self.scripts = scripts
def _get_config_script(self, config: Dict[str, Any]) -> Script:
"""Get config script for given configuration.
Args:
config: Config to create runner for.
Returns:
Script for running config
Raises:
ValueError: If could not create runner.
"""
# what do we run?
config_type = config["extra_params"]["script_name"]
if config_type not in self.scripts:
raise ValueError('No script found for script type "%s".' % config_type)
# create script handler
return self.get_object(self.scripts[config_type], Script, configuration=config)
async def can_run(self) -> bool:
"""Checks, whether this task could run now.
Returns:
True, if task can run now.
"""
# get config runner
runner = self._get_config_script(self.configuration)
# if any runner can run, we proceed
return await runner.can_run()
async def run(
self,
task_runner: TaskRunner,
task_schedule: Optional[TaskSchedule] = None,
task_archive: Optional[TaskArchive] = None,
) -> None:
"""Run script.
Raises:
InterruptedError: If interrupted
"""
# get config runner
runner = self._get_config_script(self.configuration)
# run it
await runner.run(task_runner=task_runner, task_schedule=task_schedule, task_archive=task_archive)
__all__ = ["LcoScript"]
| [
"thusser@uni-goettingen.de"
] | thusser@uni-goettingen.de |
837f297f47a3d31dca3856ca30dc1c07ea466c51 | 0c469c4100fe9d352e83731688e388062a3c55c7 | /Graphs/1135. Connecting Cities With Minimum Cost.py | 079eb10f622723f7ab046130350ee32299f45381 | [] | no_license | asperaa/back_to_grind | 9e055c7e6561384e5b7ae52f01063e4beb34a298 | 5ea1976b9d5c6d04800e296e45e8ff90fdde5001 | refs/heads/master | 2022-12-16T18:32:01.443743 | 2020-09-05T13:29:39 | 2020-09-05T13:29:39 | 254,910,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | """We are the captains of oue ships, and we stay 'till the end. We see our stories through.
"""
"""1135. Connecting Cities With Minimum Cost
"""
class Solution:
def minimumCost(self, n ,edges):
parent = [i for i in range(n+1)]
rank = [0] * (n+1)
self.cost = 0
self.connected_components = n
def find(x):
if parent[x] != x:
parent[x] = find(parent[x])
return parent[x]
def union(x, y, w):
rx, ry = find(x), find(y)
if rx != ry:
if rank[rx] < rank[ry]:
parent[rx] = ry
elif rank[ry] < rank[rx]:
parent[ry] = rx
else:
parent[rx] = ry
rank[ry] += 1
self.connected_components -= 1
self.cost += w
edges.sort(key=lambda x: x[2])
for u, v, w in edges:
union(u, v, w)
if self.connected_components == 1:
return self.cost
return -1 | [
"adityaankur44@gmail.com"
] | adityaankur44@gmail.com |
01822b078b05a3660a8aaa3154dd9afeb8922100 | ef605b30b118dbb5902a360c2dc74634f8d8023a | /ve/Lib/site-packages/tests/test_archaism.py | edf8f96ad05816c5b8f19b27af3ad2eee643f92d | [] | no_license | lugnitdgp/avskr2.0 | 691b82e529fba667ebf0885b52f0c58b5076f3cb | 278e4f6f8ce4677e213150716704330d83debf9f | refs/heads/master | 2022-12-06T03:10:38.743582 | 2018-10-29T13:14:38 | 2018-10-29T13:14:38 | 151,551,343 | 4 | 10 | null | 2022-11-22T13:59:07 | 2018-10-04T09:58:45 | Python | UTF-8 | Python | false | false | 620 | py | """Tests for archaism.misc check."""
from __future__ import absolute_import
from .check import Check
from proselint.checks.archaism import misc as chk
class TestCheck(Check):
"""The test class for archaism.misc."""
__test__ = True
@property
def this_check(self):
"""Bolierplate."""
return chk
def test_smoke(self):
"""Basic smoke test for archaism.misc."""
assert self.passes("""Smoke phrase with nothing flagged.""")
assert self.passes("""I want to sleep, and maybe dream.""")
assert not self.passes("""I want to sleep, perchance to dream.""")
| [
"divyanshumehta@outlook.com"
] | divyanshumehta@outlook.com |
e717b68e6f98bb33f19c3f68eb602455a799f7f6 | dc80f94c1a244002db468fc7242d5fcaafe439dc | /powerdns_client/models/error.py | f5299db89c3ad43e6f2cbbe70c6da422abc9ead3 | [
"MIT"
] | permissive | sanvu88/python-powerdns-client | f675e1ee162bb76190b41ddf0cfc34e2305a757b | 57dd0460995a5407c6f5c963553b4df0f4859667 | refs/heads/master | 2023-02-04T07:05:31.095951 | 2020-12-15T16:48:15 | 2020-12-15T16:48:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,975 | py | # coding: utf-8
"""
PowerDNS Authoritative HTTP API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.0.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Error(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str',
'errors': 'list[str]'
}
attribute_map = {
'error': 'error',
'errors': 'errors'
}
def __init__(self, error=None, errors=None): # noqa: E501
"""Error - a model defined in Swagger""" # noqa: E501
self._error = None
self._errors = None
self.discriminator = None
self.error = error
if errors is not None:
self.errors = errors
@property
def error(self):
"""Gets the error of this Error. # noqa: E501
A human readable error message # noqa: E501
:return: The error of this Error. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this Error.
A human readable error message # noqa: E501
:param error: The error of this Error. # noqa: E501
:type: str
"""
if error is None:
raise ValueError("Invalid value for `error`, must not be `None`") # noqa: E501
self._error = error
@property
def errors(self):
"""Gets the errors of this Error. # noqa: E501
Optional array of multiple errors encountered during processing # noqa: E501
:return: The errors of this Error. # noqa: E501
:rtype: list[str]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this Error.
Optional array of multiple errors encountered during processing # noqa: E501
:param errors: The errors of this Error. # noqa: E501
:type: list[str]
"""
self._errors = errors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Error, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"67791576+underline-bot@users.noreply.github.com"
] | 67791576+underline-bot@users.noreply.github.com |
2784d73cd9183dd106ee06fba4d4c4e10409acc4 | 4fdaee9f2612a8c429991a2042dffcee80e7a641 | /rootfs/qboxhd/rootfs/usr/local/lib/enigma2/python/Screens/Scart.py | 64e4a69e009284ed03b409f8e33f5041d03a4d0e | [] | no_license | OpenSH4/qboxhd | 841072db3b0eaecdcac116b5f96268d47115cdec | 91dd37a5311b5c53fb088ab0ce902ee49552ece0 | refs/heads/master | 2020-09-07T17:55:36.114816 | 2012-01-08T21:33:02 | 2012-01-08T21:33:02 | 220,866,062 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | from qboxhd import QBOXHD
from Screen import Screen
from MessageBox import MessageBox
from Components.AVSwitch import AVSwitch
class Scart(Screen):
def __init__(self, session, start_visible=True):
Screen.__init__(self, session)
self.avswitch = AVSwitch()
if start_visible:
self.onExecBegin.append(self.showMessageBox)
self.msgVisible = None
else:
self.msgVisible = False
def showMessageBox(self):
if self.msgVisible is None:
self.onExecBegin.remove(self.showMessageBox)
self.msgVisible = False
if not self.msgVisible:
self.msgVisible = True
self.avswitch.setInput("SCART")
self.msgBox = self.session.openWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR)
def MsgBoxClosed(self, *val):
self.msgBox = None
self.switchToTV()
def switchToTV(self, *val):
if self.msgVisible:
if self.msgBox:
self.msgBox.close() # ... MsgBoxClosed -> switchToTV again..
return
self.avswitch.setInput("ENCODER")
self.msgVisible = False
| [
"duopaguilar@0410bcea-ab32-4fec-9f21-c18eae94034e"
] | duopaguilar@0410bcea-ab32-4fec-9f21-c18eae94034e |
37f45c2414abdc4e1e1c66d8ba58d473235b0f3b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/numenta_nupic/nupic-master/src/nupic/frameworks/viz/network_visualization.py | 6c37b7608dbabfbc809f5fc33aba4327f6ef7336 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,674 | py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import networkx as nx
from nupic.frameworks.viz import DotRenderer as DEFAULT_RENDERER
class NetworkVisualizer(object):
"""
Network visualization framework entry point.
Usage:
NetworkVisualizer(network).render()
You may optionally specify a specific renderers. e.g.:
viz = NetworkVisualizer(network)
viz.render(renderer=GraphVizRenderer)
viz.render(renderer=NetworkXRenderer)
"""
def __init__(self, network):
"""
:param network: nupic.engine.network
"""
self.network = network
def export(self):
"""
Exports a network as a networkx MultiDiGraph intermediate representation
suitable for visualization.
:return: networkx MultiDiGraph
"""
graph = nx.MultiDiGraph()
# Add regions to graph as nodes, annotated by name
regions = self.network.getRegions()
for idx in xrange(regions.getCount()):
regionPair = regions.getByIndex(idx)
regionName = regionPair[0]
graph.add_node(regionName, label=regionName)
# Add links between regions to graph as edges, annotate by input-output
# name pairs
for linkName, link in self.network.getLinks():
graph.add_edge(link.getSrcRegionName(),
link.getDestRegionName(),
src=link.getSrcOutputName(),
dest=link.getDestInputName())
return graph
def render(self, renderer=DEFAULT_RENDERER):
"""
Render network.
:param renderer: Constructor parameter to a "renderer" implementation.
Return value for which must have a "render" method that accepts a single
argument (a networkx graph instance).
"""
renderer().render(self.export())
| [
"659338505@qq.com"
] | 659338505@qq.com |
05267bb3879a6e0fadb7cb02e558d512ea7128ca | e9538b7ad6d0ce0ccfbb8e10c458f9e0b73926f6 | /tests/unit/modules/remote_management/lxca/test_lxca_cmms.py | 96d4f5c61556d09603513b44da7afb31557e60e3 | [] | no_license | ansible-collection-migration/misc.not_a_real_collection | b3ef8090c59de9ac30aca083c746ec3595d7f5f5 | 7ab1af924a3db4ada2f714b09bb392614344cb1e | refs/heads/master | 2020-12-18T13:48:51.849567 | 2020-01-22T17:39:18 | 2020-01-22T17:39:18 | 235,400,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,778 | py | import json
import pytest
from ansible_collections.misc.not_a_real_collection.tests.unit.compat import mock
from ansible_collections.misc.not_a_real_collection.plugins.modules import lxca_cmms
@pytest.fixture(scope='module')
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
def setup_module(close_conn):
close_conn.return_value = True
class TestMyModule():
@pytest.mark.parametrize('patch_ansible_module',
[
{},
{
"auth_url": "https://10.240.14.195",
"login_user": "USERID",
},
{
"auth_url": "https://10.240.14.195",
"login_password": "Password",
},
{
"login_user": "USERID",
"login_password": "Password",
},
],
indirect=['patch_ansible_module'])
@pytest.mark.usefixtures('patch_ansible_module')
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.lxca_cmms.execute_module', autospec=True)
def test_without_required_parameters(self, _setup_conn, _execute_module,
mocker, capfd, setup_module):
"""Failure must occurs when all parameters are missing"""
with pytest.raises(SystemExit):
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = "Fake execution"
lxca_cmms.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'missing required arguments' in results['msg']
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.lxca_cmms.execute_module', autospec=True)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.lxca_cmms.AnsibleModule', autospec=True)
def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
expected_arguments_spec = dict(
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
'cmms_by_chassis_uuid']),
auth_url=dict(required=True),
uuid=dict(default=None),
chassis=dict(default=None),
)
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = []
mod_obj = ansible_mod_cls.return_value
args = {
"auth_url": "https://10.243.30.195",
"login_user": "USERID",
"login_password": "password",
"command_options": "cmms",
}
mod_obj.params = args
lxca_cmms.main()
assert(mock.call(argument_spec=expected_arguments_spec,
supports_check_mode=False) == ansible_mod_cls.call_args)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.lxca_cmms._cmms_by_uuid',
autospec=True)
@mock.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.lxca_cmms.AnsibleModule',
autospec=True)
def test__cmms_empty_list(self, ansible_mod_cls, _get_cmms, _setup_conn, setup_module):
mod_obj = ansible_mod_cls.return_value
args = {
"auth_url": "https://10.243.30.195",
"login_user": "USERID",
"login_password": "password",
"uuid": "3C737AA5E31640CE949B10C129A8B01F",
"command_options": "cmms_by_uuid",
}
mod_obj.params = args
_setup_conn.return_value = "Fake connection"
empty_nodes_list = []
_get_cmms.return_value = empty_nodes_list
ret_cmms = _get_cmms(mod_obj, args)
assert mock.call(mod_obj, mod_obj.params) == _get_cmms.call_args
assert _get_cmms.return_value == ret_cmms
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
a023ca5921b4dae35ed7b05846b33c20b9bb7352 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_rimmed.py | 5ba54997dbd01072557b8d6aa85dd5252bdbec4d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _RIMMED():
def __init__(self,):
self.name = "RIMMED"
self.definitions = rim
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['rim']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cbd5229791354c1ff4eb92cd07ccdc2794be4190 | 9f7512711f78d71a5de7ec54001411cb9c319424 | /contents/serializers.py | 3353c1a830a69e69410f7cedfb9c577b5f4d1ca5 | [] | no_license | charles-co/glc_project | a819ad5b401ba2279901f8f752f7a9331271d376 | ae8c3cba6dcb416d7afa3abbbf439f48003b6e9f | refs/heads/main | 2023-03-29T17:28:05.518810 | 2021-04-03T18:00:16 | 2021-04-03T18:00:16 | 336,250,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | from rest_framework import serializers
from .models import Audio, Video, Podcast, TV
class AudioSerializer(serializers.ModelSerializer):
class Meta:
model = Audio
fields = ['title', 'audio_file', 'created_at']
read_only_fields = ['created_at', 'audio_file', 'title']
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ['title', 'file', 'created_at']
read_only_fields = ['created_at', 'file', 'title']
class PodcastSerializer(serializers.ModelSerializer):
class Meta:
model = Podcast
fields = ['title', 'file', 'created_at']
read_only_fields = ['created_at', 'file', 'title']
class TVSerializer(serializers.ModelSerializer):
class Meta:
model = TV
fields = "__all__" | [
"charlesboy49@gmail.com"
] | charlesboy49@gmail.com |
75e33e57ef1502b4f1a14c2688c82459ca329830 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/215f5b12-5cc5-11e4-af55-00155d01fe08.py | 5f97ccd66399240e21dbbe12dc4b66d9264c974b | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | #!/usr/bin/python
################################################################################
# 215f5b12-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "215f5b12-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook\options\pubcal', 'DisableOfficeOnline')
# Output Lines
self.output = [r'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook\options\pubcal', ('DisableOfficeOnline=' + str(dword))]
if dword == 1:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook\options'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook\options\pubcal'")
cli.powershell(r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\outlook\options\pubcal' -name 'DisableOfficeOnline' -value 1 -Type DWord")
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
f58631458dc7d58f759d59e1985ada8b6acda70d | c5611d343da32ab98e14beaa5df296c43b39de6c | /fec/version/v6_4/F132.py | 2cf21bbc4cd3db3fe21b36a4e31de26196f89912 | [
"Unlicense"
] | permissive | h4ck3rm1k3/FEC-Field-Documentation | 97973fc9cd919cdb161a416647ae5752ef5815d9 | c2f1f36e14c67ac3656c09f801b9f595d3e9f92e | refs/heads/master | 2018-12-28T16:07:52.499054 | 2014-06-07T18:35:49 | 2014-06-07T18:35:49 | 12,922,288 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER COMMITTEE ID NUMBER', 'number': '2'},
{'name': 'TRANSACTION ID NUMBER', 'number': '3'},
{'name': 'BACK REFERENCE TRAN ID NUMBER', 'number': '4'},
{'name': 'BACK REFERENCE SCHED NAME', 'number': '5'},
{'name': 'ENTITY TYPE', 'number': '6'},
{'name': 'CONTRIBUTOR ORGANIZATION NAME', 'number': '7'},
{'name': 'CONTRIBUTOR LAST NAME', 'number': '8'},
{'name': 'CONTRIBUTOR FIRST NAME', 'number': '9'},
{'name': 'CONTRIBUTOR MIDDLE NAME', 'number': '10'},
{'name': 'CONTRIBUTOR PREFIX', 'number': '11'},
{'name': 'CONTRIBUTOR SUFFIX', 'number': '12'},
{'name': 'CONTRIBUTOR STREET 1', 'number': '13'},
{'name': 'CONTRIBUTOR STREET 2', 'number': '14'},
{'name': 'CONTRIBUTOR CITY', 'number': '15'},
{'name': 'CONTRIBUTOR STATE', 'number': '16'},
{'name': 'CONTRIBUTOR ZIP', 'number': '17'},
{'name': 'DONATION DATE', 'number': '18'},
{'name': 'DONATION AMOUNT', 'number': '19'},
{'name': 'DONATION AGGREGATE AMOUNT', 'number': '20'},
{'name': 'MEMO CODE', 'number': '21'},
{'name': 'MEMO TEXT/DESCRIPTION', 'number': '22'},
]
self.fields_names = self.hash_names(self.fields)
| [
"jamesmikedupont@googlemail.com"
] | jamesmikedupont@googlemail.com |
b8978e81877849076f0a1387b6b4004126382019 | 6b8dc095ef6e10c9ccf92e3c6402e80919d747ad | /glad/__main__.py | 4f394de5da9ab3674e0995e5bff47587ea218118 | [
"MIT"
] | permissive | caomw/glad | 5daecc0562e063240d7678ee2e5361e8562f0899 | 1fb8f8e68be000dd8b2c1634083939340ed33b06 | refs/heads/master | 2021-01-17T10:57:14.364606 | 2016-02-16T15:15:36 | 2016-02-16T15:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,594 | py | #!/usr/bin/env python
"""
Uses the official Khronos-XML specs to generate a
GL/GLES/EGL/GLX/WGL Loader made for your needs. Glad currently supports
the languages C, D and Volt.
"""
from collections import namedtuple
import logging
import sys
from glad.opener import URLOpener
from glad.spec import SPECS
import glad.lang
Version = namedtuple('Version', ['major', 'minor'])
logger = logging.getLogger('glad')
def main():
import os.path
import argparse
from argparse import ArgumentParser
opener = URLOpener()
def get_spec(value):
if value not in SPECS:
raise argparse.ArgumentTypeError('Unknown specification')
spec_cls = SPECS[value]
if os.path.exists(value + '.xml'):
logger.info('using local specification: \'%s.xml\'', value)
return spec_cls.from_file(value + '.xml')
logger.info('getting \'%s\' specification from SVN', value)
return spec_cls.from_svn(opener=opener)
def ext_file(value):
msg = 'Invalid extensions argument'
if os.path.exists(value):
msg = 'Invalid extensions file'
try:
with open(value, 'r') as f:
return f.read().split()
except IOError:
pass
else:
return [v.strip() for v in value.split(',') if v]
raise argparse.ArgumentTypeError(msg)
def version(value):
if value is None or len(value.strip()) == 0:
return None
v = value
if '.' not in v:
v = '{}.0'.format(v)
try:
return Version(*map(int, v.split('.')))
except ValueError:
pass
raise argparse.ArgumentTypeError('Invalid version: "{}"'.format(value))
def cmdapi(value):
try:
return dict((p[0], version(p[1])) for p in
(list(map(str.strip, e.split('='))) for e in
filter(bool, map(str.strip, value.split(',')))))
except IndexError:
pass
raise argparse.ArgumentTypeError(
'Invalid api-string: "{}"'.format(value)
)
description = __doc__
parser = ArgumentParser(description=description)
parser.add_argument('--profile', dest='profile',
choices=['core', 'compatibility'],
default='compatibility',
help='OpenGL profile (defaults to compatibility)')
parser.add_argument('--out-path', dest='out', required=True,
help='Output path for loader')
parser.add_argument('--api', dest='api', type=cmdapi,
help='API type/version pairs, like "gl=3.2,gles=", '
'no version means latest')
parser.add_argument('--generator', dest='generator', default='d',
choices=['c', 'c-debug', 'd', 'volt'], required=True,
help='Language to generate the binding for')
parser.add_argument('--extensions', dest='extensions',
default=None, type=ext_file,
help='Path to extensions file or comma separated '
'list of extensions, if missing '
'all extensions are included')
parser.add_argument('--spec', dest='spec', default='gl',
choices=['gl', 'egl', 'glx', 'wgl'],
help='Name of the spec')
parser.add_argument('--no-loader', dest='no_loader', action='store_true')
parser.add_argument('--quiet', dest='quiet', action='store_true')
ns = parser.parse_args()
if not ns.quiet:
logging.basicConfig(
format='[%(asctime)s][%(levelname)s\t][%(name)-7s\t]: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', level=logging.DEBUG
)
spec = get_spec(ns.spec)
if spec.NAME == 'gl':
spec.profile = ns.profile
api = ns.api
if api is None or len(api.keys()) == 0:
api = {spec.NAME: None}
generator_cls, loader_cls = glad.lang.get_generator(
ns.generator, spec.NAME.lower()
)
if loader_cls is None:
return parser.error('API/Spec not yet supported')
loader = loader_cls(api)
loader.disabled = ns.no_loader
logger.info('generating \'%s\' bindings', spec.NAME)
with generator_cls(ns.out, spec, api, ns.extensions, loader=loader, opener=opener) as generator:
generator.generate()
logger.info('generating \'%s\' bindings - done', spec.NAME)
if __name__ == '__main__':
main()
| [
"admin@dav1d.de"
] | admin@dav1d.de |
d851910d19a0d3466e44744e769c7836bf963a17 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2020_03_09_19_03_02_961002.py | b6ec6c3e2fc6cb965c085057e28d3e3249ad4873 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import math
def calcula_gaussiana(x,μ,σ):
a = 1 / (σ * (math.sqrt(2*math.pi)))
e = math.e**(-0.5*((x-μ/σ)**2))
f = a*e
return f | [
"you@example.com"
] | you@example.com |
7d003ce2dd96154be17d0e9c27d616d75141b708 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02664/s139946174.py | a9fab029ec7d95fbc4310c62188b5118675ab097 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | s = input()
for i in range(len(s)):
if s[i] == '?':
if i == 0 and len(s) == 1:
s = s.replace('?','D',1)
elif i == 0 and s[1] == 'D':
s = s.replace('?','P',1)
elif i == 0 and s[1] == 'P':
s = s.replace('?','D',1)
elif i == 0 and s[1] == '?':
s = s.replace('?','D',1)
elif s[i-1] =='P':
s = s.replace('?','D',1)
elif s[i-1] =='D' and (i ==len(s)-1):
s = s.replace('?','D',1)
elif s[i-1] =='D' and (i <len(s)-1 and(s[i+1] == 'P' or s[i+1] == '?')):
s = s.replace('?','D',1)
else:
s = s.replace('?','P',1)
print(s) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
387bbbaa243d5506e9160eaeb8c0d8cae9238225 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_contributions.py | f025a05c56b781cdfac70c6760e2ec39d2810e27 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
from xai.brain.wordbase.nouns._contribution import _CONTRIBUTION
#calss header
class _CONTRIBUTIONS(_CONTRIBUTION, ):
def __init__(self,):
_CONTRIBUTION.__init__(self)
self.name = "CONTRIBUTIONS"
self.specie = 'nouns'
self.basic = "contribution"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bbe80e141ae120828745a2e0c964fe0706b6d465 | 86ca43076bd78484a32b527308ac15ed19844d81 | /tests/configuration_tests.py | eaca320507e178bf5a30dfc5ff75d25cff363b05 | [] | no_license | simonemmott/k2_core | d393ec5685dbb80f5c9301f6f1f3b4eb17feda24 | ef102ac151b3819714aa5f02d5aab8c1235030d6 | refs/heads/master | 2020-06-10T02:08:51.107293 | 2019-06-28T22:25:58 | 2019-06-28T22:25:58 | 193,552,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from unittest import TestCase
class ConfigurationTests(TestCase):
def test_pass(self):
self.assertTrue(True, 'Testing is not working') | [
"simon.emmott@yahoo.co.uk"
] | simon.emmott@yahoo.co.uk |
f3b8ddd933238a900cc93cfe07fca3e814be5673 | 4526ed71f39d70111c3787ec90b4932a183c452c | /2016/Pyquen_DYtoMuMu_M_30_TuneZ2_8TeV16_pythia6_cfi.py | f842b4cf0e3b2f718e982fc53fa749012b9573a1 | [] | no_license | CMS-HIN-dilepton/MCRequest | 773f414739efc529dc957a044232478b1c4f1c03 | ff49d22fde2c4a006fe7fa02d4cf53d794f91888 | refs/heads/master | 2021-05-02T12:16:51.891664 | 2020-06-20T18:35:52 | 2020-06-20T18:35:52 | 45,127,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2Settings_cfi import *
generator = cms.EDFilter("PyquenGeneratorFilter",
comEnergy = cms.double(8160.0),
aBeamTarget = cms.double(208.0),
protonSide = cms.untracked.int32(1),
qgpInitialTemperature = cms.double(1.0), ## initial temperature of QGP; allowed range [0.2,2.0]GeV;
qgpProperTimeFormation = cms.double(0.1), ## proper time of QGP formation; allowed range [0.01,10.0]fm/c;
hadronFreezoutTemperature = cms.double(0.14),
doRadiativeEnLoss = cms.bool(True), ## if true, perform partonic radiative en loss
doCollisionalEnLoss = cms.bool(False),
qgpNumQuarkFlavor = cms.int32(0), ## number of active quark flavors in qgp; allowed values: 0,1,2,3
numQuarkFlavor = cms.int32(0), ## to be removed
doIsospin = cms.bool(True),
angularSpectrumSelector = cms.int32(0), ## angular emitted gluon spectrum :
embeddingMode = cms.bool(False),
backgroundLabel = cms.InputTag("generator"), ## ineffective in no mixing
doQuench = cms.bool(False),
bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
cFlag = cms.int32(0), ## centrality flag
bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
bMax = cms.double(0.0), ## max impact param (fm); valid only if cflag_!=0
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL=0 !User defined processes',
'MSUB(1)=1 !Incl Z0/gamma* production',
'MSTP(43)=3 !Both Z0 and gamma*',
'MDME(174,1)=0 !Z decay into d dbar',
'MDME(175,1)=0 !Z decay into u ubar',
'MDME(176,1)=0 !Z decay into s sbar',
'MDME(177,1)=0 !Z decay into c cbar',
'MDME(178,1)=0 !Z decay into b bbar',
'MDME(179,1)=0 !Z decay into t tbar',
'MDME(182,1)=0 !Z decay into e- e+',
'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=0 !Z decay into tau- tau+',
'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
'CKIN(1)=30. !Minimum sqrt(s_hat) value (=Z mass)'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/HI/Pyquen_DYtoMuMu_M_30_TuneZ2_5TeV02_pythia6_cfi.py,v $'),
annotation = cms.untracked.string('PYQUEN DYmumu Mass 30 Tune Z2 at 5.023 TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"emilien.chapon@cern.ch"
] | emilien.chapon@cern.ch |
fe3b3ed217100b8e438eba5edcd8c3e95c0dfa46 | 4b2a333ddb07ba7b5dbbf382eee5851468e69635 | /sharpy/interfaces/unit_cache.py | b01665ffbbab5da46dfcfb35a3d1a203ba25c465 | [
"MIT"
] | permissive | rwill128/sharpy-sc2 | 36810c69d2563b7cc839d353e47c3c3c9cc255cb | 999a101fd2f6ecb8bccb405bf3ace276be08c112 | refs/heads/develop | 2023-01-29T11:52:55.687375 | 2020-12-06T11:26:35 | 2020-12-06T11:26:35 | 319,097,836 | 1 | 0 | MIT | 2020-12-06T18:03:31 | 2020-12-06T18:03:31 | null | UTF-8 | Python | false | false | 1,824 | py | from abc import abstractmethod, ABC
from typing import Optional, List, Union, Iterable, Dict
from sc2 import UnitTypeId
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
class IUnitCache(ABC):
@property
@abstractmethod
def own_unit_cache(self) -> Dict[UnitTypeId, Units]:
pass
@property
@abstractmethod
def enemy_unit_cache(self) -> Dict[UnitTypeId, Units]:
pass
@property
@abstractmethod
def own_townhalls(self) -> Units:
"""Returns all of our own townhalls."""
pass
@property
@abstractmethod
def enemy_townhalls(self) -> Units:
"""Returns all known enemy townhalls."""
pass
@property
@abstractmethod
def enemy_workers(self) -> Units:
pass
@property
@abstractmethod
def mineral_fields(self) -> Dict[Point2, Unit]:
pass
@property
@abstractmethod
def mineral_wall(self) -> Units:
"""Returns all known mineral wall mineral field units."""
pass
@abstractmethod
def by_tag(self, tag: int) -> Optional[Unit]:
pass
@abstractmethod
def by_tags(self, tags: List[int]) -> Units:
pass
@abstractmethod
def own(self, type_id: Union[UnitTypeId, Iterable[UnitTypeId]]) -> Units:
"""Returns all own units of the specified type(s)."""
pass
@abstractmethod
def enemy(self, type_id: Union[UnitTypeId, Iterable[UnitTypeId]]) -> Units:
"""Returns all enemy units of the specified type(s)."""
pass
@abstractmethod
def own_in_range(self, position: Point2, range: Union[int, float]) -> Units:
pass
@abstractmethod
def enemy_in_range(self, position: Point2, range: Union[int, float], only_targetable=True) -> Units:
pass
| [
"aki.vanttinen@sedgestudios.com"
] | aki.vanttinen@sedgestudios.com |
4aee4411b6209081f04e28197515d86a72e8e17b | 38f15289bd03cef50f1013926a641c789fe338aa | /pythonCodeReference/PythonExamples/Examples/Example2/code1.py | 0d4beb6f70ab9daddd4947171b36dc2f8b6e80c2 | [] | no_license | Recorichardretardo/Python | e9f9a9b8291dc015fe7438329da066754d0ba965 | 5a810b9d53257e321dd15bd105100da89cef7835 | refs/heads/master | 2021-05-26T08:35:23.271610 | 2020-04-10T12:02:46 | 2020-04-10T12:02:46 | 254,060,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import random
friends = ["Rolf","Bob","Jen"]
print("Jen" in friends)
number = random.randint(2,9)
user_input = input("Enter 'y' if you would like to play: ")
if user_input in ("y","Y"):
user_number = int(input("Guess our number: "))
if user_number == number:
print("you guessed correctly!")
else:
print("Sorry, it's wrong!") | [
"you@example.com"
] | you@example.com |
72e3ff38fd3cbb9042e232d08d74054b4ba37d32 | 219d7cf7cf00b778ff1a5709406c144fcf2132f3 | /Conditional Statements - Exercise/07. World Swimming Record.py | 3e853322a0847be5d79e473bc6890c3d9585ab8e | [] | no_license | SilviaKoynova/Softuni-Programming-Basics-Python | e8e175419383815c65c4e110fdb2b752d940e887 | 0dfef0850f2cb8471dfee1af89f137be4e887cb8 | refs/heads/main | 2023-07-13T00:35:09.389302 | 2021-08-27T07:43:45 | 2021-08-27T07:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from math import floor
record = float(input())
distance_in_metres = float(input())
time_in_seconds = float(input())
distance_needed = distance_in_metres * time_in_seconds
slowing = floor(distance_in_metres / 15)
slowing_add = slowing * 12.5
total_time = slowing_add + distance_needed
if record > total_time:
print(f"Yes, he succeeded! The new world record is {total_time:.2f} seconds.")
else:
print(f"No, he failed! He was {total_time - record:.2f} seconds slower.") | [
"noreply@github.com"
] | SilviaKoynova.noreply@github.com |
30f772c9774da896ad9f128a42b7578e18156e0e | 7426522061b222e8d3336b18ff941bb98ff9626c | /qtoggleserver/core/api/funcs/firmware.py | d85ee25f86309a65400e2f535bc0d019072cc04b | [
"Apache-2.0"
] | permissive | DigitEgal/qtoggleserver | 82833aaeb6f0bdad5f28243f132a639f4b406001 | 54b6ac53742af9529fd349d4fc207b0dc8a38d3b | refs/heads/dev | 2023-05-07T14:49:11.273023 | 2021-04-30T20:40:08 | 2021-04-30T20:40:08 | 360,039,836 | 0 | 0 | Apache-2.0 | 2021-04-21T05:18:08 | 2021-04-21T05:13:07 | null | UTF-8 | Python | false | false | 1,739 | py |
import logging
from qtoggleserver.core import api as core_api
from qtoggleserver.core.api import schema as core_api_schema
from qtoggleserver.core.typing import GenericJSONDict
from qtoggleserver.system import fwupdate
logger = logging.getLogger(__name__)
@core_api.api_call(core_api.ACCESS_LEVEL_ADMIN)
async def get_firmware(request: core_api.APIRequest) -> GenericJSONDict:
current_version = await fwupdate.get_current_version()
status = await fwupdate.get_status()
if status == fwupdate.STATUS_IDLE:
try:
latest_version, latest_date, latest_url = await fwupdate.get_latest()
return {
'version': current_version,
'latest_version': latest_version,
'latest_date': latest_date,
'latest_url': latest_url,
'status': status
}
except Exception as e:
logger.error('get latest firmware failed: %s', e, exc_info=True)
return {
'version': current_version,
'status': status
}
else:
return {
'version': current_version,
'status': status
}
@core_api.api_call(core_api.ACCESS_LEVEL_ADMIN)
async def patch_firmware(request: core_api.APIRequest, params: GenericJSONDict) -> None:
core_api_schema.validate(params, core_api_schema.PATCH_FIRMWARE)
status = await fwupdate.get_status()
if status not in (fwupdate.STATUS_IDLE, fwupdate.STATUS_ERROR):
raise core_api.APIError(503, 'busy')
if params.get('url'):
await fwupdate.update_to_url(params['url'])
else: # Assuming params['version']
await fwupdate.update_to_version(params['version'])
| [
"ccrisan@gmail.com"
] | ccrisan@gmail.com |
8810778d919db3e759f97b0b6e2e03e245363908 | 8fd4a35c61532f98e4e3888b1ca013ca3f7a2072 | /tests/test_configdict.py | 1f4e317e1ddb7aed6ebb704a3231691b9ea1c8c5 | [
"MIT"
] | permissive | menchant/bio96 | c785e0e2b1634f0f9df8645266139e62a4e2b3f0 | 97db6f7ae7b8f247c08ade1021c2906f71fdf1a5 | refs/heads/master | 2022-03-30T09:12:30.901537 | 2019-12-09T20:32:05 | 2019-12-09T20:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | #!/usr/bin/env python3
from bio96 import *
def test_empty():
config = configdict({})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {}
assert config.user == {}
def test_user():
config = configdict({'x': 1})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {}
assert config.user == {'x': 1}
def test_meta():
config = configdict({'x': 1, 'meta': {'y': 2}})
assert config.meta == {'y': 2}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {}
assert config.user == {'x': 1}
def test_rows():
config = configdict({'x': 1, 'row': {'y': 2}})
assert config.meta == {}
assert config.rows == {'y': 2}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {}
assert config.user == {'x': 1}
def test_irows():
config = configdict({'x': 1, 'irow': {'y': 2}})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {'y': 2}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {}
assert config.user == {'x': 1}
def test_cols():
config = configdict({'x': 1, 'col': {'y': 2}})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {'y': 2}
assert config.icols == {}
assert config.wells == {}
assert config.user == {'x': 1}
def test_icols():
config = configdict({'x': 1, 'icol': {'y': 2}})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {'y': 2}
assert config.wells == {}
assert config.user == {'x': 1}
def test_wells():
config = configdict({'x': 1, 'well': {'y': 2}})
assert config.meta == {}
assert config.rows == {}
assert config.irows == {}
assert config.cols == {}
assert config.icols == {}
assert config.wells == {'y': 2}
assert config.user == {'x': 1}
def test_getattr():
config = configdict({})
config.meta['x'] = 1; assert config.meta == {'x': 1}
config.rows['x'] = 2; assert config.rows == {'x': 2}
config.irows['x'] = 3; assert config.irows == {'x': 3}
config.cols['x'] = 4; assert config.cols == {'x': 4}
config.icols['x'] = 5; assert config.icols == {'x': 5}
config.wells['x'] = 6; assert config.wells == {'x': 6}
def test_setattr():
config = configdict({})
config.meta = {'x': 1}; assert config['meta']['x'] == 1
config.rows = {'x': 2}; assert config['row']['x'] == 2
config.irows = {'x': 3}; assert config['irow']['x'] == 3
config.cols = {'x': 4}; assert config['col']['x'] == 4
config.icols = {'x': 5}; assert config['icol']['x'] == 5
config.wells = {'x': 6}; assert config['well']['x'] == 6
| [
"kale@thekunderts.net"
] | kale@thekunderts.net |
51c9734e2bb76d57a89179adee9869b3f01dc271 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/image/legendgrouptitle/font/_size.py | 0703f95a46c6524e5ae4e1e051274c52c66992be | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 461 | py | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="image.legendgrouptitle.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 1),
**kwargs,
)
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
ebc358fe1d3d3c0c6443b2922f596ffa70817af4 | 1f44c056f79b0b8f2f32cdf417c80efe1913ed77 | /ABC60C.py | 0d759ee33dd5d4caef3293fe2a219e0c801f9d8f | [] | no_license | saki-engineering/PyAtCoder | 4cc88d2a43991a5202cd71b48be0e936fb32137e | b008e86523c6d500beec53344172311872d50ff4 | refs/heads/master | 2020-09-15T08:39:17.698396 | 2020-04-24T05:55:02 | 2020-04-24T05:55:02 | 223,397,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #coding: utf-8
import math
import heapq
import bisect
import numpy as np
from collections import Counter, deque
#from scipy.misc import comb
N,T = map(int, input().split())
t = list(map(int, input().split()))
ans = 0
for i in range(N-1):
if t[i+1] < t[i]+T: ans += (t[i+1]-t[i])
else: ans += T
ans += T
print(ans) | [
"sakiharu31415@yahoo.co.jp"
] | sakiharu31415@yahoo.co.jp |
28e05c285d256dc10661cfb3388091b923e9de6d | 7d45be20297033536b16a139a017bcec0e8880dc | /13.py | db9a4c6a5a31b3d4c73ad3543b8f6f9b8a9ecd56 | [] | no_license | BhagyashreeKarale/dichackathon | f6e199e65eaddaaa5f1594c444c469d9359e94c3 | 3aba9caaf26f96f70f8b5315358a3dfcf5264da3 | refs/heads/main | 2023-08-10T11:18:31.101163 | 2021-09-11T18:56:17 | 2021-09-11T18:56:17 | 405,456,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # Q16.Write a Python program to map two lists into a dictionary.
l1=[1,2,3,4,5,6,7]
l2=["riya","ankita","rahul","priya","anshika","rose","aarti"]
dic={}
for i in range(len(l1)):
dic[(l1[i])]=l2[i]
print(dic)
#using zip function
dic=dict(zip(l1,l2))
print(dic) | [
"noreply@github.com"
] | BhagyashreeKarale.noreply@github.com |
37c6d17dfd9ccc0f2d0c1c121cb6154050ccd519 | 22251b1ada3681204a1f4e75ce6f42ca94b9b939 | /api/migrations/0001_initial.py | 9fdce3d738c81ba66205e2a797fe1cf73bde3d12 | [] | no_license | vipin-s0106/Basic_Django_Angular_RestAPI | 755dc79216642306205aad2cdb2f47f310407437 | ce0afb721c8b04fc932a076b0509e36583d3728c | refs/heads/master | 2022-04-14T16:48:57.779977 | 2020-04-12T11:57:43 | 2020-04-12T11:57:43 | 255,039,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Generated by Django 3.0.4 on 2020-04-11 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('movie_name', models.CharField(max_length=150)),
('caption_image', models.FileField(blank=True, null=True, upload_to='')),
],
),
]
| [
"vipin.s0106@gmail.com"
] | vipin.s0106@gmail.com |
2612ca728d330306f8e47ea5564a10249735f8b0 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/estimator/canned/dnn_linear_combined.py | 103503382500fff9b56683b255782ae3d189f6e1 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 1,383 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn_linear_combined python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.canned import dnn_linear_combined
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn_linear_combined.__all__ = [
s for s in dir(dnn_linear_combined) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.canned.dnn_linear_combined import *
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
2529cc61a7d88aa268a9a9af8cf9b21c8ad0bc54 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/2411.py | a9582de00a37f3252cf64204b04fe4ab4faf23f1 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #!/usr/bin/python
f = open("C-small-attempt0.in", "r")
T = int(f.readline())
def palindrome(num):
strNum = str(num)
l = len(strNum)
for i in range(l/2):
if strNum[i] != strNum[l-i-1]:
return False
return True
for t in range(1, T+1):
lb, ub = map(int, f.readline().strip().split())
c = 0
for i in range(lb, ub+1):
if palindrome(i):
root = int(i**0.5)
if root*root == i and palindrome(root):
c += 1
print "Case #{0}: {1}".format(t, c)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
ba4984a720c6e1306f796c62fb2630c6a574f05f | dcd3a08831759b4458a9bac4e44a7bbfac626dc1 | /python/protobufs/services/team/actions/join_team_pb2.py | 871af1cf3126313e5c2df2090091876645a639bf | [
"MIT"
] | permissive | getcircle/protobuf-registry | 433b6ad788831b34ccd86e2b42a3ec6606adc698 | 20ad8463b7ac6e2cf279c08bcd3e953993fe9153 | refs/heads/master | 2021-05-01T00:11:04.763067 | 2016-12-05T04:46:44 | 2016-12-05T04:46:44 | 27,981,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,602 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/team/actions/join_team.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protobufs.services.team import containers_pb2 as protobufs_dot_services_dot_team_dot_containers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/team/actions/join_team.proto',
package='services.team.actions.join_team',
syntax='proto3',
serialized_pb=b'\n/protobufs/services/team/actions/join_team.proto\x12\x1fservices.team.actions.join_team\x1a(protobufs/services/team/containers.proto\"\x1c\n\tRequestV1\x12\x0f\n\x07team_id\x18\x01 \x01(\t\"D\n\nResponseV1\x12\x36\n\x06member\x18\x01 \x01(\x0b\x32&.services.team.containers.TeamMemberV1b\x06proto3'
,
dependencies=[protobufs_dot_services_dot_team_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.team.actions.join_team.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='team_id', full_name='services.team.actions.join_team.RequestV1.team_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=126,
serialized_end=154,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.team.actions.join_team.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='member', full_name='services.team.actions.join_team.ResponseV1.member', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=224,
)
_RESPONSEV1.fields_by_name['member'].message_type = protobufs_dot_services_dot_team_dot_containers__pb2._TEAMMEMBERV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.team.actions.join_team_pb2'
# @@protoc_insertion_point(class_scope:services.team.actions.join_team.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.team.actions.join_team_pb2'
# @@protoc_insertion_point(class_scope:services.team.actions.join_team.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
# @@protoc_insertion_point(module_scope)
| [
"mwhahn@gmail.com"
] | mwhahn@gmail.com |
0e66bd19449a34a92382dfade04ca2dd9697c3f2 | f1a8e308c76866e2fba20401e6f1d5842dd60c46 | /Algorithms and Data Structures Practice/LeetCode Questions/Sorting/88. Merge Sorted Array.py | f673b376d6a5371cbd0f842e740c2aeb8490f3ec | [] | no_license | harman666666/Algorithms-Data-Structures-and-Design | 6e5da0c1f701e7dfc7b045ecd1209463131d3fc7 | 483f0c93faca8ccaf038b77ebe2fa712f6b0c6bc | refs/heads/master | 2021-07-14T10:11:27.588838 | 2021-07-07T01:47:42 | 2021-07-07T01:47:42 | 101,330,760 | 3 | 1 | null | 2018-10-15T04:52:07 | 2017-08-24T19:32:03 | Python | UTF-8 | Python | false | false | 2,274 | py | '''
88. Merge Sorted Array
Easy
2197
4153
Add to List
Share
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
Note:
The number of elements initialized in nums1 and nums2 are m and n respectively.
You may assume that nums1 has enough space (size that is equal to m + n) to hold additional elements from nums2.
Example:
Input:
nums1 = [1,2,3,0,0,0], m = 3
nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
Constraints:
-10^9 <= nums1[i], nums2[i] <= 10^9
nums1.length == m + n
nums2.length == n
Accepted
592,599
Submissions
1,514,076
'''
'''
MEMORIZE THE BEAUTIFUL WAY:
'''
def merge(self, nums1, m, nums2, n):
while m > 0 and n > 0:
if nums1[m-1] >= nums2[n-1]:
nums1[m+n-1] = nums1[m-1]
m -= 1
else:
nums1[m+n-1] = nums2[n-1]
n -= 1
if n > 0:
nums1[:n] = nums2[:n]
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
# SWAP ALL THE ELEMENTS IN NUMS1 TO THE END FIRST!!!
end = len(nums1) - 1
sizeNums1 = len(nums1) - len(nums2)
swapPtr = sizeNums1 - 1
while swapPtr != -1:
nums1[swapPtr], nums1[end] = nums1[end], nums1[swapPtr]
swapPtr -= 1
end -= 1
print(nums1)
inPtr = 0
l = end + 1
r = 0
if len(nums2) == 0:
return nums1
while inPtr != len(nums1):
if r == len(nums2) and l == len(nums1):
return nums1
elif l == len(nums1):
nums2[r], nums1[inPtr] = nums1[inPtr], nums2[r]
r += 1
elif r == len(nums2):
nums1[l], nums1[inPtr] = nums1[inPtr], nums1[l]
l += 1
elif nums2[r] < nums1[l]:
nums2[r], nums1[inPtr] = nums1[inPtr], nums2[r]
r += 1
else:
nums1[l], nums1[inPtr] = nums1[inPtr], nums1[l]
l += 1
inPtr += 1
| [
"harman.j.singh@hotmail.com"
] | harman.j.singh@hotmail.com |
530dfcf02bcc8d889a76b628309b95f3fec8528f | 3a39e879fb2901207afcfc238b169ddefa104055 | /Chapter05/Docs/errbackspider/errbackspider/spiders/errback_spider.py | 9b55761a795c9081c4c163df0d07bfd92348f856 | [] | no_license | Synapses/Web_Scraping_with_Python | cb32ddd468250b9f11ad16d3576d0920693e708c | 3bb8cd47d0e1e182bb8ee800d32e24f45bf13ab0 | refs/heads/master | 2023-03-15T09:19:02.754593 | 2020-06-16T02:17:11 | 2020-06-16T02:17:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
class ErrbackSpider(scrapy.Spider):
name = "errback_example"
start_urls = [
"http://www.httpbin.org", # HTTP 200 expected
"http://www.httpbin.org/status/404", # Not found error
"http://www.httpbin.org/status/500", # server issue
"http://www.httpbin.org:12345", # non-responding host, timeout expected
"http://www.httphttpbinbin.org", # DNS error expected
]
def start_request(self):
for u in self.start_urls:
yield scrapy.Request(u, callback=self.parse_httpbin,
errback=self.errback_httpbin,
dont_filter=True)
def parse_httpbin(self, response):
self.logger.info('Got successful response from {}'.format(response.url))
# do something useful here...
def errback_httpbin(self, failure):
# Log all failures
self.logger.error(repr(failure))
# in case you want to do something special for some errors,
# you may need the failure's type:
if failure.check(HttpError):
# these exceptions come from HttpError spider middleware
# you can get the non-200 response
response = failure.value.response
self.logger.error('HttpError on %s', response.url)
elif failure.check(DNSLookupError):
# this is the original request
request = failure.request
self.logger.error('DNSLookupError on %s', request.url)
elif failure.check(TimeoutError, TCPTimedOutError):
request = failure.request
self.logger.error('TimeoutError on %s', request.url)
| [
"philip.dongfei@gmail.com"
] | philip.dongfei@gmail.com |
fcd82eee8042da29dd0e74e9c8fca7af2e9bcb0f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03015/s715773814.py | 9cd7cb639cfb7486945fee440b90b3bdbe4a21eb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | def main():
MOD = 10 ** 9 + 7
EQ = 0 # 等しくなり得る
SMALL = 1 # 未満確定
S = map(int, input())
dp = [1, 0]
for x in S:
ndp = [0] * 2
if x == 0:
ndp[EQ] = dp[EQ] # (0,0)
ndp[SMALL] = dp[SMALL] * 3 # (0,0),(0,1),(1,0)
elif x == 1:
ndp[EQ] = dp[EQ] * 2 # (0,1),(1,0)
ndp[SMALL] = dp[EQ] + dp[SMALL] * 3 # EQ->(0,0), SMALL->(0,0),(0,1),(1,0)
*dp, = map(lambda x: x % MOD, ndp)
ans = sum(dp) % MOD # 取り忘れ
print(ans)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fa890b0f406730ad6d9f53bbbeb35a89a1eba5c9 | a25acab883494fa90cccc7255cac67251b40a21d | /specific/anchor/utils.py | 9e18add0d1f25afe548b7ec6e5abe9ed6f489f2f | [] | no_license | csliuchang/PupaDetector | cd8d85ca0cdb236dae28b82cdac144e17ce8f76f | b88dfdfd3f52e1df7cd44b5e1d7086acbe1ec046 | refs/heads/master | 2023-08-12T13:05:19.796420 | 2021-09-17T08:54:28 | 2021-09-17T08:54:28 | 397,140,426 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | import torch
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
level_targets.append(target[:, start:end])
start = end
return level_targets
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
"""Check whether the anchors are inside the border.
Parameters
----------
flat_anchors : torch.Tensor
Flatten anchors, shape (n, 4).
valid_flags : torch.Tensor
An existing valid flags of anchors.
img_shape : tuple(int)
Shape of current image.
allowed_border : int, optional
The border to allow the valid anchor. Defaults to 0.
Returns
-------
torch.Tensor
Flags indicating whether the anchors are inside a
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.
Parameters
----------
bbox : Tensor
Bboxes to calculate regions, shape (n, 4).
ratio : float
Ratio of the output region.
featmap_size : tuple
Feature map size used for clipping the boundary.
Returns
-------
tuple
x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
def meshgrid(y, x):
"""Generate mesh grid of y and x.
Parameters
----------
y : torch.Tensor
Grids of y dimension.
x : torch.Tensor
Grids of x dimension.
Returns
-------
tuple[torch.Tensor]
The mesh grids of y and x.
"""
H = y.shape[0]
W = x.shape[0]
xx = x.repeat(H).view(H, W)
yy = y.view(-1, 1).repeat(1, W)
return yy, xx
| [
"598306303@qq.com"
] | 598306303@qq.com |
f3485f6e457870ef4a389aca1ae87f74e7857980 | 7b72e319c16fa66644a29a930e46a10c943ac533 | /flopy/modflow/mfpcg.py | 2eb540900fead397aebfcd83147bd61134f82845 | [
"BSD-3-Clause"
] | permissive | kwilcox/flopy | 58e9297ee6cb4cf95de8a57a5b338f9ff1b1cc61 | 527c4ee452ea779bdebd6c1c540452d145e26943 | refs/heads/master | 2020-12-01T11:42:01.608949 | 2015-01-28T19:03:55 | 2015-01-28T19:03:55 | 28,347,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py | from flopy.mbase import Package
class ModflowPcg(Package):
'''Pcg Package
Only programmed to work with the default values; may need work for other options'''
def __init__(self, model, mxiter=50, iter1=30, npcond=1,
hclose=1e-5, rclose=1e-5, relax=1.0, nbpol=0, iprpcg=0, mutpcg=3,
damp=1.0, dampt=1.0, ihcofadd=0,
extension='pcg', unitnumber=27):
Package.__init__(self, model, extension, 'PCG', unitnumber) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# PCG for MODFLOW, generated by Flopy.'
self.url = 'pcg.htm'
self.mxiter = mxiter
self.iter1 = iter1
self.npcond = npcond
self.hclose = hclose
self.rclose = rclose
self.relax = relax
self.nbpol = nbpol
self.iprpcg = iprpcg
self.mutpcg = mutpcg
self.damp = damp
self.dampt = dampt
self.ihcofadd = ihcofadd
self.parent.add_package(self)
def __repr__( self ):
return 'Preconditioned conjugate gradient solver package class'
def write_file(self):
# Open file for writing
f_pcg = open(self.fn_path, 'w')
f_pcg.write('%s\n' % self.heading)
ifrfm = self.parent.get_ifrefm()
if ifrfm:
f_pcg.write('{0} '.format(self.mxiter))
f_pcg.write('{0} '.format(self.iter1))
f_pcg.write('{0} '.format(self.npcond))
f_pcg.write('{0} '.format(self.ihcofadd))
f_pcg.write('\n')
f_pcg.write('{0} '.format(self.hclose))
f_pcg.write('{0} '.format(self.rclose))
f_pcg.write('{0} '.format(self.relax))
f_pcg.write('{0} '.format(self.nbpol))
f_pcg.write('{0} '.format(self.iprpcg))
f_pcg.write('{0} '.format(self.mutpcg))
f_pcg.write('{0} '.format(self.damp))
if self.damp < 0:
f_pcg.write('{0} '.format(self.dampt))
f_pcg.write('\n')
else:
f_pcg.write('{0:10d}'.format(self.mxiter))
f_pcg.write('{0:10d}'.format(self.iter1))
f_pcg.write('{0:10d}'.format(self.npcond))
f_pcg.write('{0:10d}'.format(self.ihcofadd))
f_pcg.write('\n')
f_pcg.write('{0:9.4e} '.format(self.hclose))
f_pcg.write('{0:9.4e} '.format(self.rclose))
f_pcg.write('{0:9.4e} '.format(self.relax))
f_pcg.write('{0:10d}'.format(self.nbpol))
f_pcg.write('{0:10d}'.format(self.iprpcg))
f_pcg.write('{0:10d}'.format(self.mutpcg))
f_pcg.write('{0:9.4e} '.format(self.damp))
if self.damp < 0:
f_pcg.write('{0:9.4e} '.format(self.dampt))
f_pcg.write('\n')
f_pcg.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
if type(f) is not file:
filename = f
f = open(filename, 'r')
#dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
#dataset 1
ifrfm = model.get_ifrefm()
if model.version != 'mf2k':
ifrfm = True
ihcofadd = 0
dampt = 0.
if ifrfm:
t = line.strip().split()
mxiter = int(t[0])
iter1 = int(t[1])
npcond = int(t[2])
try:
ihcofadd = int(t[3])
except:
pass
line = f.readline()
t = line.strip().split()
hclose = float(t[0])
rclose = float(t[1])
relax = float(t[2])
nbpol = int(t[3])
iprpcg = int(t[4])
mutpcg = int(t[5])
damp = float(t[6])
if damp < 0.:
dampt = float(t[7])
else:
mxiter = int(line[0:10].strip())
iter1 = int(line[10:20].strip())
npcond = int(line[20:30].strip())
try:
ihcofadd = int(line[30:40].strip())
except:
pass
line = f.readline()
hclose = float(line[0:10].strip())
rclose = float(line[10:20].strip())
relax = float(line[20:30].strip())
nbpol = int(line[30:40].strip())
iprpcg = int(line[40:50].strip())
mutpcg = int(line[50:60].strip())
damp = float(line[60:70].strip())
if damp < 0.:
dampt = float(line[70:80].strip())
pcg = ModflowPcg(model, mxiter=mxiter, iter1=iter1, npcond=npcond, ihcofadd=ihcofadd,\
hclose=hclose, rclose=rclose, relax=relax, nbpol=nbpol,\
iprpcg=iprpcg, mutpcg=mutpcg, damp=damp, dampt=dampt)
return pcg
| [
"langevin@usgs.gov"
] | langevin@usgs.gov |
dc56fca9c3f0400c2c546ec88a36f50896da3a5b | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/network/models/express_route_circuit_routes_table_summary.py | 202ba003885f50452d9a8f36be1ac4f62ebc3c8a | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 2,276 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitRoutesTableSummary(Model):
"""
The routes table associated with the ExpressRouteCircuit
:param neighbor: Neighbor.
:type neighbor: str
:param v: BGP version number spoken to the neighbor.
:type v: int
:param as_property: Autonomous system number.
:type as_property: int
:param up_down: The length of time that the BGP session has been in the
Established state, or the current status if not in the Established state.
:type up_down: str
:param state_pfx_rcd: Current state of the BGP session, and the number of
prefixes that have been received from a neighbor or peer group.
:type state_pfx_rcd: str
"""
_attribute_map = {
'neighbor': {'key': 'neighbor', 'type': 'str'},
'v': {'key': 'v', 'type': 'int'},
'as_property': {'key': 'as', 'type': 'int'},
'up_down': {'key': 'upDown', 'type': 'str'},
'state_pfx_rcd': {'key': 'statePfxRcd', 'type': 'str'},
}
def __init__(self, neighbor=None, v=None, as_property=None, up_down=None, state_pfx_rcd=None):
self.neighbor = neighbor
self.v = v
self.as_property = as_property
self.up_down = up_down
self.state_pfx_rcd = state_pfx_rcd
| [
"me@teopeurt.com"
] | me@teopeurt.com |
4e7414a3a31a6dc0edb60e6832a9aea5fff43856 | dacb257a90310eba03f3128221120a7d54b894ba | /_templates/component_template.py | 17012640af10b7c3b7946c401e3122b013cd8f93 | [
"MIT"
] | permissive | SiChiTong/pysmartnode | 92351efa02e52aa84185a53896957c453b12540a | a0998ad6582a28fe5a0529fb15dd4f61e254d25f | refs/heads/master | 2023-01-05T10:00:14.907988 | 2020-09-01T10:07:45 | 2020-09-01T10:07:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,938 | py | # Author: Kevin Köck
# Copyright Kevin Köck 2018-2020 Released under the MIT license
# Created on 2018-06-22
"""
example config for MyComponent:
{
package: <package_path>
component: MyComponent
constructor_args: {
my_value: "hi there"
# mqtt_topic: sometopic # optional, defaults to home/<controller-id>/<component_name>/<component-count>/set
# mqtt_topic2: sometopic # optional, defautls to home/sometopic
# friendly_name: null # optional, friendly name shown in homeassistant gui with mqtt discovery
# discover: true # optional, if false no discovery message for homeassistant will be sent.
}
}
"""
__updated__ = "2020-03-29"
__version__ = "1.91"
import uasyncio as asyncio
from pysmartnode import config
from pysmartnode import logging
from pysmartnode.utils.component import ComponentBase, DISCOVERY_SWITCH
import gc
####################
# choose a component name that will be used for logging (not in leightweight_log),
# a default mqtt topic that can be changed by received or local component configuration
# as well as for the component name in homeassistant.
COMPONENT_NAME = "MyComponent"
# define the type of the component according to the homeassistant specifications
_COMPONENT_TYPE = "switch"
####################
_log = logging.getLogger(COMPONENT_NAME)
_mqtt = config.getMQTT()
gc.collect()
_unit_index = -1
# This template is for a very general component.
# It might be better to either use the templates for a specific type of
# component like a sensor or a switch.
class MyComponent(ComponentBase):
def __init__(self, my_value, # extend or shrink according to your sensor
mqtt_topic=None, mqtt_topic2=None,
friendly_name=None, discover=True, **kwargs):
# This makes it possible to use multiple instances of MyComponent
# It is needed for every default value for mqtt.
# Initialize before super()__init__(...) to not pass the wrong value.
global _unit_index
_unit_index += 1
super().__init__(COMPONENT_NAME, __version__, _unit_index, discover=discover, **kwargs)
# discover: boolean, if this component should publish its mqtt discovery.
# This can be used to prevent combined Components from exposing underlying
# hardware components like a power switch
# This will generate a topic like: home/31f29s/MyComponent0/set
self._command_topic = mqtt_topic or _mqtt.getDeviceTopic(
"{!s}{!s}".format(COMPONENT_NAME, self._count), is_request=True)
# These calls subscribe the topics.
_mqtt.subscribeSync(self._command_topic, self.on_message1, self, check_retained_state=True)
# check_retained_state will subscribe to the state topic (home/31f29s/MyComponent0)
# first, so the original state of the device can be restored.
# The state topic will then be unsubscribed and the requested command topic subscribed.
_mqtt.subscribeSync(mqtt_topic2 or "home/sometopic", self.on_message2, self)
self.my_value = my_value
self._frn = friendly_name # will default to unique name in discovery if None
self._loop_task = asyncio.create_task(self._loop())
# the component might get removed in which case it should be able to locate and stop
# any running loops it created (otherwise the component will create Exceptions and
# won't be able to be fully removed from RAM)
gc.collect()
async def _init_network(self):
await super()._init_network()
# All _init_network methods of every component will be called after each other.
# Therefore every _init_network of previously registered components will have
# run when this one is running.
# NEVER start loops here because it will block the _init_network of all other components!
# Start a new uasyncio task in __init__() if you need additional loops.
# This method is only used for subscribing topics, publishing discovery and logging.
# It can be used for similar network oriented initializations.
async def _loop(self):
# A loop should either only do network oriented tasks or only
# non-network oriented tasks to ensure that the device works
# even when the network is unavailable. A compromise could be
# to use network oriented tasks with timeouts if those delays
# aren't a problem for the device functionality.
while True:
await asyncio.sleep(5)
await _mqtt.publish(self._command_topic[:-4], "ON", qos=1) # publishing to state_topic
async def _remove(self):
"""Will be called if the component gets removed"""
# Cancel any loops/asyncio coroutines started by the component
self._loop_task.cancel()
await super()._remove()
async def _discovery(self, register=True):
"""
Send discovery messages
:param register: if True send discovery message, if False send empty discovery message
to remove the component from homeassistant.
:return:
"""
name = "{!s}{!s}".format(COMPONENT_NAME, self._count)
component_topic = _mqtt.getDeviceTopic(name)
# component topic could be something completely user defined.
# No need to follow the pattern:
component_topic = self._command_topic[:-4] # get the state topic of custom component topic
friendly_name = self._frn # define a friendly name for the homeassistant gui.
# Doesn't need to be unique
if register:
await self._publishDiscovery(_COMPONENT_TYPE, component_topic, name, DISCOVERY_SWITCH,
friendly_name)
else:
await self._deleteDiscovery(_COMPONENT_TYPE, name)
del name, component_topic, friendly_name
gc.collect()
async def on_message1(self, topic, message, retained):
"""
MQTTHandler is calling this subscribed async method whenever a message is received for the subscribed topic.
:param topic: str
:param message: str/dict/list (json converted)
:param retained: bool
:return:
"""
print("Do something")
return True # When returning True, the value of arg "message" will be
# published to the state topic as a retained message
async def on_message2(self, topic, message, retained):
"""
MQTTHandler is calling this subscribed async method whenever a message is received for the subscribed topic.
:param topic: str
:param message: str/dict/list (json converted)
:param retained: bool
:return:
"""
print("Do something else")
return True # When returning True, the value of arg "message" will be
# published to the state topic as a retained message
| [
"kevinkk525@users.noreply.github.com"
] | kevinkk525@users.noreply.github.com |
642c338f2025399808ac2d1a89087670e21e1dd9 | 365558f4e8ddc829f0ddca3f7d44ba62da27542a | /updates/api/mixins.py | 508f98039750b944973f416e4ae53a5ae150c9e5 | [] | no_license | paulitstep/restapi | 9f1a1458c85cccad1a51a1f00f9a948ccca90f5e | 465775f166b342bb416973335585225e16cb0ac4 | refs/heads/master | 2022-12-11T05:28:29.495535 | 2020-01-03T12:07:14 | 2020-01-03T12:07:14 | 229,033,566 | 0 | 0 | null | 2022-11-22T04:55:07 | 2019-12-19T10:44:52 | Python | UTF-8 | Python | false | false | 264 | py | from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
class CSRFExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
| [
"pasha-mo1@rambler.ru"
] | pasha-mo1@rambler.ru |
502205acb9cda35045cccadbb6e2af22d5604de8 | d75fbceb28ad14b07ae4057a8b23ec0bd3682628 | /code/chap01/GeometryDemo.py | 1894209492bd0745a775c3814309fabaf5b7aac6 | [] | no_license | wubinbai/pygame-book | 0707a0b36f41bc6f0b1282707e6c4f6cbed9c87a | 9de1f7516a2aec940ffa97f9686cc0520bad2deb | refs/heads/master | 2020-12-21T15:51:08.397619 | 2020-01-30T12:37:52 | 2020-01-30T12:37:52 | 236,478,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | # OOP Geometry Demo
class Point():
x = 0.0
y = 0.0
def __init__(self, x, y):
self.x = x
self.y = y
print("Point constructor")
def ToString(self):
return "{X:" + str(self.x) + ",Y:" + str(self.y) + "}"
class Size():
width = 0.0
height = 0.0
def __init__(self,width,height):
self.width = width
self.height = height
print("Size constructor")
def ToString(self):
return "{WIDTH=" + str(self.width) + \
",HEIGHT=" + str(self.height) + "}"
class Circle(Point):
radius = 0.0
def __init__(self, x, y, radius):
super().__init__(x,y)
self.radius = radius
print("Circle constructor")
def ToString(self):
return super().ToString() + \
",{RADIUS=" + str(self.radius) + "}"
class Rectangle(Point,Size):
def __init__(self, x, y, width, height):
Point.__init__(self,x,y)
Size.__init__(self,width,height)
print("Rectangle constructor")
def ToString(self):
return Point.ToString(self) + "," + Size.ToString(self)
p = Point(10,20)
print(p.ToString())
s = Size(80,70)
print(s.ToString())
c = Circle(100,100,50)
print(c.ToString())
r = Rectangle(200,250,40,50)
print(r.ToString())
| [
"wubinbai@yahoo.com"
] | wubinbai@yahoo.com |
7783c08daad08e45d2628bdba38bd60685bafafa | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2020_03_07_23_12_34_972398.py | 1961ce12d71b8ef3172faaed1cc7b3049bb38b32 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import math
def calcula_gaussiana (x, y, z):
a = 1/(z*(2* math.pi)**(1/2))
b = (math.e) ** (-0.5*c)
c = ((x - y)/z)**2
gaussiana = a*b
return gaussiana | [
"you@example.com"
] | you@example.com |
7a8871f3e6f1d6ffbb7082df68f5813e5b281528 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/cloud/aapicsubnetpool.py | b8476311897c66bef3a0147220099b199efa5dcf | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AApicSubnetPool(Mo):
meta = ClassMeta("cobra.model.cloud.AApicSubnetPool")
meta.isAbstract = True
meta.moClassName = "cloudAApicSubnetPool"
meta.moClassName = "cloudAApicSubnetPool"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract Apic Subnet Pool"
meta.writeAccessMask = 0xc00000001
meta.readAccessMask = 0xc00000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.concreteSubClasses.add("cobra.model.cloud.ApicSubnetPool")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "createdBy", "createdBy", 52857, PropCategory.REGULAR)
prop.label = "Created By"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "USER"
prop._addConstant("SYSTEM", "system-internal", 1)
prop._addConstant("USER", "user", 0)
meta.props.add("createdBy", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "subnet", "subnet", 52856, PropCategory.REGULAR)
prop.label = "Subnet"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("subnet", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
32b236c7d226d86c465cc89e6ca3ee8726a36cf5 | 0d15b6706d1016b604b351d57204852ff6613600 | /articles/admin.py | 1e1ea65ea50180b8980335603648ca7861f27e91 | [] | no_license | yoonmyunghoon/Django-prac | e431b00482e2b5dde5b6555b862c658f86ec6328 | 848275f43f514b7d84b6bcec1e7fee90f9b1a378 | refs/heads/master | 2023-03-03T23:21:57.607503 | 2021-02-22T08:46:58 | 2021-02-22T08:46:58 | 339,078,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from django.contrib import admin
from .models import Article, Comment, Hashtag
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ("id", "title", "content", "image", "created_at", "updated_at")
list_display_links = ("content",)
list_filter = ("created_at",)
list_editable = ("title",)
list_per_page = 2
# admin.site.register(Article, ArticleAdmin)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ("id", "article_id", "content", "created_at", "updated_at")
list_filter = ("created_at",)
list_editable = ("content",)
list_per_page = 2
# admin.site.register(Comment, CommentAdmin)
@admin.register(Hashtag)
class HashtagAdmin(admin.ModelAdmin):
liset_display = ("content",)
| [
"youn1791472@gmail.com"
] | youn1791472@gmail.com |
fbfd4b1e2fa77069d31dc9861da0475a94f7c072 | 7832e7dc8f1583471af9c08806ce7f1117cd228a | /aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/CloneDBInstanceRequest.py | cb2978c432125971753d5407b3917c60679c46d8 | [
"Apache-2.0"
] | permissive | dianplus/aliyun-openapi-python-sdk | d6494850ddf0e66aaf04607322f353df32959725 | 6edf1ed02994245dae1d1b89edc6cce7caa51622 | refs/heads/master | 2023-04-08T11:35:36.216404 | 2017-11-02T12:01:15 | 2017-11-02T12:01:15 | 109,257,597 | 0 | 0 | NOASSERTION | 2023-03-23T17:59:30 | 2017-11-02T11:44:27 | Python | UTF-8 | Python | false | false | 4,506 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CloneDBInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'CloneDBInstance','rds')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_RestoreTime(self):
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self,RestoreTime):
self.add_query_param('RestoreTime',RestoreTime)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_DBInstanceStorage(self):
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self,DBInstanceStorage):
self.add_query_param('DBInstanceStorage',DBInstanceStorage)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_BackupId(self):
return self.get_query_params().get('BackupId')
def set_BackupId(self,BackupId):
self.add_query_param('BackupId',BackupId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_UsedTime(self):
return self.get_query_params().get('UsedTime')
def set_UsedTime(self,UsedTime):
self.add_query_param('UsedTime',UsedTime)
def get_DBInstanceClass(self):
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self,DBInstanceClass):
self.add_query_param('DBInstanceClass',DBInstanceClass)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_PrivateIpAddress(self):
return self.get_query_params().get('PrivateIpAddress')
def set_PrivateIpAddress(self,PrivateIpAddress):
self.add_query_param('PrivateIpAddress',PrivateIpAddress)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_VPCId(self):
return self.get_query_params().get('VPCId')
def set_VPCId(self,VPCId):
self.add_query_param('VPCId',VPCId)
def get_DBInstanceDescription(self):
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self,DBInstanceDescription):
self.add_query_param('DBInstanceDescription',DBInstanceDescription)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType)
def get_InstanceNetworkType(self):
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self,InstanceNetworkType):
self.add_query_param('InstanceNetworkType',InstanceNetworkType) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
c1298158bf73240b9c238c14aac0733c0bc1b59d | 29c476c037a05170ff2ddef8edd07014d3751614 | /0x06-python-classes/4-square.py | 88ef31648dc094728817a3acd69b73ce8832f99e | [] | no_license | hacheG/holbertonschool-higher_level_programming | a0aaddb30665833bd260766dac972b7f21dda8ea | 535b1ca229d7cf61124a128bb5725e5200c27fbc | refs/heads/master | 2020-07-22T23:09:27.486886 | 2020-02-13T19:41:34 | 2020-02-13T19:41:34 | 207,360,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #!/usr/bin/python3
class Square():
"""Class Square with setter & getter"""
def __init__(self, size=0):
self.__size = size
@property
def size(self):
return self.__size
@size.setter
def size(self, x):
if not isinstance(x, int):
raise TypeError("size must be an integer")
if x < 0:
raise ValueError("size must be >= 0")
self.__size = x
def area(self):
return (self.__size * self.__size)
| [
"943@holbertonschool.com"
] | 943@holbertonschool.com |
a53f41a47bb29cda9a8831a17ee8ca50f996caea | 8616892b6541602b53fdd94d1552d8e96b7ab722 | /dessn/configurations/old/combined_simple_nolowz.py | bc61fe17df88b12a52d7a966b4534f1e4c36302e | [
"MIT"
] | permissive | dessn/sn-bhm | 7c436877832ec10e4af318a6befff9fb8ffcbf3a | f320a41f9a4f2be49073437e98addca79e938160 | refs/heads/master | 2020-08-28T03:34:38.180148 | 2019-05-01T04:23:23 | 2019-05-01T04:23:23 | 45,723,864 | 1 | 0 | null | 2017-04-25T02:54:43 | 2015-11-07T05:27:44 | Python | UTF-8 | Python | false | false | 2,485 | py | import os
import logging
import socket
from dessn.framework.fitter import Fitter
from dessn.framework.models.approx_model import ApproximateModelW, ApproximateModel, ApproximateModelOl
from dessn.framework.simulations.snana_bulk import SNANACombinedBulk
from dessn.framework.simulations.selection_effects import lowz_sel, des_sel
from dessn.planck.planck import get_planck
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
plot_dir = os.path.dirname(os.path.abspath(__file__)) + "/plots/%s/" % os.path.basename(__file__)[:-3]
dir_name = plot_dir + "output/"
pfn1 = plot_dir + os.path.basename(__file__)[:-3]
file = os.path.abspath(__file__)
print(dir_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
models = ApproximateModelW()
# Turn off mass and skewness for easy test
simulation = [SNANACombinedBulk(152, ["SHINTON_LOWZ_MATRIX_G10_SKEWC_SKEWX1", "SHINTON_LOWZ_MATRIX_C11_SKEWC_SKEWX1"],
"CombinedLowZ", manual_selection=lowz_sel(cov_scale=0.01), num_calib=50),
SNANACombinedBulk(208, ["SHINTON_DES_MATRIX_G10_SKEWC_SKEWX1", "SHINTON_DES_MATRIX_C11_SKEWC_SKEWX1"],
"CombinedDES", manual_selection=des_sel(), num_calib=21)]
fitter = Fitter(dir_name)
fitter.set_models(models)
fitter.set_simulations(simulation)
fitter.set_num_cosmologies(100)
fitter.set_num_walkers(1)
fitter.set_max_steps(3000)
h = socket.gethostname()
if h != "smp-hk5pn72": # The hostname of my laptop. Only will work for me, ha!
fitter.fit(file)
else:
from chainconsumer import ChainConsumer
# results = fitter.load()
# print("Data loaded")
m, s, chain, truth, weight, old_weight, posterior = fitter.load()
c = ChainConsumer()
c.add_chain(chain, weights=weight, posterior=posterior, name="Approx")
c.configure(spacing=1.0)
parameters = [r"$\Omega_m$", "$w$"]
print(c.analysis.get_latex_table(transpose=True))
c.plotter.plot(filename=pfn1 + ".png", truth=truth, parameters=parameters)
print("Plotting distributions")
c = ChainConsumer()
c.add_chain(chain, weights=weight, posterior=posterior, name="Approx")
c.configure(label_font_size=10, tick_font_size=10, diagonal_tick_labels=False)
c.plotter.plot_distributions(filename=pfn1 + "_dist.png", truth=truth, col_wrap=8)
| [
"samuelreay@gmail.com"
] | samuelreay@gmail.com |
57bce43d697126c0d1fdf8886d4be50f39e8e18a | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/26048566-5cc5-11e4-af55-00155d01fe08.py | f2d5557e83a298ecbd3b520346c5dd7d68781382 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | #!/usr/bin/python
################################################################################
# 26048566-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "26048566-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Execute command and parse capture standard output
stdout = cli.system("grep -r usb-storage /etc/modprobe.conf /etc/modprobe.d")
# Split output lines
self.output = stdout.split('\n')
# Process standard output
for line in self.output:
if len(line.strip()) > 0:
self.is_compliant = False
return self.is_compliant
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
25414baa7b52f3f2d2f1fbc697b602bd893b8ab7 | 1a31dfb66512aa66c407484f2ea8b0fb370669a4 | /account/urls.py | 74fd4189f5be454d9fbca20ea9f45a0439c5e14d | [] | no_license | nisha-eng/dstt | 790129f2918e0210421039baba0a4e8c877a7627 | bab89000242aec3a1a6fb05447ec52b14722809f | refs/heads/main | 2023-03-01T02:50:20.767421 | 2021-02-05T05:23:08 | 2021-02-05T05:23:08 | 336,171,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('signup/',views.SignUpView.as_view(), name='signup'),
] | [
"mdipakpatidar@gmail.com"
] | mdipakpatidar@gmail.com |
27a02432307f60e2349e5f62217bdc229641b3a0 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_9_v3/I_w_Mgt_to_Wxyz_focus_to_Cxy_focus_series_Pyramid/pyramid_1side/bce_s001_tv_s0p1_L3/step12_L2345678.py | 65d732c2f60394ea9ab9e20e7ad6db3c04b3fa69 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,245 | py | '''
目前只有 step12 一定需要切換資料夾到 該komg_model所在的資料夾 才能執行喔!
'''
if(__name__ == "__main__"):
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") + 1 ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step12_result_analyzer import Row_col_exps_analyzer
from step11_L2345678 import *
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)
#############################################################################################################################################################################################################
ana_dir = template_dir
#############################################################################################################################################################################################################
"""
以下留下一些example這樣子
core_amount == 7 是因為 目前 see_amount == 7 ,想 一個core 一個see
task_amount == 7 是因為 目前 see_amount == 7
single_see_multiprocess == True 代表 see內 還要 切 multiprocess,
single_see_core_amount == 2 代表切2分
所以總共會有 7*2 = 14 份 process 要同時處理,
但建議不要用,已經測過,爆記憶體了
"""
#################################################################################################################################################################################################################
#################################################################################################################################################################################################################
ana_name = "ch032_1side_all__2side_all"
analyzer = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="rec",
row_col_results=ch032_1side_all__2side_all, show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
.analyze_row_col_results_all_single_see(single_see_multiprocess=False, single_see_core_amount=6)\
.Gather_all_see_final_img()
'''
analyzer = Col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what_sees="see",
ana_what="wc",
col_results=ch032_1side_all__2side_all, show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
.analyze_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)\
.Gather_all_see_final_img()
'''
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
afd99a73aa5211d3722e521ba63eeacc2c19e7b9 | 642526009a434c2a6e04fe0293279a151b216d0a | /dkube/sdk/internal/dkube_client/models/git_commit_details.py | 136441f2723ae8ca83f5e11383723fe023b5aa91 | [] | no_license | mak-454/dkube-sdk | d4b8e7f7b1d8c0b0f64b10940ae42ab9d62f4654 | d2ba78a0abbda589efc0dbd957d9a8f6fd227464 | refs/heads/master | 2022-12-26T03:17:55.627379 | 2020-05-09T17:29:08 | 2020-05-09T17:29:08 | 262,622,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,283 | py | # coding: utf-8
"""
Dkube api server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GitCommitDetails(object):
    """Swagger-generated model describing one git commit.

    The swagger JSON key ``date`` is mapped onto the python attribute
    ``_date`` (stored name-mangled as ``__date``).
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'id': 'str',
        'message': 'str',
        '_date': 'str'
    }

    # attribute name -> JSON key in the swagger definition
    attribute_map = {
        'id': 'id',
        'message': 'message',
        '_date': 'date'
    }

    def __init__(self, id=None, message=None, _date=None):  # noqa: E501
        """GitCommitDetails - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._message = None
        self.__date = None
        self.discriminator = None
        # route each supplied value through its property setter
        for attr_name, supplied in (('id', id), ('message', message), ('_date', _date)):
            if supplied is not None:
                setattr(self, attr_name, supplied)

    @property
    def id(self):
        """Gets the id of this GitCommitDetails.  # noqa: E501

        :return: The id of this GitCommitDetails.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this GitCommitDetails.

        :param id: The id of this GitCommitDetails.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def message(self):
        """Gets the message of this GitCommitDetails.  # noqa: E501

        :return: The message of this GitCommitDetails.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this GitCommitDetails.

        :param message: The message of this GitCommitDetails.  # noqa: E501
        :type: str
        """
        self._message = message

    @property
    def _date(self):
        """Gets the _date of this GitCommitDetails.  # noqa: E501

        :return: The _date of this GitCommitDetails.  # noqa: E501
        :rtype: str
        """
        return self.__date

    @_date.setter
    def _date(self, _date):
        """Sets the _date of this GitCommitDetails.

        :param _date: The _date of this GitCommitDetails.  # noqa: E501
        :type: str
        """
        self.__date = _date

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # one-level conversion, mirroring swagger-codegen behaviour
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value
        if issubclass(GitCommitDetails, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GitCommitDetails):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"ahmed.khan@oneconvergence.com"
] | ahmed.khan@oneconvergence.com |
81f19be92df94fa408323f3520f3036d02d3faf7 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appcomplianceautomation/azure-mgmt-appcomplianceautomation/generated_samples/report_delete.py | e50e47e30f53534b8cd9c42e05cde1528836604c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,558 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.appcomplianceautomation import AppComplianceAutomationToolForMicrosoft365
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-appcomplianceautomation
# USAGE
python report_delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete the sample compliance report and print the service response."""
    credential = DefaultAzureCredential()
    client = AppComplianceAutomationToolForMicrosoft365(credential=credential)
    poller = client.report.begin_delete(report_name="testReportName")
    print(poller.result())
# x-ms-original-file: specification/appcomplianceautomation/resource-manager/Microsoft.AppComplianceAutomation/preview/2022-11-16-preview/examples/Report_Delete.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f20b7d9b1b0b8082fe92f67c3e8295493a8683b5 | 548fbb3bf6648e76e711ee398148cae9ee10a0d2 | /1460_Make_Two_Arrays_Equal_by_Reversing_Sub-arrays.py | 1a71ef2613f25f56e85d7662d98f139a97bb1ffe | [] | no_license | KonstantinSKY/LeetCode | 34cce8eda7182aa6a1616b3471b0cfe9310fe1d4 | 1570122134b962412b0530c3850eb37f1c8c585e | refs/heads/master | 2023-04-16T17:03:23.753146 | 2023-04-03T18:16:21 | 2023-04-03T18:16:21 | 310,714,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | """ 1460. Make Two Arrays Equal by Reversing Sub-arrays https://leetcode.com/problems/make-two-arrays-equal-by-reversing-sub-arrays/"""
import time
from typing import List
class Solution:
    def canBeEqual2(self, target: List[int], arr: List[int]) -> bool:
        """String-based variant: compare the sorted values serialized with a separator.

        The original joined digits with no separator, so e.g. [1, 23] and
        [12, 3] both became "123" and compared equal; a "," separator makes
        the serialization unambiguous.
        """
        return ",".join(map(str, sorted(arr))) == ",".join(map(str, sorted(target)))

    def canBeEqual(self, target: List[int], arr: List[int]) -> bool:
        """Two arrays are reachable by sub-array reversals iff they are equal as multisets."""
        return sorted(arr) == sorted(target)
if __name__ == "__main__":
    start_time = time.time()
    # expected: True (same multiset), then False (9 vs 11 differ)
    print(Solution().canBeEqual([1, 2, 3, 4], [2, 4, 1, 3]))
    print(Solution().canBeEqual([3, 7, 9], [3, 7, 11]))
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"sky012877@gmail.com"
] | sky012877@gmail.com |
0dc609373cfbab318ad5a08f86a53f9b9863311b | dc1df09e627fd5155d4b4eae8915a40d94b2fcf3 | /code/configs_pain/config_train_pain_lstm_wbn_512_milcepain_weighted_2min.py | 3bcdc396596d28d6b8771b1f3e8b5f2e653d5a45 | [] | no_license | menorashid/gross_pain | 0a2145e3b912f23788e22bc4eda6978a65e481fa | 2dbebc596a15e54fb3af0cfca2185f901e78a72d | refs/heads/master | 2021-07-09T07:45:52.457667 | 2020-11-10T01:32:26 | 2020-11-10T01:32:26 | 198,445,713 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from rhodin.python.utils import io as rhodin_utils_io
import os
# Experiment configuration consumed by the training pipeline (keys are read
# by the rhodin-based training code; values here define one run).
config_dict = {
    # model type
    'model_type' : 'pain_lstm_wbn_512',
    'new_folder_style' : True,
    # General parameters
    'dpi' : 190,
    'input_types' : ['img_crop', 'segment_key'],
    'output_types' : ['pain','segment_key'],
    'label_types_train' : ['img_crop','pain','segment_key'],
    'label_types_test' : ['img_crop','pain','segment_key'],
    'num_workers' : 32,  # data-loader worker processes
    # opt parameters
    'num_epochs' : 10,
    'save_every' : 5,
    'train_test_every' : 5,
    'learning_rate' : 1e-4,# baseline: 0.001=1e-3
    'test_every' : 1,
    'plot_every' : 100,
    'print_every' : 10,
    'backward_every' : 1,
    # LPS dataset parameters
    'views' : '[0,1,2,3]',  # camera views used (string form; presumably parsed downstream — TODO confirm)
    'image_width' : 128,
    'image_height' : 128,
    # network parameters
    'batch_size_train' : 1200,
    'batch_size_test' : 1200,
    # loss
    'loss_type' : 'MIL_Loss_Pain_CE',  # multiple-instance cross-entropy pain loss
    'loss_weighted': True,
    'accuracy_type' : ['argmax_pain'],
    'deno' : 'random',
    'deno_test' : 8,
    # dataset
    'training_set' : 'LPS_2fps_crop_timeseg',
    'csv_str_aft': '_reduced_2fps_frame_index_withSegIndexAndIntKey.csv',
    'num_frames_per_seg': 240, #10 min long segs
    'min_size_seg': 10,
    'img_mean' : (0.485, 0.456, 0.406),  # ImageNet normalization stats
    'img_std' : (0.229, 0.224, 0.225),
    'active_cameras' : False,
    'every_nth_frame' : 1,
    'project_wandb': 'debug',  # Weights & Biases project name
    }
"mhnrashid@gmail.com"
] | mhnrashid@gmail.com |
b04182488241419bfaab9735a28b9f92c4a548e4 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_10/aio/_application_insights_management_client.py | e846f9038ef7e10c2422551c70ad202f265a897d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 3,923 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import LiveTokenOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """Composite Swagger for Application Insights Management Client.

    :ivar live_token: LiveTokenOperations operations
    :vartype live_token: azure.mgmt.applicationinsights.v2021_10.aio.operations.LiveTokenOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2021-10-14". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self, credential: "AsyncTokenCredential", base_url: str = "https://management.azure.com", **kwargs: Any
    ) -> None:
        self._config = ApplicationInsightsManagementClientConfiguration(credential=credential, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # collect every model class so the (de)serializer can resolve type names
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # generated SDKs rely on the service for validation, not the client
        self._serialize.client_side_validation = False
        self.live_token = LiveTokenOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """

        # copy so the caller's request object is never mutated by URL formatting
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ApplicationInsightsManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details: Any) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
c620fc9fb4331c7c63ef5f9998dd1d072ee6db93 | 4ef89629e12458a49e9d39dc4797de622732b598 | /day07(编码与解码)/编码与解码.py | 621b20205398f0bcee6ff421237cfe7ba1a0c353 | [] | no_license | LambertlLan/python | ea235b5cc76114575f9341f49b797645aca2938c | 9e1a52537fc3e1cb88cadb32d1e2a8012acb2c7f | refs/heads/master | 2021-01-19T17:06:59.277307 | 2017-09-27T06:18:44 | 2017-09-27T06:18:44 | 101,049,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # __author: Lambert
# __date: 2017/8/18 15:19
s = 'i am 特斯拉'
s_to_gbk = s.encode('gbk')  # encode str -> GBK bytes; prints b'i am \xcc\xd8\xcb\xb9\xc0\xad'
gbk_to_s = s_to_gbk.decode('gbk')  # decode the GBK bytes back to str; prints i am 特斯拉
print(s)
print(s_to_gbk)
| [
"landingyu@163.com"
] | landingyu@163.com |
6c6bc3ce51cbb1d79ae1d01b116f8c0591c1f260 | b1ffcbd977595bccf15dd56e965bda62867d1e10 | /omrdatasettools/tests/MuscimaPlusPlusSymbolImageGeneratorTest.py | 5b181d8d980a6e4e9611b93088185b257cfbfbff | [
"CC-BY-NC-SA-4.0",
"GPL-2.0-only",
"CC-BY-SA-3.0",
"MIT",
"GPL-1.0-or-later",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"AGPL-3.0-only"
] | permissive | fzalkow/OMR-Datasets | 7ded5bb9278e47c84a16de01081876d6bb2e6dbe | c9e7a986199998d6a735875503e6dcce5fdf1193 | refs/heads/master | 2020-09-14T15:30:45.824800 | 2020-01-06T12:07:52 | 2020-01-06T12:07:52 | 223,169,792 | 0 | 0 | MIT | 2019-11-21T12:32:31 | 2019-11-21T12:32:30 | null | UTF-8 | Python | false | false | 1,219 | py | import os
import shutil
import unittest
from glob import glob
from omrdatasettools.downloaders.MuscimaPlusPlusDatasetDownloader import MuscimaPlusPlusDatasetDownloader
from omrdatasettools.image_generators.MuscimaPlusPlusSymbolImageGenerator import MuscimaPlusPlusSymbolImageGenerator
class MuscimaPlusPlusSymbolImageGeneratorTest(unittest.TestCase):
    def test_download_extract_and_render_all_symbols(self):
        """Download MUSCIMA++, render every symbol mask and count the PNGs."""
        # Arrange
        downloader = MuscimaPlusPlusDatasetDownloader()

        # Act
        downloader.download_and_extract_dataset("temp/muscima_pp_raw")
        generator = MuscimaPlusPlusSymbolImageGenerator()
        generator.extract_and_render_all_symbol_masks("temp/muscima_pp_raw", "temp/muscima_img")
        rendered_images = [png for directory in os.walk("temp/muscima_img")
                           for png in glob(os.path.join(directory[0], '*.png'))]

        # Assert
        self.assertEqual(91254, len(rendered_images))

        # Cleanup
        os.remove(downloader.get_dataset_filename())
        shutil.rmtree("temp")
if __name__ == '__main__':
unittest.main()
| [
"alexander.pacha@gmail.com"
] | alexander.pacha@gmail.com |
13b48c6f824a2b31e36afd16858253b90c3721da | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /System/Get the Status of Password Policy Applied under Group Policy/get-the-status-of-password-policy-applied-under-group-policy.py | 026229831e5b1d828ef15c972de9b6c071daed84 | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 7,668 | py | #To define a particular parameter, replace the 'parameterName' inside itsm.getParameter('variableName') with that parameter's name
emailto='tamil111@yopmail.com' #To define a particular receiver email address here
import os
import subprocess
from subprocess import PIPE, Popen
import re
import shutil
def ipaddress():
    """Return the IPv4 address that this machine's hostname resolves to."""
    import socket
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
def computername():
    """Return this machine's name from the COMPUTERNAME env var (Windows only)."""
    import os
    return os.environ['COMPUTERNAME']
# Pick a writable working directory: %PROGRAMDATA%\temp, falling back to the
# system drive root when that cannot be created.
try:
    workdir = os.environ['PROGRAMDATA'] + r'\temp'
    if not os.path.exists(workdir):
        os.mkdir(workdir)
except (KeyError, OSError):
    # was os.environ['SYTEMDRIVE'] — a misspelled key that would raise
    # KeyError inside the fallback itself
    workdir = os.environ['SYSTEMDRIVE']
# was workdir + r'Bat_file.bat', which glued the filename onto "temp"
# without a path separator (...\tempBat_file.bat)
bat_file = os.path.join(workdir, 'Bat_file.bat')
# expected policy values: min age, max age, min length, complexity, history
check = ['0', '90', '7', '1', '4']
content = '''start cmd.exe /c "secedit /export /cfg C:\\ProgramData\\temp\\group-policy.inf /log export.log"
'''
with open(bat_file, 'wb') as fr:
    fr.write(content)
def Email(fileToSend, To):
    """Send `fileToSend` as a CSV attachment to address `To` via the Mailjet API.

    Prints a success line (using the module-level `emailto`) or an error line.
    """
    from mailjet_rest import Client
    import os
    # SECURITY NOTE(review): API credentials are hardcoded in source; they
    # should be moved to configuration or environment variables.
    api_key = '3e70858a7a5c5fbc245a662d5d9aa238'  # API KEY of Mail Jet
    api_secret = 'a337abcc84d8fb062f6f1597d966ae6f'  # API SECRET KEY of Mail Jet
    mailjet = Client(auth=(api_key, api_secret), version='v3.1')
    import base64
    # Mailjet expects the attachment content base64-encoded
    with open(fileToSend, 'rb') as fp:
        ki = base64.b64encode(fp.read())
    data = {
        'Messages': [
            {
                "From": {
                    "Email": "c1operations123@gmail.com",
                },
                "To": [
                    {
                        "Email": "%s"%To,
                    }
                ],
                "Subject": "Status of the Password Policy ",
                "TextPart": "Dear passenger 1, welcome to Mailjet! May the delivery force be with you!",
                "HTMLPart": """<h3> Hi
Please find the attachment which contains the Status of the Password Policy
Thank you.</h3>""",
                "Attachments": [
                    {
                        "ContentType": "text/csv",
                        "Filename": "group-policy.csv",
                        "Base64Content": "%s"%ki
                    }
                ]
            }
        ]
    }
    result = mailjet.send.create(data=data)
    ret = result.status_code
    if ret == 200:
        out = result.json()
        out = str(out)
        if "success" in out:
            print "Email has been Sent Successfully to the following mail adddress :", '"'+emailto+'"'
        else:
            print "Error sending email"
def zip_item(path, final_path):
    """Extract the zip archive at `path` into `final_path` and return `final_path`."""
    import zipfile
    # context manager guarantees the archive handle is closed even if
    # extractall raises (the old open/close pair leaked it on error)
    with zipfile.ZipFile(path, 'r') as zip_ref:
        zip_ref.extractall(final_path)
    return final_path
def Download(URL, DownloadTo = None, FileName = None):
    """Download `URL` to disk and return the local file path, or False on failure.

    FileName defaults to the last URL segment; DownloadTo defaults to %TEMP%.
    """
    import urllib
    import ssl
    if FileName:
        FileName = FileName
    else:
        FileName = URL.split('/')[-1]
    if DownloadTo:
        DownloadTo = DownloadTo
    else:
        DownloadTo = os.path.join(os.environ['TEMP'])
    DF = os.path.join(DownloadTo, FileName)
    with open(os.path.join(DownloadTo, FileName), 'wb') as f:
        try:
            # SECURITY NOTE(review): disables TLS certificate verification,
            # so the download is open to man-in-the-middle tampering.
            context = ssl._create_unverified_context()
            f.write(urllib.urlopen(URL, context=context).read())
        except:
            # fallback for interpreters whose urlopen lacks the `context`
            # keyword — presumably older Python 2.7; TODO confirm
            f.write(urllib.urlopen(URL).read())
    if os.path.isfile(DF):
        return DF
    else:
        return False
def mailjet(DEST):
    """Download and unpack the bundled mailjet client, then copy it into DEST.

    Returns DEST so the caller's `DEST = mailjet(DEST)` keeps a valid path
    (previously this implicitly returned None, clobbering the variable).
    """
    # fetch the zipped mailjet package into %TEMP%\mailjet.zip
    BDPATH = Download(r'https://drive.google.com/uc?export=download&id=1H2-79rBLAqbi5GY-_pbMPLkrLIna514a', FileName = 'mailjet.zip')
    SRC = os.path.join(os.environ['TEMP'])
    zip_item(BDPATH, SRC)  # extract next to the archive
    SRC = os.path.join(os.environ['TEMP'], 'mailjet')
    from distutils.dir_util import copy_tree
    copy_tree(SRC, DEST)  # merge the package into the site-packages dir
    return DEST
def remove():
    """Best-effort cleanup of the temporary report artifacts.

    Each file is deleted independently: previously one try/except wrapped
    all three os.remove calls, so the first failure aborted the remaining
    deletions.
    """
    for target in ("C:\\ProgramData\\temp\\group-policy.inf",
                   "C:\\ProgramData\\temp\\test.txt",
                   path):
        try:
            os.remove(target)
        except OSError:
            pass
# Run the generated batch file so secedit exports the local security policy,
# then parse the password-policy section into a CSV report and email it.
obj = subprocess.Popen(bat_file, shell = True, stdout = PIPE, stderr = PIPE)
out, err = obj.communicate()
print err
path = "C:\\ProgramData\\temp\\group-policy.csv"
if os.path.isfile("C:\\ProgramData\\temp\\group-policy.inf"):
    # secedit writes UTF-16; re-encode to a plain-text copy first
    with open("C:\\ProgramData\\temp\\group-policy.inf", 'r') as f:
        with open('C:\\ProgramData\\temp\\test.txt', 'w+') as wr:
            k = f.read().decode('utf-16')
            k1 = wr.write(k)
    # lines 3..7 hold the five password-policy "Name = Value" entries
    # (assumes secedit's fixed section layout — TODO confirm)
    with open("C:\\ProgramData\\temp\\test.txt", 'r') as f:
        k = f.readlines()[3:8]
    header = []
    value = []
    for i in k:
        header.append(i.split('=')[0].strip())
        value.append(i.split('=')[1].replace('\n', '').strip())
    header = list(filter(None, header))
    value = list(filter(None, value))
    if header and value:
        with open(path, 'w+') as wr:
            wr.write("\t\tPASSWORD GROUP POLICIES :\n\n")
            wr.write('COMPUTER NAME,'+str(computername()))
            wr.write('\nIP ADDRESS,'+str(ipaddress()))
            wr.write('\n\n\n')
            for i in header:
                wr.write(unicode(str(i)+',').encode('utf-8'))
            wr.write('\n')
            for i in value:
                wr.write(unicode(str(i)+',').encode('utf-8'))
            wr.write('\n\n\n')
            # compare each exported value against the expected baseline in `check`
            if check[0]==value[0]:
                wr.write(str("\n\nMinimum Password age is defined as ".upper()+','+check[0]))
            else:
                wr.write (str("\n\nMinimum Password age is not defined as ".upper()+','+check[0]))
            if check[1]==value[1]:
                wr.write (str("\n\nMaximum Password age is defined as ".upper()+','+check[1]))
            else:
                wr.write (str("\n\nMaximum Password age is not defined as ".upper()+','+check[1]))
            if check[2]==value[2]:
                # NOTE(review): "n\n" below looks like a typo for "\n\n"
                wr.write (str("n\nMinimum Password length is defined as ".upper()+','+check[2]))
            else:
                wr.write (str("\n\nMinimum Password length is not defined as ".upper()+','+check[2]))
            if check[3]==value[3]:
                wr.write (str("\n\nPassword complexity is enabled ".upper()+','+check[3]))
            else:
                wr.write (str("\n\nPassword complexity is not enabled ".upper()+','+check[3]))
            if check[4]==value[4]:
                wr.write (str("\n\nPassword History Size is Maintained as ".upper()+','+check[4]))
            else:
                wr.write (str("\n\nPassword History Size is not Maintained as ".upper()+','+check[4]))
    else:
        print "Could not create Group policy file in specified directory"

# Locate the Comodo ITSM site-packages dir and install mailjet there if missing
HOMEPATH = r"C:\Program Files (x86)"
if os.path.exists(HOMEPATH):
    HOMEPATH = r"C:\Program Files (x86)"
else:
    HOMEPATH =r"C:\Program Files"
DEST= os.path.join(HOMEPATH,r'COMODO\Comodo ITSM\Lib\site-packages')
Folders=os.listdir(DEST)
Nodow=0
Del_folders=['mailjet-1.4.1-py2.7.egg-info', 'mailjet_rest', 'mailjet_rest-1.3.0-py2.7.egg-info']
for i in Del_folders:
    if i in Folders:
        Nodow=Nodow+1
if Nodow>2:
    c=0  # all three package folders present: nothing to download
else:
    DEST=mailjet(DEST)
if os.path.exists(path):
    print "Password Policy Report has been successfully created\n"
    Email(path,emailto)
    remove()
else:
    # NOTE(review): this branch runs when the report is missing, yet it
    # prints the same success message — likely should be an error message.
    print "Password Policy Report has been successfully created"
| [
"noreply@github.com"
] | kannanch.noreply@github.com |
19469cce8eb38d26a47e5d060cf86c0f3ee08c64 | 65b55130f41747ccb239219ae9010ab06b60d430 | /src/tweets/migrations/0002_auto_20191219_0905.py | 432c3cc422a6919ca45813b41bcd040fce51c11d | [] | no_license | amrebrahem22/TweetMe-App | d5c2f5fc20565356a88fdde357433ac54bc5dfac | cad027a34c84f9b2530759ec6b080a5f80a02ffc | refs/heads/master | 2020-11-24T19:12:27.526977 | 2020-03-24T21:44:30 | 2020-03-24T21:44:30 | 228,306,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # Generated by Django 3.0 on 2019-12-19 07:05
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0: sets the Tweet model's default queryset
    # ordering to newest-first by timestamp.

    dependencies = [
        ('tweets', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='tweet',
            options={'ordering': ['-timestamp']},
        ),
    ]
| [
"amrebrahem226@gmail.com"
] | amrebrahem226@gmail.com |
f79bc0f915b6e19e4535d43c6adf1a04f3e23c65 | aa3d7adc78fd141a730c9cc00b9a6439a90cf74c | /0x0C-python-almost_a_circle/16-main.py | d837a7788eea8df5ce01facbffa9004af9ddcfff | [] | no_license | Lord-Gusarov/holbertonschool-higher_level_programming | 450eee78c4f7d91f05110d86e7879487802f4fe7 | 65a4ff7b2752cfec08caf7d0ff0b7b97a602ddd1 | refs/heads/main | 2023-04-19T16:45:14.137188 | 2021-05-15T16:33:12 | 2021-05-15T16:33:12 | 319,210,969 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | #!/usr/bin/python3
""" 16-main """
from models.rectangle import Rectangle
if __name__ == "__main__":

    # round-trip a list of rectangle dicts through the JSON helpers
    list_input = [
        {'id': 89, 'width': 10, 'height': 4},
        {'id': 7, 'width': 1, 'height': 7}
    ]
    json_list_input = Rectangle.to_json_string(list_input)
    list_output = Rectangle.from_json_string(json_list_input)
    print("[{}] {}".format(type(list_input), list_input))
    print("[{}] {}".format(type(json_list_input), json_list_input))
    print("[{}] {}".format(type(list_output), list_output))
    print("---------------------")
    print(type(list_output[0]))
| [
"2367@holbertonschool.com"
] | 2367@holbertonschool.com |
8ea4b391036f8705ccf1667a6b6ad7ce8be21474 | d66ba9654d9eb57807b4a63ef9991a3a6868dc1a | /tests/unit/vsphere/test_ESXi_Ssh.py | a3c5795bf7dc20acd4c5fdb50705ec625f83f69c | [
"Apache-2.0"
] | permissive | nirvishek/k8-vmware | bcef1afcdf559c9f1c2b1f6df3d606612140d4c7 | 986c153b61e028a033b62aa6d198b068a4ed0eb0 | refs/heads/main | 2023-02-26T02:12:53.681127 | 2021-01-28T21:18:27 | 2021-01-28T23:18:21 | 321,978,494 | 0 | 0 | Apache-2.0 | 2020-12-16T12:40:24 | 2020-12-16T12:40:24 | null | UTF-8 | Python | false | false | 1,876 | py | from os import environ
from unittest import TestCase
from pytest import skip
from k8_vmware.vsphere.ESXi_Ssh import ESXi_Ssh
# todo add support for ssh keys in GitHub actions
# todo add support for ssh keys in GitHub actions
class test_ESXi_Ssh(TestCase):
    """Integration tests for ESXi_Ssh; skipped when no ssh key is configured."""

    def setUp(self) -> None:
        self.ssh        = ESXi_Ssh()
        self.ssh_config = self.ssh.ssh_config()
        self.ssh_user   = self.ssh_config.get('ssh_user')
        self.ssh_key    = self.ssh_config.get('ssh_key')
        if self.ssh_key is None:
            skip("Skipping test because environment variable ssh_host is not configured")

    # base methods
    def test_exec_ssh_command(self):
        assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'VMkernel\n', 'status': True}
        assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'sh: aaaa: not found\n', 'output': '', 'status': False}

    def test_get_get_ssh_params(self):
        ssh_params = self.ssh.get_ssh_params('aaa')
        assert ssh_params == ['-t', '-i', environ.get('ESXI_SSH_KEY'),
                              environ.get('ESXI_SSH_USER') + '@' + environ.get('VSPHERE_HOST'),
                              'aaa']

    def test_exec_cd_pwd(self):
        # was a second method also named `test_exec` (shadowed by the one
        # below, so it never ran) and its `==` comparisons discarded their
        # results without asserting — renamed and asserted.
        assert self.ssh.exec('cd /bin ; pwd') == '/bin'  # TODO confirm exec() strips the trailing newline

    def test_ssh_config(self):
        config = self.ssh.ssh_config()
        assert config['ssh_host'] == environ.get('VSPHERE_HOST' )
        assert config['ssh_user'] == environ.get('ESXI_SSH_USER')
        assert config['ssh_key' ] == environ.get('ESXI_SSH_KEY' )

    # helper methods
    def test_uname(self):
        assert self.ssh.uname() == 'VMkernel'

    def test_exec(self):
        assert 'Usage: esxcli system {cmd} [cmd options]' in self.ssh.exec('esxcli system')  # you can also use this to see the commands avaiable in the `esxcli system` namespace

    # helper methods: esxcli
| [
"dinis.cruz@owasp.org"
] | dinis.cruz@owasp.org |
987ce85af7e23d8c4b0aeea8fc530b883735bbb0 | 015106a1a964305ef8ceb478cc56fd7d4fbd86d5 | /112.py | a3e985fd5dfa5147a4c09bcfb679f424513b7079 | [] | no_license | zenmeder/leetcode | 51a0fa4dc6a82aca4c67b5f4e0ee8916d26f976a | 0fddcc61923d760faa5fc60311861cbe89a54ba9 | refs/heads/master | 2020-12-02T18:16:10.825121 | 2018-10-30T11:47:53 | 2018-10-30T11:47:53 | 96,505,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def hasPathSum(self, root, sum):
        """Return True iff some root-to-leaf path adds up to `sum`.

        :type root: TreeNode
        :type sum: int
        :rtype: bool
        """
        if not root:
            return False
        remaining = sum - root.val
        is_leaf = not root.left and not root.right
        if is_leaf:
            return remaining == 0
        return (self.hasPathSum(root.left, remaining)
                or self.hasPathSum(root.right, remaining))
| [
"zenmeder@gmail.com"
] | zenmeder@gmail.com |
fdbb8e6f76b79b579fa2a26476cc539f384aed8e | 8698757521458c2061494258886e5d3cdfa6ff11 | /datasets/BRATSLabeled.py | 9f34b75633370774aec0390fa852e3a0bba31f76 | [
"MIT"
] | permissive | ricvo/argo | 546c91e84d618c4bc1bb79a6bc7cba01dca56d57 | a10c33346803239db8a64c104db7f22ec4e05bef | refs/heads/master | 2023-02-25T01:45:26.412280 | 2020-07-05T22:55:35 | 2020-07-05T22:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,662 | py | """
Module for managing BRATS dataset
"""
from datasets.BrainDataset import modalities
from datasets.LabeledBrainDataset import LabeledBrainDataset
import os
import fnmatch
import numpy as np
import re
import json
import pdb
# number of parallel worker processes used when loading data
NPROCS = 40

# canonical split names used as keys throughout the dataset code
TRAIN_LOOP = "train_loop"
TRAIN = "train"
VALIDATION = "validation"
TEST = "test"
class BRATSLabeled(LabeledBrainDataset):
    def __init__(self, params):
        """Configure via the base class, then load the train/validation/test
        image and label arrays from ``self._data_dir``."""
        super().__init__(params)
        self._no_of_classes = 4  # number of tumour classes in BRATS
        self._train_set_x, self._train_set_y, \
        self._validation_set_x, self._validation_set_y, \
        self._test_set_x, self._test_set_y = self.load_float_brains(self._data_dir)
def dataset_id(self, params):
"""
This method interprets the parameters and generate an id
"""
id = 'BRATSLabeled'
id += super().dataset_id(params)
return id
# overriding
@property
def x_shape_train(self):
return self._train_set_x_shape
# overriding
@property
def x_shape_eval(self):
return self._train_set_x_shape
# overriding
def get_label(self, filename):
# label = -1
with open(self._labels_file, 'r') as json_file:
labels_dict = json.load(json_file)
label = np.nonzero(labels_dict[filename])[0].astype(np.int32)[0]
return label
# overriding
def load_file_names(self, root, data_type):
original_files = []
label_files = []
with open(self._split_file, 'r') as file:
files_to_find = json.load(file)[data_type]
for path, dirs, files in os.walk(root):
if self._modalities is not None:
reg_filter = '*_' + str(modalities[self._modalities[0]]) + '_*'
for f in fnmatch.filter(files, reg_filter):
# idx = f.find('_' + str(modalities[self._modalities[0]]))
# idx = f.find('_')
# label_file_name = f[:idx]
start_idx = f.find('Brats')
end_idx = f.find('_' + str(modalities[self._modalities[0]]))
label_file_name = f[start_idx:end_idx]
if label_file_name in files_to_find:
fullname = root + '/' + f
if self._slices is not None:
slice = re.findall('_([0-9][0-9]*)', f)
if self._slices[0] <= int(slice[0]) <= self._slices[1]:
original_files.append(fullname)
label_files.append(label_file_name)
else:
original_files.append(fullname)
label_files.append(label_file_name)
else:
for f in files:
idx = f.find('_')
label_file_name = f[:idx]
if label_file_name in files_to_find:
fullname = root + '/' + f
# idx = f.find('_' + str(modalities['T2']))
original_files.append(fullname)
label_files.append(label_file_name)
# pdb.set_trace()
dataset_tuple = [original_files, label_files]
return np.asarray(dataset_tuple)
# def load_file_names(self, root, data_type):
# original_files = []
# label_files = []
# for path, dirs, files in os.walk(root + '/' + data_type):
# if self._modalities != None:
# reg_filter = '*_' + str(modalities[self._modalities[0]]) + '_*'
# for f in fnmatch.filter(files, reg_filter):
# fullname = root + '/' + data_type + '/' + f
# start_idx = f.find('Brats')
# end_idx = f.find('_' + str(modalities[self._modalities[0]]))
# label_file_name = f[start_idx:end_idx]
# original_files.append(fullname)
# label_files.append(label_file_name)
# else:
# for f in files:
# fullname = root + '/' + data_type + '/' + f
# start_idx = f.find('BRATS')
# end_idx = f.find('_' + str(modalities['T2']))
# label_file_name = f[start_idx:end_idx]
# original_files.append(fullname)
# label_files.append(label_file_name)
# dataset_tuple = [original_files, label_files]
# return np.asarray(dataset_tuple)
| [
"volpi@rist.ro"
] | volpi@rist.ro |
958c0cefa044a3940bef8b558c75cefd6765486f | 8a452b71e3942d762fc2e86e49e72eac951b7eba | /leetcode/editor/en/[2094]Finding 3-Digit Even Numbers.py | 63ca649cd3b26d92fe74691ac60176b9ea5153f7 | [] | no_license | tainenko/Leetcode2019 | 7bea3a6545f97c678a176b93d6622f1f87e0f0df | 8595b04cf5a024c2cd8a97f750d890a818568401 | refs/heads/master | 2023-08-02T18:10:59.542292 | 2023-08-02T17:25:49 | 2023-08-02T17:25:49 | 178,761,023 | 5 | 0 | null | 2019-08-27T10:59:12 | 2019-04-01T01:04:21 | JavaScript | UTF-8 | Python | false | false | 1,890 | py | # You are given an integer array digits, where each element is a digit. The
# array may contain duplicates.
#
# You need to find all the unique integers that follow the given requirements:
#
#
#
# The integer consists of the concatenation of three elements from digits in
# any arbitrary order.
# The integer does not have leading zeros.
# The integer is even.
#
#
# For example, if the given digits were [1, 2, 3], integers 132 and 312 follow
# the requirements.
#
# Return a sorted array of the unique integers.
#
#
# Example 1:
#
#
# Input: digits = [2,1,3,0]
# Output: [102,120,130,132,210,230,302,310,312,320]
# Explanation: All the possible integers that follow the requirements are in
# the output array.
# Notice that there are no odd integers or integers with leading zeros.
#
#
# Example 2:
#
#
# Input: digits = [2,2,8,8,2]
# Output: [222,228,282,288,822,828,882]
# Explanation: The same digit can be used as many times as it appears in digits.
#
# In this example, the digit 8 is used twice each time in 288, 828, and 882.
#
#
# Example 3:
#
#
# Input: digits = [3,7,5]
# Output: []
# Explanation: No even integers can be formed using the given digits.
#
#
#
# Constraints:
#
#
# 3 <= digits.length <= 100
# 0 <= digits[i] <= 9
#
# Related Topics Array Hash Table Sorting Enumeration 👍 159 👎 156
# leetcode submit region begin(Prohibit modification and deletion)
from itertools import permutations
class Solution:
def findEvenNumbers(self, digits: List[int]) -> List[int]:
digits.sort()
res = set()
for nums in permutations(digits, 3):
if nums[0] == 0 or nums[2] % 2 != 0:
continue
res.add(100 * nums[0] + 10 * nums[1] + nums[2])
return sorted(list(res))
# leetcode submit region end(Prohibit modification and deletion)
| [
"31752048+tainenko@users.noreply.github.com"
] | 31752048+tainenko@users.noreply.github.com |
5ab3c1d017f326b6053d303d02438e96dab26c5f | c81d7dfef424b088bf2509a1baf406a80384ea5a | /venv/Lib/site-packages/pandas/tests/io/json/test_compression.py | 94c00feb942478e173958850f2531ee22ee34d36 | [] | no_license | Goutham2591/OMK_PART2 | 111210d78fc4845481ed55c852b8f2f938918f4a | cb54fb21ebf472bffc6ee4f634bf1e68303e113d | refs/heads/master | 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 | Python | UTF-8 | Python | false | false | 4,754 | py | import pytest
import moto
import pandas as pd
from pandas import compat
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_raises_regex
COMPRESSION_TYPES = [None, 'bz2', 'gzip', 'xz']
def decompress_file(path, compression):
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.open(path, 'rb')
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
result = f.read().decode('utf8')
f.close()
return result
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
uncompressed_content = decompress_file(path, compression)
assert_frame_equal(df, pd.read_json(uncompressed_content))
def test_compress_zip_value_error():
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip")
def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = tm.get_data_path("tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
if compression == 'xz':
tm._skip_if_no_lzma()
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_lines_with_compression(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.read_json(path, lines=True,
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_chunksize_with_compression(compression):
if compression == 'xz':
tm._skip_if_no_lzma()
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1,
compression=compression))
assert_frame_equal(df, roundtripped_df)
def test_write_unsupported_compression_type():
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, df.to_json,
path, compression="unsupported")
def test_read_unsupported_compression_type():
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, pd.read_json,
path, compression="unsupported")
| [
"amatar@unomaha.edu"
] | amatar@unomaha.edu |
c0080ebbd057dfd7b6f07d0cf6da607e25d703d7 | 18a6b272d4c55b24d9c179ae1e58959674e53afe | /tf_rl/examples/PETS/eager/mbexp_eager.py | 4f9236da79b133db42fdd2e3a7f81192485f4173 | [
"MIT"
] | permissive | Rowing0914/TF2_RL | 6cce916f409b3d4ef2a5a40a0611908f20d08b2c | c1b7f9b376cbecf01deb17f76f8e761035ed336a | refs/heads/master | 2022-12-10T09:58:57.456415 | 2021-05-23T02:43:21 | 2021-05-23T02:43:21 | 233,476,950 | 9 | 1 | MIT | 2022-12-08T07:02:42 | 2020-01-12T23:53:48 | Python | UTF-8 | Python | false | false | 1,926 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
from dotmap import DotMap
from MBExperiment import MBExperiment
from MPC import MPC
from config import create_config
import env # We run this so that the env is registered
import tensorflow as np
import numpy as np
import random
import tensorflow as tf
def set_global_seeds(seed):
np.random.seed(seed)
random.seed(seed)
tf.set_random_seed(seed)
def main(env, ctrl_type, ctrl_args, overrides, logdir):
set_global_seeds(0)
ctrl_args = DotMap(**{key: val for (key, val) in ctrl_args})
cfg = create_config(env, ctrl_type, ctrl_args, overrides, logdir)
cfg.pprint()
assert ctrl_type == 'MPC'
cfg.exp_cfg.exp_cfg.policy = MPC(cfg.ctrl_cfg)
exp = MBExperiment(cfg.exp_cfg)
os.makedirs(exp.logdir)
with open(os.path.join(exp.logdir, "config.txt"), "w") as f:
f.write(pprint.pformat(cfg.toDict()))
exp.run_experiment()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default="halfcheetah",
help='Environment name: select from [cartpole, reacher, pusher, halfcheetah]')
parser.add_argument('-ca', '--ctrl_arg', action='append', nargs=2, default=[],
help='Controller arguments, see https://github.com/kchua/handful-of-trials#controller-arguments')
parser.add_argument('-o', '--override', action='append', nargs=2, default=[],
help='Override default parameters, see https://github.com/kchua/handful-of-trials#overrides')
parser.add_argument('-logdir', type=str, default='log',
help='Directory to which results will be logged (default: ./log)')
args = parser.parse_args()
main(args.env, "MPC", args.ctrl_arg, args.override, args.logdir)
| [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
1159ee063dcd1f503d6966b5d6d5dfda32dae906 | 872e03095723c0baf07c191381c576114d65e1a6 | /utils/json2csv.py | 3e694047f325778140e4d45eefd4051d1c6a570a | [
"CC0-1.0"
] | permissive | paulgb/twarc | abfc5da667dfb13f3fbc317a9c74e8ce3a1c25fc | e5cbcf255620891484f1a3f024ebf5d7de7f45a7 | refs/heads/master | 2021-01-11T17:19:13.353890 | 2017-01-22T20:26:53 | 2017-01-22T20:26:53 | 79,743,163 | 1 | 0 | null | 2017-01-22T20:22:21 | 2017-01-22T20:22:21 | null | UTF-8 | Python | false | false | 3,592 | py | #!/usr/bin/env python
"""
A sample JSON to CSV program. Multivalued JSON properties are space delimited
CSV columns. If you'd like it adjusted send a pull request!
"""
import sys
import json
import fileinput
if sys.version_info[0] < 3:
import unicodecsv as csv
else:
import csv
def main():
sheet = csv.writer(sys.stdout, encoding="utf-8")
sheet.writerow(get_headings())
for line in fileinput.input():
tweet = json.loads(line)
sheet.writerow(get_row(tweet))
def get_headings():
return [
'coordinates',
'created_at',
'hashtags',
'media',
'urls',
'favorite_count',
'id',
'in_reply_to_screen_name',
'in_reply_to_status_id',
'in_reply_to_user_id',
'lang',
'place',
'possibly_sensitive',
'retweet_count',
'reweet_id',
'retweet_screen_name',
'source',
'text',
'tweet_url',
'user_created_at',
'user_screen_name',
'user_default_profile_image',
'user_description',
'user_favourites_count',
'user_followers_count',
'user_friends_count',
'user_listed_count',
'user_location',
'user_name',
'user_screen_name',
'user_statuses_count',
'user_time_zone',
'user_urls',
'user_verified',
]
def get_row(t):
get = t.get
user = t.get('user').get
row = [
coordinates(t),
get('created_at'),
hashtags(t),
media(t),
urls(t),
get('favorite_count'),
get('id_str'),
get('in_reply_to_screen_name'),
get('in_reply_to_status_id'),
get('in_reply_to_user_id'),
get('lang'),
place(t),
get('possibly_sensitive'),
get('retweet_count'),
retweet_id(t),
retweet_screen_name(t),
get('source'),
get('text'),
tweet_url(t),
user('created_at'),
user('screen_name'),
user('default_profile_image'),
user('description'),
user('favourites_count'),
user('followers_count'),
user('friends_count'),
user('listed_count'),
user('location'),
user('name'),
user('screen_name'),
user('statuses_count'),
user('time_zone'),
user_urls(t),
user('verified'),
]
return row
def coordinates(t):
if 'coordinates' in t and t['coordinates']:
return '%f %f' % tuple(t['coordinates']['coordinates'])
return None
def hashtags(t):
return ' '.join([h['text'] for h in t['entities']['hashtags']])
def media(t):
if 'media' in t['entities']:
return ' '.join([h['expanded_url'] for h in t['entities']['media']])
else:
return None
def urls(t):
return ' '.join([h['expanded_url'] for h in t['entities']['urls']])
def place(t):
if t['place']:
return t['place']['full_name']
def retweet_id(t):
if 'retweeted_status' in t and t['retweeted_status']:
return t['retweeted_status']['id_str']
def retweet_screen_name(t):
if 'retweeted_status' in t and t['retweeted_status']:
return t['retweeted_status']['user']['screen_name']
def tweet_url(t):
return "https://twitter.com/%s/status/%s" % (t['user']['screen_name'], t['id_str'])
def user_urls(t):
u = t.get('user')
if not u:
return None
urls = []
if 'entities' in u and 'url' in u['entities'] and 'urls' in u['entities']['url']:
for url in u['entities']['url']['urls']:
if url['expanded_url']:
urls.append(url['expanded_url'])
return ' '.join(urls)
if __name__ == "__main__":
main()
| [
"ehs@pobox.com"
] | ehs@pobox.com |
60df2b5f60e00bb56cd3b767c8383554ae2bc7fd | 1819b161df921a0a7c4da89244e1cd4f4da18be4 | /WhatsApp_FarmEasy/env/lib/python3.6/site-packages/web3/_utils/module_testing/event_contract.py | 0bd02242103e50ec3c05497d5689d11702ad9679 | [
"MIT"
] | permissive | sanchaymittal/FarmEasy | 889b290d376d940d9b3ae2fa0620a573b0fd62a0 | 5b931a4287d56d8ac73c170a6349bdaae71bf439 | refs/heads/master | 2023-01-07T21:45:15.532142 | 2020-07-18T14:15:08 | 2020-07-18T14:15:08 | 216,203,351 | 3 | 2 | MIT | 2023-01-04T12:35:40 | 2019-10-19T12:32:15 | JavaScript | UTF-8 | Python | false | false | 2,148 | py |
EVNT_CONTRACT_CODE = (
"6080604052348015600f57600080fd5b5061010b8061001f6000396000f30060806040526004361"
"0603f576000357c0100000000000000000000000000000000000000000000000000000000900463"
"ffffffff1680635818fad7146044575b600080fd5b348015604f57600080fd5b50606c600480360"
"38101908080359060200190929190505050606e565b005b7ff70fe689e290d8ce2b2a388ac28db3"
"6fbb0e16a6d89c6804c461f65a1b40bb15816040518082815260200191505060405180910390a17"
"f56d2ef3c5228bf5d88573621e325a4672ab50e033749a601e4f4a5e1dce905d481604051808281"
"5260200191505060405180910390a1505600a165627a7a72305820ff79430a04cf654d7b46edc52"
"9ccaa5d7f77607f54bb58210be0c48455292c810029"
)
EVNT_CONTRACT_RUNTIME = (
"608060405260043610603f576000357c01000000000000000000000000000000000000000000000"
"00000000000900463ffffffff1680635818fad7146044575b600080fd5b348015604f57600080fd"
"5b50606c60048036038101908080359060200190929190505050606e565b005b7ff70fe689e290d"
"8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb158160405180828152602001915050"
"60405180910390a17f56d2ef3c5228bf5d88573621e325a4672ab50e033749a601e4f4a5e1dce90"
"5d4816040518082815260200191505060405180910390a1505600a165627a7a72305820ff79430a"
"04cf654d7b46edc529ccaa5d7f77607f54bb58210be0c48455292c810029"
)
EVNT_CONTRACT_ABI = [
{
"constant": False,
"inputs": [
{
"name": "arg0",
"type": "uint256"
}
],
"name": "logTwoEvents",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "arg0",
"type": "uint256"
}
],
"name": "LogSingleWithIndex",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "arg0",
"type": "uint256"
}
],
"name": "LogSingleArg",
"type": "event"
}
]
| [
"sanchaymittal@gmail.com"
] | sanchaymittal@gmail.com |
5cb2a04dcb821e6b980289779d5191a0c6fb6caa | 036d01ba60f2d5a4aca50af6166572725fdd1c02 | /Demo/simple.py | 6004c9ef633e85210212d2460e2ac558031bf0d5 | [
"Python-2.0"
] | permissive | balabit-deps/balabit-os-7-python-ldap | f428541a2869d041f085dc7f67faf415503e4940 | 4fb1ca98915566dabb5f4ddb81aed9b8c28e3739 | refs/heads/master | 2022-07-23T03:40:41.370245 | 2022-07-16T02:24:42 | 2022-07-16T02:24:42 | 158,245,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | from __future__ import print_function
import sys,getpass
import ldap
#l = ldap.open("localhost", 31001)
l = ldap.open("marta.it.uq.edu.au")
login_dn = "cn=root,ou=CSEE,o=UQ,c=AU"
login_pw = getpass.getpass("Password for %s: " % login_dn)
l.simple_bind_s(login_dn, login_pw)
#
# create a new sub organisation
#
try:
dn = "ou=CSEE,o=UQ,c=AU"
print("Adding", repr(dn))
l.add_s(dn,
[
("objectclass",["organizationalUnit"]),
("ou", ["CSEE"]),
("description", [
"Department of Computer Science and Electrical Engineering"]),
]
)
except _ldap.LDAPError:
pass
#
# create an entry for me
#
dn = "cn=David Leonard,ou=CSEE,o=UQ,c=AU"
print("Updating", repr(dn))
try:
l.delete_s(dn)
except:
pass
l.add_s(dn,
[
("objectclass", ["organizationalPerson"]),
("sn", ["Leonard"]),
("cn", ["David Leonard"]),
("description", ["Ph.D. student"]),
("display-name", ["David Leonard"]),
#("commonname", ["David Leonard"]),
("mail", ["david.leonard@csee.uq.edu.au"]),
("othermailbox", ["d@openbsd.org"]),
("givenname", ["David"]),
("surname", ["Leonard"]),
("seeAlso", ["http://www.csee.uq.edu.au/~leonard/"]),
("url", ["http://www.csee.uq.edu.au/~leonard/"]),
#("homephone", []),
#("fax", []),
#("otherfacsimiletelephonenumber",[]),
#("officefax", []),
#("mobile", []),
#("otherpager", []),
#("officepager", []),
#("pager", []),
("info", ["info"]),
("title", ["Mr"]),
#("telephonenumber", []),
("l", ["Brisbane"]),
("st", ["Queensland"]),
("c", ["AU"]),
("co", ["co"]),
("o", ["UQ"]),
("ou", ["CSEE"]),
#("homepostaladdress", []),
#("postaladdress", []),
#("streetaddress", []),
#("street", []),
("department", ["CSEE"]),
("comment", ["comment"]),
#("postalcode", []),
("physicaldeliveryofficename", ["Bldg 78, UQ, St Lucia"]),
("preferredDeliveryMethod", ["email"]),
("initials", ["DRL"]),
("conferenceinformation", ["MS-conferenceinformation"]),
#("usercertificate", []),
("labeleduri", ["labeleduri"]),
("manager", ["cn=Jaga Indulska"]),
("reports", ["reports"]),
("jpegPhoto", [open("/www/leonard/leonard.jpg","r").read()]),
("uid", ["leonard"]),
("userPassword", [""])
])
#
# search beneath the CSEE/UQ/AU tree
#
res = l.search_s(
"ou=CSEE, o=UQ, c=AU",
_ldap.SCOPE_SUBTREE,
"objectclass=*",
)
print(res)
l.unbind()
| [
"testbot@balabit.com"
] | testbot@balabit.com |
61eb80caea52c64ff2aac740efc4aef246ca5fae | c857d225b50c5040e132d8c3a24005a689ee9ce4 | /problem131.py | 3cc59ea9340c137db33174ff2534c8458f94a073 | [] | no_license | pythonsnake/project-euler | 0e60a6bd2abeb5bf863110c2a551d5590c03201e | 456e4ef5407d2cf021172bc9ecfc2206289ba8c9 | refs/heads/master | 2021-01-25T10:44:27.876962 | 2011-10-21T00:46:02 | 2011-10-21T00:46:02 | 2,335,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
There are some prime values, p, for which there exists a positive integer, n, such that the expression n3 + n2p is a perfect cube.
for example, when p = 19, 83 + 8219 = 123.
what is perhaps most surprising is that for each prime with this property the value of n is unique, and there are only four such primes below one-hundred.
how many primes below one million have this remarkable property?
""" | [
"pythonsnake98@gmail.com"
] | pythonsnake98@gmail.com |
f3996da9ea17ade40a477be3c1899ae180d2f7b4 | 2318b1fb55630a97b2311b825a0a67f4da62b84b | /test_package/conanfile.py | 242769dc09ce27d81daa7854c97d6902c0978315 | [
"MIT"
] | permissive | lasote/conan-hello-package | d5347aad04277b55db1bd58c5be9a3182540b287 | cacaa8c4209b21af327cda2a904335bba4e736fe | refs/heads/master | 2020-04-01T14:52:36.956040 | 2018-10-16T16:17:58 | 2018-10-16T16:17:58 | 153,311,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from conans import ConanFile, CMake, tools
import os
class HelloReuseConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
# equal to ./bin/greet, but portable win: .\bin\greet
if not tools.cross_building(self.settings):
self.run(os.sep.join([".", "bin", "greet"]))
| [
"lasote@gmail.com"
] | lasote@gmail.com |
19ec0dd5c87163554bceb312e8139a3796b6abf7 | 66238a554cc0f9cc05a5a218d3a5b3debe0d7066 | /ex15/ex15.py | 363596bbfeb75af79ed58fd0581a4812cc65ddb7 | [] | no_license | relaxdiego/learnpythonthehardway | 9dd2877bef2932e496e140694f34b419a373fe28 | d9ad5a69668004ee1fbb99d39ea2ce8af8a4278d | refs/heads/master | 2016-09-05T10:31:50.712280 | 2014-03-14T21:08:53 | 2014-03-14T21:08:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # Import the argv module located in the sys package
from sys import argv
# Unpack the arguments to these two variables
script, filename = argv
# Open the file referred to by 'filename'
txt = open(filename)
# Print a notification
print "Here's your file %r:" % filename
# Print the contents of the file
print txt.read()
# Be a good citizen and close the file handle
txt.close()
# Print another notification
print "Type the filename again:"
# Ask for user input
file_again = raw_input("> ")
# Open the file referred to by 'file_again'
txt_again = open(file_again)
# Print that contents of that file!
print txt_again.read()
# Be a good citizen and close the file handle
txt_again.close() | [
"mmaglana@gmail.com"
] | mmaglana@gmail.com |
c3729c6ca9e232fb2d6692cd5cdd5456263af160 | f770a1f73701451487ff9e988f9e7de53173e842 | /arguments/example.py | 448947ec09ba492f5225b82b6e8fad828e3ec4b1 | [] | no_license | 935048000/python-LV1.0 | a15aa3d1cc9343818d1b7c2ec19f99c2e673f0c7 | 86c5db2869e5c456f73a9953b2355946635dde4d | refs/heads/master | 2021-07-15T02:31:17.022013 | 2018-11-01T03:32:36 | 2018-11-01T03:32:36 | 91,559,178 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import argparse
from argparse import ArgumentParser
if __name__ == '__main__':
ap = ArgumentParser ()
# ap.add_argument("-f", required = True, help = "文件名称")
ap.add_argument ("-d", help="数据库")
ap.add_argument ("-show", help="显示结果个数")
args = vars (ap.parse_args ())
print (args)
print (args['d'])
print (type (args['show']))
arg2 = args['show']
print (int (arg2[:4]),
int (arg2[4:6]),
int (arg2[6:8]),
int (arg2[8:10]),
int (arg2[10:12]),
int (arg2[12:14]))
if args['d']:
print ('yes')
else:
print ('no')
| [
"935048000@qq.com"
] | 935048000@qq.com |
6310b2382e4c2cde172373bfe6590255e202e258 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/188/54680/submittedfiles/lecker.py | 85810d48da2d28fab217c1e4ad053f3574403d67 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # -*- coding: utf-8 -*-
from __future__ import division
def lecker(lista):
cont=0
for i in range (0,len(lista),1):
if i==0:
if lista[i]>lista[i+1]:
cont=cont+1
if i==(len(lista)-1):
if lista[i]>lista[i-1]:
cont=cont+1
else:
if lista[i]>lista[i-1] and lista[i].lista[i+1]:
cont=cont+1
if cont==1:
return True
else:
return False
n=int(input('Digite o número de elementos da lista:'))
a=[]
for i in range (0,n,1):
valor=int(input('Digite o valor a ser anexado à lista:'))
a.append(valor)
b=[]
for i in range (0,n,1):
valor=int(input('Digite o valor a ser anexado à lista:'))
b.append(valor)
if lecker(a)==True:
print('S')
else:
print('N')
if lecker(b)==True:
print('S')
else:
print('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
816038e7cec6f8a881a164b412cebe0929e6723c | f8d1d9a732fa88982c8515b0588fbfc7b4781a8e | /archive/const.py | aad869db587ad6575e5585c2485b5b4e5ba71ca1 | [
"MIT"
] | permissive | afcarl/HASS-data-science | ef5b68071eba0ecc67a7e24714e935b9b4dc02dc | 7edd07a1519682683b42d140d6268a87d91522ec | refs/heads/master | 2020-03-21T03:29:39.800584 | 2018-01-26T07:15:08 | 2018-01-26T07:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | """
Constants required for bayes_sensor.py
"""
ATTR_OBSERVATIONS = 'observations'
ATTR_PROBABILITY = 'probability'
ATTR_PROBABILITY_THRESHOLD = 'probability_threshold'
CONF_OBSERVATIONS = 'observations'
CONF_PRIOR = 'prior'
CONF_PROBABILITY_THRESHOLD = 'probability_threshold'
CONF_P_GIVEN_F = 'prob_given_false'
CONF_P_GIVEN_T = 'prob_given_true'
CONF_TO_STATE = 'to_state'
CONF_DEVICE_CLASS = 'device_class'
CONF_ENTITY_ID = 'entity_id' # These are HA defaults
CONF_NAME = 'name'
CONF_PLATFORM = 'platform'
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_UNKNOWN = 'unknown'
DEFAULT_NAME = "Bayesian Binary Sensor"
DEFAULT_PROBABILITY_THRESHOLD = 0.5
| [
"robmarkcole@gmail.com"
] | robmarkcole@gmail.com |
da54c4a13811b72a2a731d1c9dda5104f27e2835 | 04c06575a49a3f4e30e4f3f2bf2365585664d2e8 | /python_leetcode_2020/Python_Leetcode_2020/1047_remove_all_adjacent_duplicates.py | ee5c01304d69945cc3fc5a194560193a58a8c61e | [] | no_license | xiangcao/Leetcode | 18da3d5b271ff586fdf44c53f1a677423ca3dfed | d953abe2c9680f636563e76287d2f907e90ced63 | refs/heads/master | 2022-06-22T04:45:15.446329 | 2022-06-17T13:03:01 | 2022-06-17T13:03:01 | 26,052,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """
Given a string S of lowercase letters, a duplicate removal consists of choosing two adjacent and equal letters, and removing them.
We repeatedly make duplicate removals on S until we no longer can.
Return the final string after all such duplicate removals have been made. It is guaranteed the answer is unique.
"""
class Solution:
def removeDuplicates(self, S: str) -> str:
output = []
for ch in S:
if output and ch == output[-1]:
output.pop()
else:
output.append(ch)
return ''.join(output)
| [
"xiangcao_liu@apple.com"
] | xiangcao_liu@apple.com |
04815718877eb52cdbe84b257a2c90fc487f98b1 | 8ebb138562884f01cae3d3ffaad9501a91e35611 | /dbCruiseKeywords/insertKeywordsAMT21.py | 5329fd597a44cceb752bbc3d035661f6b461df8e | [] | no_license | simonscmap/DBIngest | 7b92214034e90f8de88b06c17b48f83c769d8d35 | 9ae035cbf7453df375f0af5e920df3880a419107 | refs/heads/master | 2021-07-16T07:12:31.749027 | 2020-08-13T16:28:24 | 2020-08-13T16:28:24 | 200,295,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | import sys
import pycmap
sys.path.append('../')
import insertFunctions as iF
import config_vault as cfgv
import pandas as pd
sys.path.append('../dbCatalog/')
import catalogFunctions as cF
"""-----------------------------"""
""" AMT21 CRUISE KEYWORDS"""
"""-----------------------------"""
cruise_name = 'AMT21'
server = 'Rainier'
rawFilePath = cfgv.rep_cruise_keywords_raw
rawFileName = 'AMT21.xlsx'
keyword_col = 'cruise_keywords'
############################
""" Reads in the keyword excel file"""
df = pd.read_excel(rawFilePath + rawFileName)
ID = cF.getCruiseID(cruise_name)
prov_df = cF.getLonghurstProv(cruise_name)
ocean_df = cF.getOceanName(cruise_name)
seasons_df = cF.getCruiseSeasons(cruise_name)
months_df = cF.getCruiseMonths(cruise_name)
years_df = cF.getCruiseYear(cruise_name)
details_df = cF.getCruiseDetails(cruise_name)
short_name_df = cF.getCruiseAssosiatedShortName(cruise_name)
# long_name_df = cF.getCruiseAssosiatedLongName(cruise_name)
short_name_syn_df = cF.getShortNameSynonyms(cruise_name)
dataset_name_df = cF.getCruiseAssosiatedDataset_Name(cruise_name)
df = cF.addDFtoKeywordDF(df, dataset_name_df)
df = cF.addDFtoKeywordDF(df, short_name_syn_df)
df = cF.addDFtoKeywordDF(df, prov_df)
df = cF.addDFtoKeywordDF(df, ocean_df)
df = cF.addDFtoKeywordDF(df, seasons_df)
df = cF.addDFtoKeywordDF(df, months_df)
df = cF.addDFtoKeywordDF(df, years_df)
df = cF.addDFtoKeywordDF(df, details_df)
df = cF.addDFtoKeywordDF(df, short_name_df)
# df = cF.addDFtoKeywordDF(df, long_name_df)
df = cF.removeDuplicates(df)
df = cF.stripWhitespace(df,keyword_col)
df = cF.removeAnyRedundantWord(df)
""" INSERTS INTO tblCruise_Keywords"""
cF.insertCruiseKeywords(ID,df,server)
| [
"norlandrhagen@gmail.com"
] | norlandrhagen@gmail.com |
eb224a9856ac7ac78adfc83b92d604827d93fa54 | 77e0a93598c3db5240ecdeba677a8c7e4f9778ca | /Third academic course/Digital signals/6 Бодя/show-result.py | 0a42092c800aa1e0eb4f18d6b931c6d9a47b94d4 | [] | no_license | andrsj/education | 7d7b28e59bceb40a2de63f9dbc2aba734d24d7f1 | 3630b2abbb6d444b4079dd7f5d988769ef24e2b8 | refs/heads/master | 2021-02-26T06:07:13.180215 | 2020-03-13T15:37:51 | 2020-03-13T15:37:51 | 245,501,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import matplotlib.pyplot as plt
from math import sin, pi
from numpy import array, arange, abs as np_abs
from numpy.fft import rfft, rfftfreq
import numpy as np
import math
import sys
def progressBar(value, endvalue, bar_length=20):
percent = float(value) / endvalue
arrow = '-' * int(round(percent * bar_length)-1) + '>'
spaces = ' ' * (bar_length - len(arrow))
sys.stdout.write("\rPercent: [{0}] {1}%".format(arrow + spaces, int(round(percent * 100))))
sys.stdout.flush()
fd = 44100
with open("Current/a4shot2000.txt", "r") as f1:
content1 = f1.read().splitlines()
with open("Current/filtred.txt", "r") as f2:
content2 = f2.read().splitlines()
with open("Current/huming.txt", "r") as f3:
content3 = f3.read().splitlines()
N = len(content1)
sound1 = []
sound2 = []
huming = []
for i in range(N):
progressBar(i,N)
print(' Reading file text.txt')
sound1.append(float(content1[i]))
for i in range(N):
progressBar(i,N)
print(' Reading file filtred.txt')
sound2.append(float(content2[i]))
for i in range(N):
progressBar(i,N)
print(' Reading file filtred.txt')
huming.append(float(content3[i]))
x = [[],[]]
for i in range(N):
x[0].append(i/fd)
progressBar(i,N)
print(' Reading file text.txt')
for i in range(N):
x[1].append(i/fd)
progressBar(i,N)
print(' Reading file filtred.sd')
hmsound1 = []
hmsound2 = []
for i in range(N):
progressBar(i,N)
print('Creating window on text.txt')
hmsound1.append(sound1[i]*huming[i])
for i in range(N):
progressBar(i,N)
print('Creating window on filtred.txt')
hmsound2.append(sound2[i]*huming[i])
spectrum1 = rfft(hmsound1)
spectrum2 = rfft(hmsound2)
plt.figure()
plt.subplot(221)
plt.grid()
plt.plot(x[0], hmsound1)
plt.xlabel('T')
plt.title('high.sd')
plt.subplot(222)
plt.grid()
plt.plot(x[1], hmsound2)
plt.title('filtred.sd')
plt.subplot(223)
plt.grid()
plt.plot(rfftfreq(N, 1/fd), np_abs(spectrum1)/N)
plt.subplot(224)
plt.grid()
plt.plot(rfftfreq(N, 1/fd), np_abs(spectrum2)/N)
plt.show() | [
"61803449+andrsj@users.noreply.github.com"
] | 61803449+andrsj@users.noreply.github.com |
340b149fcbcb3ebc7c8da876e4f8f31e5443a3cf | 5dd8ce7f11c8f568e19fa821f07bb238733da972 | /Src/xmds2_0/xpdeint/Features/Validation.py | ff220c34a16abf4c7fee376ee584e7ed1e3a27fd | [] | no_license | htsenyasa/MachineLearningGrossPitaevskiiEq | fba9fa92879c2c805288950d344333f60d8c7ae4 | cae0b690841a55fda9d3ec49df7f8321a9835b95 | refs/heads/master | 2023-06-23T00:58:21.036638 | 2021-03-07T22:19:10 | 2021-03-07T22:19:10 | 345,210,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,534 | py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import builtins as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from xpdeint.Features._Validation import _Validation
import textwrap
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1484975071.706055
__CHEETAH_genTimestamp__ = 'Sat Jan 21 16:04:31 2017'
__CHEETAH_src__ = '/home/mattias/xmds-2.2.3/admin/staging/xmds-2.2.3/xpdeint/Features/Validation.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Aug 22 16:32:53 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class Validation(_Validation):
    """Cheetah-compiled template for the xpdeint 'Validation' feature.

    Auto-generated from Validation.tmpl — regenerate rather than edit.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(Validation, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            # Forward only the keyword args Cheetah's init understands.
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in list(KWs.items()):
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def description(self, **KWS):
        """Render the feature's one-line description."""
        ## Generated from @def description: Runtime variable validation at line 26, col 1.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write('''Runtime variable validation''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    def mainBegin(self, dict, **KWS):
        """Emit the run-time validation check code, if any checks exist."""
        ## CHEETAH: generated from @def mainBegin($dict) at line 29, col 1.
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        #
        if not VFFSL(SL,"runValidationChecks",True): # generated from line 31, col 3
            return
        write('''// Run-time validation checks
''')
        # Each check is dedented and written verbatim into the output.
        for validationCheck in VFFSL(SL,"validationChecks",True): # generated from line 35, col 3
            _v = VFN(VFFSL(SL,"textwrap",True),"dedent",False)(validationCheck) # u'${textwrap.dedent(validationCheck)}' on line 36, col 1
            if _v is not None: write(_filter(_v, rawExpr='${textwrap.dedent(validationCheck)}')) # from line 36, col 1.
            write('''
''')
        #
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    def writeBody(self, **KWS):
        """Main template body — writes only the template's trailing newline."""
        ## CHEETAH: main method generated for this template
        trans = KWS.get("trans")
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        #
        # Validation.tmpl
        #
        # Created by Graham Dennis on 2008-03-21.
        #
        # Copyright (c) 2008-2012, Graham Dennis
        #
        # This program is free software: you can redistribute it and/or modify
        # it under the terms of the GNU General Public License as published by
        # the Free Software Foundation, either version 2 of the License, or
        # (at your option) any later version.
        #
        # This program is distributed in the hope that it will be useful,
        # but WITHOUT ANY WARRANTY; without even the implied warranty of
        # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
        # GNU General Public License for more details.
        #
        # You should have received a copy of the GNU General Public License
        # along with this program.  If not, see <http://www.gnu.org/licenses/>.
        #
        write('''
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    featureName = 'Validation'
    _mainCheetahMethod_for_Validation= 'writeBody'
## END CLASS DEFINITION
if not hasattr(Validation, '_initCheetahAttributes'):
templateAPIClass = getattr(Validation, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(Validation)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=Validation()).run()
| [
"htsenyasa@gmail.com"
] | htsenyasa@gmail.com |
8979b43397731f7136edd31fef1a18c6b1719f03 | 48f73b5b78da81c388d76d685ec47bb6387eefdd | /scrapeHackerrankCode/codes/countingsort4.py | 19499fa9bfd4b0ac98cfcdcd0832d5e38c370504 | [] | no_license | abidkhan484/hacerrankScraping | ad0ceda6c86d321d98768b169d63ea1ee7ccd861 | 487bbf115117bd5c293298e77f15ae810a50b82d | refs/heads/master | 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # Wrong Answer
# Python 3
def counting_sort(li, last):
    """Flatten the 100 buckets of *li* into one list, preserving order.

    Each flattened element is echoed to stdout, space-separated: the value
    itself when it appears in *last*, a '-' placeholder otherwise.
    Returns the flattened list.
    """
    flattened = []
    for bucket_index in range(100):
        # Guard mirrors the original: only skip when li itself is empty.
        if li:
            flattened.extend(li[bucket_index])
    for item in flattened:
        marker = item if item in last else '-'
        print(marker, end=' ')
    return flattened
# Driver: read n pairs "index value" from stdin and bucket each value by
# its integer index (counting-sort buckets 0..99).
n = int(input().strip())
mylist = [[] for i in range(100)]
last_items = []
for i in range(n):
    m, a = input().split()
    m = int(m)
    mylist[m].append(a)
    # Remember only the values from the second half of the input; the
    # first half gets masked with '-' when printed (HackerRank rule).
    if ((n//2) <= i):
        last_items.append(a)
counting_sort(mylist, last_items)
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
095f28bd50f1fd571b4353f0807f9c0b3d1088f0 | 60aa3bcf5ace0282210685e74ee8ed31debe1769 | /simulation/objects/components/example.py | 4e2e03f6ed0f3bffcca45b85be3a36be97e6506f | [] | no_license | TheBreadGuy/sims4-ai-engine | 42afc79b8c02527353cc084117a4b8da900ebdb4 | 865212e841c716dc4364e0dba286f02af8d716e8 | refs/heads/master | 2023-03-16T00:57:45.672706 | 2016-05-01T17:26:01 | 2016-05-01T17:26:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | from sims4.tuning.tunable import Tunable, TunableFactory
from objects.components import Component, componentmethod
from sims4.log import Logger
from objects.components.types import EXAMPLE_COMPONENT
logger = Logger('ExampleComponent')
class ExampleComponent(Component, component_name=EXAMPLE_COMPONENT):
    """Minimal reference object component.

    Logs a warning-level line describing itself whenever its owning
    object changes location.
    """
    __qualname__ = 'ExampleComponent'

    def __init__(self, owner, example_name):
        super().__init__(owner)
        # Human-readable tag used to tell component instances apart in logs.
        self.example_name = example_name

    @componentmethod
    def example_component_method(self, prefix=''):
        # Project logger API: format string plus positional args, not a
        # pre-formatted message.
        logger.warn('{}self={} owner={} example_name={}', prefix, self, self.owner, self.example_name)

    def on_location_changed(self, old_location):
        # Framework hook fired when the owner moves; old_location is unused.
        self.example_component_method('on_location_changed: ')
class TunableExampleComponent(TunableFactory):
    """Tuning factory producing ExampleComponent instances.

    Exposes a single tunable, ``example_name``, forwarded to the
    ExampleComponent constructor.
    """
    __qualname__ = 'TunableExampleComponent'
    FACTORY_TYPE = ExampleComponent

    # NOTE(review): ``callback`` is accepted but never forwarded to the
    # superclass — confirm that is intended.
    def __init__(self, description='Example component, do not use on objects!', callback=None, **kwargs):
        super().__init__(example_name=Tunable(str, 'No name given.', description='Name to use to distinguish this component'), description=description, **kwargs)
| [
"jp@bellgeorge.com"
] | jp@bellgeorge.com |
181a43c318a97a1f549a48a039263e10796b9c5c | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayCloudCloudbaseFunctionArgumentModifyRequest.py | 55f25f723f5f0d999a2b595cebad11846630e3a9 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,052 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCloudCloudbaseFunctionArgumentModifyModel import AlipayCloudCloudbaseFunctionArgumentModifyModel
class AlipayCloudCloudbaseFunctionArgumentModifyRequest(object):
    """Request wrapper for the alipay.cloud.cloudbase.function.argument.modify API.

    Auto-generated SDK request object: holds the business payload plus the
    common gateway parameters, and serializes them via get_params().
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict, which is
        # converted through the model's from_alipay_dict() factory.
        if isinstance(value, AlipayCloudCloudbaseFunctionArgumentModifyModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayCloudCloudbaseFunctionArgumentModifyModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Assemble the flat parameter dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.cloud.cloudbase.function.argument.modify'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        # This API has no file parameters; always empty.
        multipart_params = dict()
        return multipart_params
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
bed37091c684cb97919804235df6b467b860396c | d6d87140d929262b5228659f89a69571c8669ec1 | /airbyte-connector-builder-server/connector_builder/generated/models/datetime_stream_slicer_all_of.py | dcd4d06d5fd5964f1ca2e312c73d2e0afb113de3 | [
"MIT",
"Elastic-2.0"
] | permissive | gasparakos/airbyte | b2bb2246ec6a10e1f86293da9d86c61fc4a4ac65 | 17c77fc819ef3732fb1b20fa4c1932be258f0ee9 | refs/heads/master | 2023-02-22T20:42:45.400851 | 2023-02-09T07:43:24 | 2023-02-09T07:43:24 | 303,604,219 | 0 | 0 | MIT | 2020-10-13T06:18:04 | 2020-10-13T06:06:17 | null | UTF-8 | Python | false | false | 2,953 | py | # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator # noqa: F401
from connector_builder.generated.models.any_of_interpolated_stringstring import AnyOfInterpolatedStringstring
from connector_builder.generated.models.any_of_min_max_datetimestring import AnyOfMinMaxDatetimestring
from connector_builder.generated.models.request_option import RequestOption
class DatetimeStreamSlicerAllOf(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    DatetimeStreamSlicerAllOf - a model defined in OpenAPI

        start_datetime: The start_datetime of this DatetimeStreamSlicerAllOf.
        end_datetime: The end_datetime of this DatetimeStreamSlicerAllOf.
        step: The step of this DatetimeStreamSlicerAllOf.
        cursor_field: The cursor_field of this DatetimeStreamSlicerAllOf.
        datetime_format: The datetime_format of this DatetimeStreamSlicerAllOf.
        config: The config of this DatetimeStreamSlicerAllOf.
        cursor: The cursor of this DatetimeStreamSlicerAllOf [Optional].
        cursor_end: The cursor_end of this DatetimeStreamSlicerAllOf [Optional].
        start_time_option: The start_time_option of this DatetimeStreamSlicerAllOf [Optional].
        end_time_option: The end_time_option of this DatetimeStreamSlicerAllOf [Optional].
        stream_state_field_start: The stream_state_field_start of this DatetimeStreamSlicerAllOf [Optional].
        stream_state_field_end: The stream_state_field_end of this DatetimeStreamSlicerAllOf [Optional].
        lookback_window: The lookback_window of this DatetimeStreamSlicerAllOf [Optional].
    """

    # Required slicing parameters.
    start_datetime: AnyOfMinMaxDatetimestring = Field(alias="start_datetime")
    end_datetime: AnyOfMinMaxDatetimestring = Field(alias="end_datetime")
    step: str = Field(alias="step")
    cursor_field: AnyOfInterpolatedStringstring = Field(alias="cursor_field")
    datetime_format: str = Field(alias="datetime_format")
    config: Dict[str, Any] = Field(alias="config")
    # Internal slicer state — note the underscored wire aliases.
    cursor: Optional[Dict[str, Any]] = Field(alias="_cursor", default=None)
    cursor_end: Optional[Dict[str, Any]] = Field(alias="_cursor_end", default=None)
    # Optional request-option / state-field configuration.
    start_time_option: Optional[RequestOption] = Field(alias="start_time_option", default=None)
    end_time_option: Optional[RequestOption] = Field(alias="end_time_option", default=None)
    stream_state_field_start: Optional[str] = Field(alias="stream_state_field_start", default=None)
    stream_state_field_end: Optional[str] = Field(alias="stream_state_field_end", default=None)
    lookback_window: Optional[AnyOfInterpolatedStringstring] = Field(alias="lookback_window", default=None)
DatetimeStreamSlicerAllOf.update_forward_refs() | [
"noreply@github.com"
] | gasparakos.noreply@github.com |
b4b8677d5f8abdf8ce876a8899616a1256c74a3f | dfaf6f7ac83185c361c81e2e1efc09081bd9c891 | /k8sdeployment/k8sstat/python/kubernetes/client/models/v2beta1_horizontal_pod_autoscaler.py | 81b1f32db2c950b486b75b8aa10be0dde44097e6 | [
"MIT",
"Apache-2.0"
] | permissive | JeffYFHuang/gpuaccounting | d754efac2dffe108b591ea8722c831d979b68cda | 2c63a63c571240561725847daf1a7f23f67e2088 | refs/heads/master | 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 | MIT | 2021-03-25T23:44:50 | 2020-03-05T02:44:15 | JavaScript | UTF-8 | Python | false | false | 7,284 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V2beta1HorizontalPodAutoscaler(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared type (as a string; resolved by the client).
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V2beta1HorizontalPodAutoscalerSpec',
        'status': 'V2beta1HorizontalPodAutoscalerStatus'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):  # noqa: E501
        """V2beta1HorizontalPodAutoscaler - a model defined in OpenAPI"""  # noqa: E501

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # Only assign provided values so unset fields stay None.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """Gets the api_version of this V2beta1HorizontalPodAutoscaler.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V2beta1HorizontalPodAutoscaler.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """Gets the kind of this V2beta1HorizontalPodAutoscaler.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V2beta1HorizontalPodAutoscaler.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V2beta1HorizontalPodAutoscaler.  # noqa: E501

        :return: The metadata of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V2beta1HorizontalPodAutoscaler.

        :param metadata: The metadata of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """Gets the spec of this V2beta1HorizontalPodAutoscaler.  # noqa: E501

        :return: The spec of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :rtype: V2beta1HorizontalPodAutoscalerSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V2beta1HorizontalPodAutoscaler.

        :param spec: The spec of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :type: V2beta1HorizontalPodAutoscalerSpec
        """

        self._spec = spec

    @property
    def status(self):
        """Gets the status of this V2beta1HorizontalPodAutoscaler.  # noqa: E501

        :return: The status of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :rtype: V2beta1HorizontalPodAutoscalerStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this V2beta1HorizontalPodAutoscaler.

        :param status: The status of this V2beta1HorizontalPodAutoscaler.  # noqa: E501
        :type: V2beta1HorizontalPodAutoscalerStatus
        """

        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists of models and dicts of
        # models via their own to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2beta1HorizontalPodAutoscaler):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"JeffYFHuang@github.com"
] | JeffYFHuang@github.com |
c4ee68989153810cacf00394dc731b07ff96d94f | a3035906490a1f4fd4527292263a9712eb505f59 | /tests/test_flask_pyoidc.py | 97904176da13cd95c1195d91167ce956b53aa579 | [
"Apache-2.0"
] | permissive | liam-middlebrook/Flask-pyoidc | ee6c48cf14792961d932e74e2872c9279f71be58 | e3bef3f865fba8a833b0687cd621af96fd733540 | refs/heads/master | 2020-05-29T12:18:53.458702 | 2016-06-16T07:09:00 | 2016-06-16T07:09:00 | 68,504,245 | 0 | 0 | null | 2016-09-18T07:23:15 | 2016-09-18T07:23:13 | Python | UTF-8 | Python | false | false | 5,170 | py | import json
import time
from six.moves.urllib.parse import parse_qsl, urlparse
from mock import MagicMock
import flask
import pytest
import responses
from flask import Flask
from oic.oic.message import IdToken, OpenIDSchema
from flask_pyoidc.flask_pyoidc import OIDCAuthentication
ISSUER = 'https://op.example.com'
class TestOIDCAuthentication(object):
    """Unit tests for OIDCAuthentication, with all OP interaction mocked."""

    @pytest.fixture(autouse=True)
    def create_flask_app(self):
        # Fresh Flask app per test; SERVER_NAME lets url_for build the
        # absolute redirect_uri outside a live request.
        self.app = Flask(__name__)
        self.app.config.update({'SERVER_NAME': 'localhost',
                                'SECRET_KEY': 'test_key'})

    @responses.activate
    def test_store_internal_redirect_uri_on_static_client_reg(self):
        # Serve a minimal provider-configuration document for discovery.
        responses.add(responses.GET, ISSUER + '/.well-known/openid-configuration',
                      body=json.dumps(dict(issuer=ISSUER, token_endpoint=ISSUER + '/token')),
                      content_type='application/json')

        authn = OIDCAuthentication(self.app, issuer=ISSUER,
                                   client_registration_info=dict(client_id='abc',
                                                                 client_secret='foo'))
        assert len(authn.client.registration_response['redirect_uris']) == 1
        assert authn.client.registration_response['redirect_uris'][0] == 'http://localhost/redirect_uri'

    @pytest.mark.parametrize('method', [
        'GET',
        'POST'
    ])
    def test_configurable_userinfo_endpoint_method_is_used(self, method):
        state = 'state'
        nonce = 'nonce'
        sub = 'foobar'
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER,
                                                                          'token_endpoint': '/token'},
                                   client_registration_info={'client_id': 'foo'},
                                   userinfo_endpoint_method=method)
        # Stub the token and userinfo exchanges so no network is involved.
        authn.client.do_access_token_request = MagicMock(
            return_value={'id_token': IdToken(**{'sub': sub, 'nonce': nonce}),
                          'access_token': 'access_token'})
        userinfo_request_mock = MagicMock(return_value=OpenIDSchema(**{'sub': sub}))
        authn.client.do_user_info_request = userinfo_request_mock
        with self.app.test_request_context('/redirect_uri?code=foo&state=' + state):
            flask.session['state'] = state
            flask.session['nonce'] = nonce
            flask.session['destination'] = '/'
            authn._handle_authentication_response()
        # The configured HTTP method must be forwarded to the userinfo call.
        userinfo_request_mock.assert_called_with(method=method, state=state)

    def test_no_userinfo_request_is_done_if_no_userinfo_endpoint_method_is_specified(self):
        state = 'state'
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER},
                                   client_registration_info={'client_id': 'foo'},
                                   userinfo_endpoint_method=None)
        userinfo_request_mock = MagicMock()
        authn.client.do_user_info_request = userinfo_request_mock
        authn._do_userinfo_request(state, None)
        assert not userinfo_request_mock.called

    def test_authenticatate_with_extra_request_parameters(self):
        extra_params = {"foo": "bar", "abc": "xyz"}
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER},
                                   client_registration_info={'client_id': 'foo'},
                                   extra_request_args=extra_params)
        with self.app.test_request_context('/'):
            a = authn._authenticate()
        # Extra args must appear verbatim in the authorization request query.
        request_params = dict(parse_qsl(urlparse(a.location).query))
        assert set(extra_params.items()).issubset(set(request_params.items()))

    def test_reauthentication_necessary_with_None(self):
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER},
                                   client_registration_info={'client_id': 'foo'})
        assert authn._reauthentication_necessary(None) is True

    def test_reauthentication_necessary_with_valid_id_token(self):
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER},
                                   client_registration_info={'client_id': 'foo'})
        test_time = 20  # NOTE(review): unused — leftover from an earlier version?
        id_token = {'iss': ISSUER}
        assert authn._reauthentication_necessary(id_token) is False

    def test_dont_reauthenticate_with_valid_id_token(self):
        authn = OIDCAuthentication(self.app, provider_configuration_info={'issuer': ISSUER},
                                   client_registration_info={'client_id': 'foo'})
        client_mock = MagicMock()
        callback_mock = MagicMock()
        callback_mock.__name__ = 'test_callback'  # required for Python 2
        authn.client = client_mock
        with self.app.test_request_context('/'):
            flask.session['destination'] = '/'
            flask.session['id_token'] = {'exp': time.time() + 25}
            authn.oidc_auth(callback_mock)()
        # A still-valid id_token must not trigger a new authorization
        # request; the wrapped view runs directly.
        assert not client_mock.construct_AuthorizationRequest.called
        assert callback_mock.called is True
| [
"rebecka.gulliksson@umu.se"
] | rebecka.gulliksson@umu.se |
2c7646b0df57962bdba6ec700e05df505503b1a0 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq2972.py | a07d39124316f5c715b11f6b2b7fe2f731a83585 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,317 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=46
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated 4-qubit test circuit.

    ``n`` is not referenced in the body (the gate sequence below is fixed
    and assumes ``input_qubit`` has at least 4 qubits — TODO confirm).
    A terminal measurement of all qubits is appended under key 'result'.
    The trailing ``# number=NN`` tags come from the circuit generator.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.H.on(input_qubit[2])) # number=39
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[3])) # number=16
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[3])) # number=17
    c.append(cirq.H.on(input_qubit[3])) # number=18
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[3])) # number=8
    c.append(cirq.H.on(input_qubit[3])) # number=43
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=44
    c.append(cirq.H.on(input_qubit[3])) # number=45
    c.append(cirq.H.on(input_qubit[3])) # number=40
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=41
    c.append(cirq.H.on(input_qubit[3])) # number=42
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=33
    c.append(cirq.X.on(input_qubit[3])) # number=34
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=35
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=25
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
    c.append(cirq.H.on(input_qubit[2])) # number=30
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=31
    c.append(cirq.H.on(input_qubit[2])) # number=32
    c.append(cirq.X.on(input_qubit[2])) # number=21
    c.append(cirq.H.on(input_qubit[2])) # number=36
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=37
    c.append(cirq.H.on(input_qubit[2])) # number=38
    c.append(cirq.H.on(input_qubit[0])) # number=26
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=27
    c.append(cirq.H.on(input_qubit[0])) # number=28
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=14
    c.append(cirq.Y.on(input_qubit[2])) # number=29
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measured bitstrings, folded to '0'/'1' strings.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Use a context manager so the file is closed even if a write fails
    # (the original opened/closed the handle manually and leaked it on error).
    with open("../data/startCirq2972.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        # len(circuit) is the idiomatic spelling of circuit.__len__().
        print(len(circuit), file=writefile)
        print(circuit, file=writefile)
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
44054f2db9a1717828d12b508c84eb70aac5ec41 | bf06bf980ef359615604d53567d1cc435a980b78 | /data/HW3/hw3_393.py | 3737707ba248e3457fb769feea86090504bcb756 | [] | no_license | am3030/IPT | dd22f5e104daa07a437efdf71fb58f55bcaf82d7 | 6851c19b2f25397f5d4079f66dbd19ba982245c5 | refs/heads/master | 2021-01-23T05:03:53.777868 | 2017-03-09T18:10:36 | 2017-03-09T18:10:36 | 86,270,526 | 0 | 0 | null | 2017-03-26T22:53:42 | 2017-03-26T22:53:42 | null | UTF-8 | Python | false | false | 929 | py |
def main():
    """Prompt for a temperature scale ('C' or 'K') and a temperature, then
    print whether water is solid, liquid, or gaseous at that temperature.

    Bug fix over the original: the phase checks are now mutually exclusive
    ``elif`` chains.  The original used independent ``if`` statements, so
    e.g. 150 C printed both the liquid and the vapor messages, and every
    positive Kelvin value printed the ice message regardless of magnitude.
    Unrecognized scale letters are silently ignored, as before.
    """
    scaleType = input("What temperature scale would you like? Please enter 'K' for kelvins or 'C' for Celcius. ")
    if scaleType == "C":
        tempC = float(input("What is the temperature of the water? "))
        if tempC <= 0.0:
            print("At this temperature, water is solid ice.")
        elif tempC <= 100.0:
            print("At this temperature, water is a liquid.")
        else:
            print("At the temperature, water is gaseous water vapor.")
    elif scaleType == "K":
        tempK = float(input("What is the temperature of the water? "))
        if tempK <= 0:
            # At or below absolute zero.
            print("That's impossible!")
        elif tempK <= 273.2:
            print("At this temperature, water is solid ice.")
        elif tempK <= 373.2:
            print("At this temperature, water is a liquid.")
        else:
            print("At this temperature, water is gaseous water vapor.")
# Run the interactive prompt only when executed as a script; the original
# called main() unconditionally, which also ran it on import.
if __name__ == "__main__":
    main()
| [
"mneary1@umbc.edu"
] | mneary1@umbc.edu |
1b76298b5547d8d29c729380de7d3f35010fc778 | 0ddbd741aef53f75902131853243891a93c81ef6 | /select_folder/folder_gui.py | 7fd7ece2562fe6617fb2148dc16152313c8844cc | [] | no_license | JennyPeterson10/The-Tech-Academy-Python-Projects | 3be2a10e0f8203fe920059e7a98508f3b8fe493f | 08841a3a7d514a0e1d3e7dddd440e5641e646fe8 | refs/heads/master | 2020-04-28T19:38:32.690859 | 2019-04-11T18:38:56 | 2019-04-11T18:38:56 | 175,517,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | # Created by Jenny Peterson March 29, 2019
from tkinter import *
import tkinter as tk
import folder_main
import folder_func
def load_gui(self):
    """Lay out the directory-picker widgets on self.master.

    Creates a Browse button wired to folder_func.openDir, a caption label,
    and an entry field for the selected directory path.
    """
    # Browse button: opens the directory-selection dialog.
    browse = tk.Button(
        self.master, width=15, height=1, text='Browse...',
        command=lambda: folder_func.openDir(self),
    )
    browse.grid(row=0, column=0, padx=(20, 0), pady=(40, 0), sticky=W)
    self.btn_browse = browse

    # Static caption for the directory display field.
    caption = tk.Label(self.master, width=15, height=1, text='Selected Directory: ')
    caption.grid(row=1, column=0, padx=(20, 0), pady=(10, 0), sticky=W)
    self.lbl_directory = caption

    # Entry showing the chosen path — presumably filled in by
    # folder_func.openDir; confirm against that module.
    field = tk.Entry(self.master, text='', width=55)
    field.grid(row=1, column=1, padx=(20, 0), pady=(10, 0), sticky=E + W)
    self.txt_browse = field
if __name__ == "__main__":
    # Import-only module: load_gui() is called by the application's main
    # window code, so nothing runs when this file is executed directly.
    pass
| [
"you@example.com"
] | you@example.com |
class bike(object):
    """A simple bike with a price, a top speed, and a running mileage counter.

    All mutators return ``self`` so calls can be chained fluently
    (e.g. ``b.ride().reverse().displayinfo()``).

    Fix over the original: the Python-2-only ``print x`` statements are
    converted to parenthesized single-argument ``print(...)`` calls, which
    produce identical output under both Python 2 and Python 3.
    """

    def __init__(self, price, max_speed):
        # price: purchase price; max_speed: descriptive top speed, e.g. '25mph'.
        self.price = price
        self.max_speed = max_speed
        # Net miles travelled; negative means net movement in reverse.
        self.init_miles = 0

    def displayinfo(self):
        """Print price, max speed, and net miles; return self for chaining."""
        print(self.price)
        print(self.max_speed)
        print(self.init_miles)
        return self

    def ride(self):
        """Advance 10 miles, report the new total, and return self."""
        self.init_miles += 10
        print("Riding {} miles".format(self.init_miles))
        return self

    def reverse(self):
        """Back up 5 miles, report the net position, and return self."""
        self.init_miles -= 5
        if self.init_miles > 0:
            print("Reversing {} miles".format(self.init_miles))
        elif self.init_miles == 0:
            print("Haven't moved")
        else:
            print("Went " + str(abs(self.init_miles)) + " miles in opposite direction")
        return self
# Demo script: exercise the bike class with three instances.
user1 = bike(22, '25mph')
user2 = bike(25, '30mph')
user3 = bike(50, '50mph')

# Three forward rides, one reverse, then a summary for the first bike.
for _ in range(3):
    user1.ride()
user1.reverse()
user1.displayinfo()

# The other two rely on fluent chaining (every method returns self).
user2.ride().ride().reverse().reverse().displayinfo()
user3.reverse().reverse().reverse().displayinfo()
| [
"ahravdutta02@gmail.com"
] | ahravdutta02@gmail.com |
442125862c5851b52822ed3df15064a90e45b9dc | 3c24e501eae18b841aaa6cc2f5f030ec7d7aaea9 | /Day-20/UsApp/forms.py | e93acab849712f018afad3da8c532815dd95540f | [] | no_license | SatheeshMatampalli/Django-Polytechnic-Internship | 2c26be5f7e48c7690e7f987c29ec6082b073862f | 8223d442c313ef94ec4dba21f54dfb79f01f45ee | refs/heads/main | 2023-02-04T04:36:40.073852 | 2020-12-24T06:57:12 | 2020-12-24T06:57:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm
from UsApp.models import ImPfle
class UsReg(UserCreationForm):
    """User registration form based on Django's UserCreationForm.

    Overrides the two password fields to attach the Bootstrap
    'form-control' class and placeholder text; only the username field
    is exposed from the User model.
    """
    # Password + confirmation, rendered as password inputs with styling.
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Enter Your Password"}))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={"class":"form-control","placeholder":"Enter Confirm Password"}))

    class Meta:
        model = User
        fields = ['username']
        # Style the username input to match the password fields.
        widgets = {
            "username":forms.TextInput(attrs = {
                "class":"form-control",
                "placeholder":"Enter Your Username",
            }),
        }
class Updf(ModelForm):
    """Profile-update form exposing basic User fields with Bootstrap widgets."""
    class Meta:
        model = User
        fields =["username","email","first_name","last_name"]
        # Every field gets the Bootstrap 'form-control' class and an
        # "Update ..." placeholder; email uses an <input type="email"> widget.
        widgets ={
            "username":forms.TextInput(attrs={
                "class":"form-control",
                "placeholder":"Update Username",
            }),
            "email":forms.EmailInput(attrs={
                "class":"form-control",
                "placeholder":"Update Emailid",
            }),
            "first_name":forms.TextInput(attrs={
                "class":"form-control",
                "placeholder":"Update First Name",
            }),
            "last_name":forms.TextInput(attrs={
                "class":"form-control",
                "placeholder":"Update Last Name",
            }),
        }
class Imp(ModelForm):
    """Form for the ImPfle model, exposing the 'age' and 'im' fields."""
    class Meta:
        model = ImPfle
        fields = ["age","im"]
        # Only 'age' gets an explicit styled widget; 'im' keeps the
        # model field's default widget.
        widgets = {
            "age":forms.NumberInput(attrs = {
                "class":"form-control",
                "placeholder":"Update Your Age",
            })
        }
"rravikumar34@gmail.com"
] | rravikumar34@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.